Skip to content

Instantly share code, notes, and snippets.

@goldengrape
goldengrape / implicit_func_2nd_diff.py
Last active January 5, 2024 23:30
用sympy求隐函数二阶导数
from sympy import symbols, Function, Eq,log,solve, simplify
# Compute the N-th derivative dy/dx of a function y(x) defined implicitly by
# `equation` (a sympy Eq), using implicit differentiation.
# NOTE(review): this is a truncated gist preview — the scrape stripped all
# indentation and the N > 1 branch is cut off below; restore the body from the
# original gist before running.
def derivative_hidden_function(equation, x, y_func,N):
# Differentiate the equation with respect to x; the chain rule applies
# automatically because y_func is a sympy Function of x.
diff_eq = equation.lhs.diff(x) - equation.rhs.diff(x)
# Solve the differentiated relation for dy/dx and take the first solution
# branch — presumably the equation yields a single branch; verify for inputs
# where solve() returns several.
dy_dx = solve(diff_eq, y_func.diff(x))[0]
if N==1:
return dy_dx
# Accumulator seeding the higher-order derivative loop (continues past this
# preview's cut-off).
diy_dxi=dy_dx

你是研究助理GPT。 你能够根据用户的要求,从学术和专利搜索引擎检索文献或者专利。在检索时,你应当参考下面“构建检索URL的方法”,构建检索URL,用voxscript的GetWebsiteContent功能访问该URL,并取回内容。 你能够帮助用户阅读文献或者专利,向用户解释文献或者专利,帮助用户理解。 你应当使用中文和英文检索,然后用中文回答

构建检索URL的方法

PMC:

https://www.ncbi.nlm.nih.gov/pmc/?term=myopia

复杂检索式举例:

@goldengrape
goldengrape / GPT Gist
Created November 16, 2023 22:30
Hello World From GPT
Hello World From GPT
# * write_to_file
# Writes text to a file. If file exists, appends the text at the end. If not, creates a new file.
# * del_file
# Deletes a file.
# * read_file
# Reads a file and returns the content.
# * add_task
@goldengrape
goldengrape / ChatGPT-Translate-Long-Text.js
Created October 1, 2023 19:39 — forked from JimLiu/ChatGPT-Translate-Long-Text.js
使用ChatGPT自动分页翻译长文
// WARNING: this script is for learning/demo purposes only; do not use it
// before understanding what it does.
// Purpose: paginate the input text, extract one page at a time, edit the
// second message, send it, and collect the results.
// Before use you need two existing messages; see the template at
// https://chat.openai.com/share/17195108-30c2-4c62-8d59-980ca645f111
// Demo video: https://www.bilibili.com/video/BV1tp4y1c7ME/?vd_source=e71f65cbc40a72fce570b20ffcb28b22
//
// NOTE(review): truncated gist preview — the IIFE body is cut off below.
// NOTE(review): "maxCharecters" is misspelled (maxCharacters); internal
// parameter, safe to rename in the full gist.
(function (fullText) {
// Promise-based sleep helper used to pace UI interactions.
const wait = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
// Split the text into non-empty lines and (presumably, past the cut-off)
// group them into pages of at most maxCharecters characters — confirm
// against the full gist.
const groupSentences = (fullText, maxCharecters = 2800) => {
const sentences = fullText.split("\n").filter((line) => line.trim().length > 0);
import streamlit as st
import numpy as np
from streamlit_webrtc import WebRtcMode, webrtc_streamer
# from streamlit_webrtc import VideoTransformerBase, VideoTransformerContext
from pydub import AudioSegment
import queue, pydub, tempfile, openai, os, time
def save_audio(audio_segment: AudioSegment, base_filename: str) -> None:
from langchain.chat_models import ChatOpenAI
from langchain.agents import load_tools, initialize_agent
from langchain.agents import AgentType
from langchain.tools import AIPluginTool
import pandas as pd
import requests
from langchain.tools.plugin import AIPlugin
import json
import yaml
from langchain.llms import OpenAI
from langchain.callbacks.base import BaseCallbackHandler
import azure.cognitiveservices.speech as speechsdk
import os
import base64
import time
# Callback handler that (judging by its name and base class) renders streamed
# LLM output into a Streamlit container — confirm against the full gist.
# NOTE(review): truncated gist preview — indentation is stripped and the rest
# of the class (the callback methods) is cut off below.
class StreamDisplayHandler(BaseCallbackHandler):
def __init__(self, container, initial_text="", display_method='markdown'):
# Target container the text is rendered into (presumably a Streamlit
# placeholder — verify against callers).
self.container = container
# Text accumulated so far; starts from initial_text.
self.text = initial_text
# Name of the rendering method to use (e.g. 'markdown'); presumably
# resolved via getattr on the container elsewhere in the class — TODO
# confirm in the full gist.
self.display_method = display_method
from langchain import OpenAI, LLMChain, PromptTemplate
from langchain.memory import ConversationBufferMemory
# Advisor personas (color, name, personality, role) used to build LLM prompts.
# NOTE(review): truncated gist preview — the list literal is cut off below;
# restore the remaining entries from the original gist before running.
advisor_list = [
{
"color": "White",
"name": "荀彧",
"personality": "You focus on facts and data. You approach problems objectively, ignoring personal biases.",
"work": "Your role is to provide factual information, helping to clarify the situation. You need to give your own independent opinion and not be distracted by others."
},
{
def query_prompt(query):
# Build a prompt that instructs the model to turn the QUESTION `query`
# into a Google Scholar query string, using a worked example.
# NOTE(review): truncated gist preview — the f-string below is never closed
# here (and `query` is presumably interpolated past the cut-off); restore
# the remainder from the original gist before running.
ask = f"""
You are a professional logic researcher.
I will give you a QUESTION that you need to generate a query statement in google scholar based on this question.
Here is an example of a google scholar query statement:
orthokeratology retina child OR children "myopia control" -atropine
It means:
Contains all words: orthokeratology retina