提交 5ad452d1 编写于 作者:檀越@新空间

fix:删除chatgpt

上级 bb7e548f
[project]
# If true (default), the app will be available to anonymous users.
# If false, users will need to authenticate and be part of the project to use the app.
public = true
# The project ID (found on https://cloud.chainlit.io).
# If provided, all the message data will be stored in the cloud.
# The project ID is required when public is set to false.
#id = ""
# Whether to enable telemetry (default: true). No personal data is collected.
enable_telemetry = true
# List of environment variables to be provided by each user to use the app.
user_env = []
[UI]
# Name of the app and chatbot.
name = "Chatbot"
# Description of the app and chatbot. This is used for HTML tags.
# description = ""
# The default value for the expand messages settings.
default_expand_messages = false
# Hide the chain of thought details from the user in the UI.
hide_cot = false
# Link to your github repo. This will add a github button in the UI's header.
# github = ""
[meta]
generated_by = "0.4.1"
<!-- before (old .iml) -->
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
  <component name="NewModuleRootManager">
    <content url="file://$MODULE_DIR$" />
    <orderEntry type="inheritedJdk" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
</module>
<!-- after (new .iml) -->
<?xml version="1.0" encoding="UTF-8"?>
<module version="4">
  <component name="PyDocumentationSettings">
    <option name="format" value="PLAIN" />
    <option name="myDocStringFormat" value="Plain" />
  </component>
</module>
\ No newline at end of file
[project]
# If true (default), the app will be available to anonymous users.
# If false, users will need to authenticate and be part of the project to use the app.
public = true
# The project ID (found on https://cloud.chainlit.io).
# The project ID is required when public is set to false or when using the cloud database.
#id = ""
# Uncomment if you want to persist the chats.
# local will create a database in your .chainlit directory (requires node.js installed).
# cloud will use the Chainlit cloud database.
# custom will use your custom client.
# database = "local"
# Whether to enable telemetry (default: true). No personal data is collected.
enable_telemetry = true
# List of environment variables to be provided by each user to use the app.
user_env = []
[UI]
# Name of the app and chatbot.
name = "Chatbot"
# Description of the app and chatbot. This is used for HTML tags.
# description = ""
# The default value for the expand messages settings.
default_expand_messages = false
# Hide the chain of thought details from the user in the UI.
hide_cot = false
# Link to your github repo. This will add a github button in the UI's header.
# github = ""
[meta]
generated_by = "0.4.2"
from langchain import PromptTemplate, LLMChain
from langchain.chat_models import ChatOpenAI
import chainlit as cl
import os

# SECURITY: a real-looking API key is hard-coded below, and several more were
# left behind in commented-out lines (now removed — commented-out code should
# be deleted, and a key committed to version control must be treated as leaked
# and revoked). Prefer supplying the key via the OPENAI_API_KEY env variable.
# setdefault keeps backward compatibility: an externally exported key wins,
# otherwise the original hard-coded value is used as before.
os.environ.setdefault("OPENAI_API_KEY", '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')

# Chain-of-thought style prompt; {question} is filled in by the chain.
template = """Question: {question}
Answer: Let's think step by step."""
@cl.langchain_factory(use_async=False)
def factory():
    """Build the LLMChain served by chainlit: prompt template + streaming chat model."""
    question_prompt = PromptTemplate(template=template, input_variables=["question"])
    model = ChatOpenAI(temperature=0, streaming=True)
    return LLMChain(prompt=question_prompt, llm=model)
# Azure OpenAI chat demo for chainlit: all state below is module-level.
import os
import chainlit as cl
from langchain.chat_models import ChatOpenAI
from langchain.schema import (
HumanMessage,
SystemMessage
)
# Company Azure OpenAI key. NOTE(review): a hard-coded secret committed to the
# repo — it should be rotated and supplied via the environment instead.
os.environ["OPENAI_API_KEY"] = '70846324f01c4e5cb3cc11da28a1e091'
os.environ["OPENAI_API_BASE"] = 'https://opencatgpt.openai.azure.com/'
os.environ["OPENAI_API_TYPE"] = 'azure'
os.environ["OPENAI_API_VERSION"] = '2023-05-15'
# On Azure the deployment name is passed both as model_name and engine.
chat = ChatOpenAI(model_name="gpt-35-turbo", engine="gpt-35-turbo")
# Shared conversation history, seeded with a system prompt (Chinese:
# "You are a chatbot, please answer the following questions").
# NOTE(review): module-level, so it is shared by every connected user.
history = [SystemMessage(content="你是一个聊天机器人,请回答下列问题。\n")]
@cl.on_message
async def main(message: str):
    """Record the user's message, query the model off the event loop, and reply."""
    history.append(HumanMessage(content=message))
    # chat() is blocking; make_async runs it in a worker thread.
    res = await cl.make_async(sync_func)()
    history.append(res)
    reply = f"{res.content}"
    await cl.Message(content=reply).send()
def sync_func():
    """Blocking call into the chat model with the running conversation."""
    response = chat(history)
    return response
# Welcome to 丽影洞察! 🚀🤖
\ No newline at end of file
import chainlit as cl


@cl.on_chat_start
async def start():
    """On session start, ask the user for a text file and report its size."""
    # BUG FIX: the original wrote `file = None` but the loop below tested
    # `files`, which was never defined — a guaranteed NameError on first run.
    files = None

    # Wait for the user to upload a file; keep asking until we get a result
    # (`is None` instead of `== None` per PEP 8).
    while files is None:
        files = await cl.AskFileMessage(
            content="Please upload a text file to begin!", accept=["text/plain"]
        ).send()

    # Decode the first uploaded file as UTF-8 text.
    text_file = files[0]
    text = text_file.content.decode("utf-8")

    # Let the user know that the system is ready.
    await cl.Message(
        content=f"`{text_file.name}` uploaded, it contains {len(text)} characters!"
    ).send()
import chainlit as cl


async def some_function():
    """Ask the user to upload a .py file (MIME type restricted to text/plain)."""
    file = await cl.AskFileMessage(
        content="Please upload a python file to begin!",
        accept={"text/plain": [".py"]},
    ).send()
import chainlit as cl


@cl.on_message  # this function will be called every time a user inputs a message in the UI
async def main(message: str):
    """Demo handler: emit one indented intermediate step, then the final answer."""
    # this is an intermediate step; indent=1 nests it under the final message
    # (FIX: dropped the pointless f-prefix on placeholder-free strings, ruff F541)
    await cl.Message(author="Tool 1", content="Response from tool1", indent=1).send()

    # send back the final answer
    await cl.Message(content="This is the final answer").send()
# Chainlit chat demo that additionally logs Q/A pairs to a local MySQL server.
import os
import chainlit as cl
from pymysql import Connection
from langchain.chat_models import ChatOpenAI
from langchain.schema import (
HumanMessage,
SystemMessage
)
# Build the connection to the MySQL database.
conn = Connection(
host="localhost", # host name (IP)
port=3306, # port
user="root", # account
password="123456", # password — NOTE(review): credentials hard-coded in source
autocommit=True # auto-commit (confirm each write immediately)
)
# Company Azure OpenAI key. NOTE(review): hard-coded secret — should be rotated
# and supplied via the environment instead.
os.environ["OPENAI_API_KEY"] = '70846324f01c4e5cb3cc11da28a1e091'
os.environ["OPENAI_API_BASE"] = 'https://opencatgpt.openai.azure.com/'
os.environ["OPENAI_API_TYPE"] = 'azure'
os.environ["OPENAI_API_VERSION"] = '2023-05-15'
# On Azure the deployment name is passed both as model_name and engine.
chat = ChatOpenAI(model_name="gpt-35-turbo", engine="gpt-35-turbo")
# Shared conversation history seeded with a Chinese system prompt
# ("You are a chatbot, please answer the following questions").
history = [SystemMessage(content="你是一个聊天机器人,请回答下列问题。\n")]
@cl.on_message
async def main(message: str):
    """On each user message: record it, ask the model, log to MySQL, reply."""
    history.append(HumanMessage(content=message))
    # Run the blocking model call on a worker thread.
    res = await cl.make_async(sync_func)()
    insert(message, res)  # persist the question/answer pair
    history.append(res)
    reply = f"{res.content}"
    await cl.Message(content=reply).send()
def sync_func():
    """Synchronous model call over the shared history (wrapped via make_async)."""
    result = chat(history)
    return result
def insert(que, res):
    """Persist one question/response pair into table `chatbot` of database `kwan`.

    BUG FIX: the original SQL was `values(que, res)`, which references SQL
    identifiers named que/res instead of binding the Python arguments — the
    intended values were never inserted. Use %s placeholders so pymysql
    escapes the values safely (also closes the SQL-injection hole).
    BUG FIX: the original called conn.close(), killing the module-level
    connection after the first message; close only the cursor. autocommit=True
    on the connection already confirms the write, so no explicit commit.
    """
    conn.select_db("kwan")
    cursor = conn.cursor()  # obtain a cursor object
    try:
        cursor.execute(
            "insert into chatbot(question,response) values(%s, %s)",
            # str() because callers pass a langchain message object for `res`.
            (str(que), str(res)),
        )
    finally:
        cursor.close()
此差异已折叠。
# Chainlit + langchain demo using the plain OpenAI completion model.
import os
from langchain import PromptTemplate, OpenAI, LLMChain
import chainlit as cl
# Route HTTP(S) traffic through a local proxy (e.g. to reach the OpenAI API).
os.environ["http_proxy"] = "http://localhost:7890"
os.environ["https_proxy"] = "http://localhost:7890"
# NOTE(review): hard-coded API key committed to the repo — revoke it and load
# from the environment instead.
os.environ["OPENAI_API_KEY"] = "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"
# Chain-of-thought style prompt; {question} is filled in by the chain.
template = """Question: {question}
Answer: Let's think step by step."""
@cl.langchain_factory(use_async=True)
def factory():
    """Assemble the verbose LLMChain that chainlit serves for this app."""
    qa_prompt = PromptTemplate(template=template, input_variables=["question"])
    return LLMChain(prompt=qa_prompt, llm=OpenAI(temperature=0), verbose=True)
# Chainlit demo streaming gpt-3.5-turbo chat completions token by token.
import openai
import chainlit as cl
# Route API calls through a local proxy.
openai.proxy = 'http://127.0.0.1:7890'
# NOTE(review): hard-coded API key — revoke and load from the environment.
openai.api_key = "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"
model_name = "gpt-3.5-turbo"
# Sampling settings forwarded verbatim to ChatCompletion.acreate.
settings = {
"temperature": 0.7,
"max_tokens": 500,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
}
@cl.on_chat_start
def start_chat():
    """Seed the per-session message history with the system prompt."""
    initial_history = [{"role": "system", "content": "You are a helpful assistant."}]
    cl.user_session.set("message_history", initial_history)
@cl.on_message
async def main(message: str):
    """Append the user message, then stream the assistant reply token by token."""
    message_history = cl.user_session.get("message_history")
    message_history.append({"role": "user", "content": message})

    msg = cl.Message(content="")
    stream = await openai.ChatCompletion.acreate(
        model=model_name, messages=message_history, stream=True, **settings
    )
    async for chunk in stream:
        # Each streamed chunk carries an incremental "delta"; content may be absent.
        delta = chunk.choices[0]["delta"]
        await msg.stream_token(delta.get("content", ""))

    # Record the fully assembled assistant reply before sending it.
    message_history.append({"role": "assistant", "content": msg.content})
    await msg.send()
# Chainlit demo streaming text-davinci-003 completions.
import openai
import chainlit as cl
# Route API calls through a local proxy.
openai.proxy = 'http://127.0.0.1:7890'
# NOTE(review): hard-coded API key — revoke and load from the environment.
openai.api_key = "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"
model_name = "text-davinci-003"
# Sampling settings for the completion endpoint; generation stops at "```".
settings = {
"temperature": 0.7,
"max_tokens": 500,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
"stop": ["```"],
}
# Prompt template; {question} is substituted with the user's message.
prompt = """Answer the following question:
{question}
"""
@cl.on_message
async def main(message: str):
    """Stream a text completion for the user's question back into one message."""
    # (local typo fixed: "fromatted" -> "formatted")
    formatted_prompt = prompt.format(question=message)

    msg = cl.Message(
        content="",
        prompt=formatted_prompt,
        llm_settings=cl.LLMSettings(model_name=model_name, **settings),
    )

    stream = await openai.Completion.acreate(
        model=model_name, prompt=formatted_prompt, stream=True, **settings
    )
    async for part in stream:
        await msg.stream_token(part.get("choices")[0].get("text"))

    await msg.send()
\ No newline at end of file
# Plain console (non-chainlit) chat loop against Azure OpenAI.
import os
from langchain.chat_models import ChatOpenAI
from langchain.schema import (
HumanMessage,
SystemMessage
)
# Company Azure OpenAI key. NOTE(review): hard-coded secret — should be rotated
# and supplied via the environment instead.
os.environ["OPENAI_API_KEY"] = '70846324f01c4e5cb3cc11da28a1e091'
os.environ["OPENAI_API_BASE"] = 'https://opencatgpt.openai.azure.com/'
os.environ["OPENAI_API_TYPE"] = 'azure'
os.environ["OPENAI_API_VERSION"] = '2023-05-15'
# On Azure the deployment name is passed both as model_name and engine.
chat = ChatOpenAI(model_name="gpt-35-turbo", engine="gpt-35-turbo")
# Running conversation, seeded with a Chinese system prompt
# ("You are a chatbot, please answer the following questions").
history = [SystemMessage(content="你是一个聊天机器人,请回答下列问题。\n")]
# Interactive REPL: read a question, query the model, print the answer,
# and keep both sides of the exchange in the running history.
while True:
    user_text = input("请输入问题:\n")
    history.append(HumanMessage(content=user_text))
    answer = chat(history)
    print(answer.content)
    history.append(answer)
# Example: sending a GET request with the stdlib urllib.
import urllib.request
resp = urllib.request.urlopen('https://www.baidu.com/')
body = resp.read()
print(body)

# Example: the same GET request with the requests library.
import requests
resp = requests.get('https://www.baidu.com/')
print(resp.text)
# For POST requests, pass the payload via the `data` argument, as shown below.
# Example: POST with urllib — the payload must be urlencoded bytes.
import urllib.request
import urllib.parse

url = 'https://www.baidu.com/'
values = {'name': 'John', 'age': 25}
payload = urllib.parse.urlencode(values).encode('ascii')  # data must be bytes
req = urllib.request.Request(url, payload)
resp = urllib.request.urlopen(req)
print(resp.read())

# Example: the same POST with requests — it handles the encoding itself.
import requests
url = 'https://www.baidu.com/'
values = {'name': 'John', 'age': 25}
resp = requests.post(url, data=values)
print(resp.text)
\ No newline at end of file
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册