fix: asynchronous execution

parent 132a3dfe
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="DataSourceManagerImpl" format="xml" multifile-model="true">
<data-source source="LOCAL" name=".langchain" uuid="6ed2f640-6bd4-426b-bab0-933c1e5074e0">
<driver-ref>sqlite.xerial</driver-ref>
<synchronize>true</synchronize>
<jdbc-driver>org.sqlite.JDBC</jdbc-driver>
<jdbc-url>jdbc:sqlite:$PROJECT_DIR$/00-chartgpt/chainlit/.chainlit/.langchain.db</jdbc-url>
<working-dir>$ProjectFileDir$</working-dir>
</data-source>
</component>
</project>
\ No newline at end of file
from langchain import PromptTemplate, LLMChain
from langchain.chat_models import ChatOpenAI
import chainlit as cl
import os

# NOTE(security): a real-looking OpenAI API key is hardcoded and committed to
# source (several previously-used keys were also left here in comments —
# removed). Revoke these keys and load the secret from the environment or a
# secrets manager instead of setting it in code.
os.environ["OPENAI_API_KEY"] = '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'

# Step-by-step ("chain of thought" style) QA prompt; {question} is filled in
# by the LLMChain built in factory() below.
template = """Question: {question}
Answer: Let's think step by step."""
@cl.langchain_factory(use_async=False)
def factory():
    """Build the LLMChain chainlit serves: the module-level step-by-step
    QA prompt backed by a deterministic, streaming ChatOpenAI model."""
    qa_prompt = PromptTemplate(template=template, input_variables=["question"])
    model = ChatOpenAI(temperature=0, streaming=True)
    return LLMChain(prompt=qa_prompt, llm=model)
import os
import chainlit as cl
from langchain.chat_models import ChatOpenAI
from langchain.schema import (
    HumanMessage,
    SystemMessage
)

# Company-issued key for an Azure OpenAI deployment.
# NOTE(security): the key is hardcoded and committed to source — revoke it and
# load it from the environment instead.
os.environ["OPENAI_API_KEY"] = '70846324f01c4e5cb3cc11da28a1e091'
os.environ["OPENAI_API_BASE"] = 'https://opencatgpt.openai.azure.com/'
os.environ["OPENAI_API_TYPE"] = 'azure'
os.environ["OPENAI_API_VERSION"] = '2023-05-15'

# Azure OpenAI addresses the deployment via `engine`; here the deployment is
# named the same as the model ("gpt-35-turbo").
chat = ChatOpenAI(model_name="gpt-35-turbo", engine="gpt-35-turbo")

# Module-level conversation history shared across all messages, seeded with a
# system prompt (Chinese: "You are a chatbot; please answer the questions below.").
history = [SystemMessage(content="你是一个聊天机器人,请回答下列问题。\n")]
@cl.on_message  # invoked once for every message the user sends in the UI
async def main(message: str):
    """Record the user's message in the shared history, run the blocking
    chat-model call off the event loop, and send the reply to the UI."""
    history.append(HumanMessage(content=message))
    # chat() is synchronous; make_async runs it in a worker thread so the
    # event loop is not blocked.
    reply = await cl.make_async(sync_func)()
    print(reply.content)
    history.append(reply)
    await cl.Message(content=f"{reply.content}").send()
def sync_func():
    """Blocking chat-model call over the shared history; the on_message
    handler wraps this with cl.make_async."""
    response = chat(history)
    return response
import chainlit as cl
@cl.on_chat_start
async def start():
    """Ask the user to upload a text file, then report its size.

    Bug fix: the original initialized ``file = None`` but looped on the
    undefined name ``files``, raising NameError as soon as the chat started.
    """
    files = None
    # Keep asking until the user actually uploads a file.
    while files is None:
        files = await cl.AskFileMessage(
            content="Please upload a text file to begin!", accept=["text/plain"]
        ).send()
    # Decode the uploaded file's raw bytes as UTF-8 text.
    text_file = files[0]
    text = text_file.content.decode("utf-8")
    # Let the user know that the system is ready.
    await cl.Message(
        content=f"`{text_file.name}` uploaded, it contains {len(text)} characters!"
    ).send()
import chainlit as cl
async def some_function():
    """Prompt the user to upload a Python file (MIME text/plain, .py only).

    The response is not used; this only issues the upload prompt.
    """
    await cl.AskFileMessage(
        content="Please upload a python file to begin!", accept={"text/plain": [".py"]}
    ).send()
import chainlit as cl
@cl.on_message  # runs for every message the user sends in the UI
async def main(message: str):
    """Demo handler: show an indented intermediate "tool" step, then send
    a final answer message."""
    # Intermediate step, rendered nested under the tool author.
    await cl.Message(author="Tool 1", content=f"Response from tool1", indent=1).send()
    # Final answer back to the user.
    await cl.Message(content=f"This is the final answer").send()
import os
from langchain import PromptTemplate, OpenAI, LLMChain
import chainlit as cl

# NOTE(security): a hardcoded OpenAI API key is committed to source (another
# retired key was left in a comment here — removed). Revoke it and read the
# key from the environment instead.
os.environ["OPENAI_API_KEY"] = "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"

# Step-by-step QA prompt; {question} is filled in by the LLMChain below.
template = """Question: {question}
Answer: Let's think step by step."""
@cl.langchain_factory(use_async=True)
def factory():
    """Create the verbose LLMChain (deterministic OpenAI completion model)
    that chainlit drives for each incoming question."""
    question_prompt = PromptTemplate(template=template, input_variables=["question"])
    chain = LLMChain(prompt=question_prompt, llm=OpenAI(temperature=0), verbose=True)
    return chain
import openai
import chainlit as cl

# NOTE(security): hardcoded API key committed to source — revoke it and load
# the key from the environment instead.
openai.api_key = "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"

# Chat-capable model (switched from the legacy "text-davinci-003").
model_name = "gpt-3.5-turbo"

# Sampling parameters forwarded verbatim to openai.ChatCompletion.acreate.
settings = {
    "temperature": 0.7,
    "max_tokens": 500,
    "top_p": 1,
    "frequency_penalty": 0,
    "presence_penalty": 0,
}
@cl.on_chat_start
def start_chat():
    """Seed this session's message history with the assistant system prompt."""
    system_prompt = {"role": "system", "content": "You are a helpful assistant."}
    cl.user_session.set("message_history", [system_prompt])
@cl.on_message
async def main(message: str):
    """Stream a chat completion for the user's message, keeping per-session
    history in the chainlit user session."""
    # Per-session conversation history (seeded in start_chat).
    message_history = cl.user_session.get("message_history")
    message_history.append({"role": "user", "content": message})
    # Start with an empty UI message and stream tokens into it.
    msg = cl.Message(content="")
    async for stream_resp in await openai.ChatCompletion.acreate(
        model=model_name, messages=message_history, stream=True, **settings
    ):
        # Each streamed chunk carries an incremental "delta"; the "content"
        # field may be absent (e.g. on the role/terminator chunks).
        token = stream_resp.choices[0]["delta"].get("content", "")
        await msg.stream_token(token)
    # Record the fully-assembled reply so later turns see it.
    message_history.append({"role": "assistant", "content": msg.content})
    await msg.send()
import openai
import chainlit as cl

# NOTE(security): hardcoded API key committed to source — revoke it and load
# the key from the environment instead.
openai.api_key = "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"

# Legacy completions (non-chat) model.
model_name = "text-davinci-003"

# Sampling parameters for openai.Completion.acreate; generation stops when
# the model emits a "```" fence.
settings = {
    "temperature": 0.7,
    "max_tokens": 500,
    "top_p": 1,
    "frequency_penalty": 0,
    "presence_penalty": 0,
    "stop": ["```"],
}

# Completion prompt; {question} is substituted with the user's message.
prompt = """Answer the following question:
{question}
"""
@cl.on_message
async def main(message: str):
    """Stream a raw text completion for the user's message.

    Fixes the misspelled local variable ``fromatted_prompt`` ->
    ``formatted_prompt`` (local only; no interface change).
    """
    formatted_prompt = prompt.format(question=message)
    # Attach the prompt and LLM settings so the UI can display them alongside
    # the streamed answer.
    msg = cl.Message(
        content="",
        prompt=formatted_prompt,
        llm_settings=cl.LLMSettings(model_name=model_name, **settings),
    )
    # Stream tokens into the message as they arrive from the API.
    async for stream_resp in await openai.Completion.acreate(
        model=model_name, prompt=formatted_prompt, stream=True, **settings
    ):
        token = stream_resp.get("choices")[0].get("text")
        await msg.stream_token(token)
    await msg.send()
# Example: sending a GET request with urllib.
import urllib.request

response = urllib.request.urlopen('https://www.baidu.com/')
html = response.read()
print(html)

# Example: sending a GET request with requests.
import requests

response = requests.get('https://www.baidu.com/')
print(response.text)

# For POST requests, pass the payload via the `data` argument, e.g.:
import urllib.request
import urllib.parse

url = 'https://www.baidu.com/'
values = {'name': 'John', 'age': 25}
data = urllib.parse.urlencode(values)
data = data.encode('ascii')  # data must be bytes, not str
req = urllib.request.Request(url, data)
response = urllib.request.urlopen(req)
print(response.read())

# Example: sending a POST request with requests.
import requests

url = 'https://www.baidu.com/'
values = {'name': 'John', 'age': 25}
response = requests.post(url, data=values)
print(response.text)
"""
01.两数之和
"""
from typing import List
class Solution:
    def twoSum(self, nums: List[int], target: int) -> List[int]:
        """Return indices of the two elements of *nums* summing to *target*.

        The original body was the placeholder ``......`` (a syntax error);
        this implements the standard one-pass hash-map solution, O(n) time
        and O(n) space. Returns ``[]`` if no pair exists.

        :param nums: list of integers to search
        :param target: required pair sum
        :return: ``[i, j]`` with ``i < j`` and ``nums[i] + nums[j] == target``
        """
        seen = {}  # value -> index of its first occurrence
        for i, num in enumerate(nums):
            complement = target - num
            if complement in seen:
                return [seen[complement], i]
            seen[num] = i
        return []
"""
26. 删除有序数组中的重复项
给你一个 升序排列 的数组 nums ,请你 原地 删除重复出现的元素,使每个元素 只出现一次 ,返回删除后数组的新长度。元素的 相对顺序 应该保持 一致 。然后返回 nums 中唯一元素的个数。
考虑 nums 的唯一元素的数量为 k ,你需要做以下事情确保你的题解可以被通过:
更改数组 nums ,使 nums 的前 k 个元素包含唯一元素,并按照它们最初在 nums 中出现的顺序排列。nums 的其余元素与 nums 的大小不重要。
返回 k 。
判题标准:
系统会用下面的代码来测试你的题解:
int[] nums = [...]; // 输入数组
int[] expectedNums = [...]; // 长度正确的期望答案
int k = removeDuplicates(nums); // 调用
assert k == expectedNums.length;
for (int i = 0; i < k; i++) {
assert nums[i] == expectedNums[i];
}
如果所有断言都通过,那么您的题解将被 通过。
示例 1:
输入:nums = [1,1,2]
输出:2, nums = [1,2,_]
解释:函数应该返回新的长度 2 ,并且原数组 nums 的前两个元素被修改为 1, 2 。不需要考虑数组中超出新长度后面的元素。
示例 2:
输入:nums = [0,0,1,1,1,2,2,3,3,4]
输出:5, nums = [0,1,2,3,4]
解释:函数应该返回新的长度 5 , 并且原数组 nums 的前五个元素被修改为 0, 1, 2, 3, 4 。不需要考虑数组中超出新长度后面的元素。
提示:
1 <= nums.length <= 3 * 104
-104 <= nums[i] <= 104
nums 已按 升序 排列
"""
from typing import List
class Solution:
    def removeDuplicates(self, nums: List[int]) -> int:
        """Remove duplicates from the sorted list *nums* in place; return k,
        the number of unique elements.

        Bug fix: the original returned ``len(set(nums))`` without modifying
        ``nums``, so the first k elements were never deduplicated as the
        problem requires (and sets don't preserve order). This two-pointer
        version writes each new value forward in place, O(n) time, O(1)
        extra space; the return value is unchanged.

        :param nums: sorted (ascending) list, modified in place
        :return: count of unique elements; nums[:k] holds them in order
        """
        if not nums:
            return 0
        k = 1  # next write position; nums[:k] is the deduplicated prefix
        for i in range(1, len(nums)):
            if nums[i] != nums[k - 1]:
                nums[k] = nums[i]
                k += 1
        return k
if __name__ == '__main__':
    # Quick manual check: expect 5 unique elements.
    solver = Solution()
    unique_count = solver.removeDuplicates([0, 0, 1, 1, 1, 2, 2, 3, 3, 4])
    print(unique_count)
# This is a sample Python script.
# Press ⌃R to execute it or replace it with your code.
# Press Double ⇧ to search everywhere for classes, files, tool windows, actions, and settings.
def print_hi(name):
    """Print a friendly greeting addressed to *name*."""
    greeting = f'Hi, {name}'
    print(greeting)


if __name__ == '__main__':
    print_hi('PyCharm')
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册