"""State definitions for the Reflex chatbot tutorial (chatapp)."""
import openai
import reflex as rx
class ChatappState(rx.State):
    """Application state for the chatbot tutorial.

    Tracks the question currently being typed and the running
    conversation, and provides four progressively smarter event
    handlers (`answer` through `answer4`) that each tutorial step
    wires up in turn.
    """

    # The question currently typed into the input box.
    question: str

    # Conversation so far, oldest first, as (question, answer) pairs.
    chat_history: list[tuple[str, str]]

    def answer(self):
        """Append a canned reply for the current question."""
        # Our chatbot is not very smart right now...
        reply = "I don't know!"
        self.chat_history.append((self.question, reply))

    def answer2(self):
        """Append a canned reply, then reset the input field."""
        # Our chatbot is not very smart right now...
        reply = "I don't know!"
        self.chat_history.append((self.question, reply))
        # Clear the question input.
        self.question = ""

    async def answer3(self):
        """Stream a canned reply to the frontend one character at a time."""
        import asyncio

        # Our chatbot is not very smart right now...
        reply = "I don't know!"
        # Start with an empty answer so the UI shows the question immediately.
        self.chat_history.append((self.question, ""))

        # Clear the question input.
        self.question = ""
        # Yield here to clear the frontend input before continuing.
        yield

        shown = ""
        for ch in reply:
            await asyncio.sleep(0.1)
            shown += ch
            self.chat_history[-1] = (self.chat_history[-1][0], shown)
            yield

    def answer4(self):
        """Stream a real answer from the OpenAI chat completion API."""
        # Our chatbot has some brains now!
        session = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": self.question}],
            stop=None,
            temperature=0.7,
            stream=True,
        )

        # Add to the answer as the chatbot responds.
        partial = ""
        self.chat_history.append((self.question, partial))

        # Clear the question input.
        self.question = ""
        # Yield here to clear the frontend input before continuing.
        yield

        for chunk in session:
            delta = chunk.choices[0].delta
            # The final streamed chunk carries no "content" attribute
            # in the legacy (pre-1.0) openai streaming API.
            if hasattr(delta, "content"):
                partial += delta.content
                self.chat_history[-1] = (self.chat_history[-1][0], partial)
                yield