cleaning up backend sins now
This commit is contained in:
parent 64bb9f9db3
commit 1916185f19
@@ -1,8 +1,8 @@
 from openai import OpenAI
 from fastapi import FastAPI, File, Response, Request
 from fastapi.middleware.cors import CORSMiddleware
-from fastapi.responses import FileResponse
 from pydantic import BaseModel
+from io import BytesIO


 app = FastAPI()
@@ -26,22 +26,20 @@ class Conversation(BaseModel):


 @app.post("/get-text")
-def stt(audio: bytes = File()):
-    with open("audio.webm", "wb+") as f:
-        f.write(audio)
+async def stt(audio: bytes = File()):
+    with BytesIO(audio) as f:
+        f.name = "audio.mp3"
         transcript = openAI_clinet.audio.transcriptions.create(
             model="whisper-1",
             file=f,
             response_format="text",
         )
         data = {"len": len(audio), "user-transcript": transcript}
         return data


 @app.post("/conversation")
 async def get_next_response(request: Request):
-    # role = "test"
-    # res_msg = "temp test response"
     messages = await request.json()
     res = openAI_clinet.chat.completions.create(
         model="gpt-3.5-turbo",
@@ -57,10 +55,6 @@ async def get_next_response(request: Request):
 @app.get("/speak")
 def tts(text: str):
     res = openAI_clinet.audio.speech.create(
-        model="tts-1",
-        voice="nova",
-        input=text,
-        response_format='mp3'
+        model="tts-1", voice="nova", input=text, response_format="mp3"
     )
-    # this works for now but I need to find a way to stream this to response
     return Response(content=res.content, media_type="audio/mp3")
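
A note on the new stt() body above: the OpenAI Python SDK takes the upload filename, and with it the audio format, from the file object's name attribute, which a bare BytesIO does not have; that is what the explicit f.name = "audio.mp3" is for. A minimal sketch of the same pattern in isolation, assuming the openAI_clinet instance defined elsewhere in this file (transcribe_bytes is an illustrative helper, not part of the commit):

from io import BytesIO

def transcribe_bytes(audio: bytes) -> str:
    buf = BytesIO(audio)
    # The SDK reads the filename (and hence the format) from `name`,
    # so an in-memory buffer needs it set before upload.
    buf.name = "audio.mp3"
    return openAI_clinet.audio.transcriptions.create(
        model="whisper-1",
        file=buf,
        response_format="text",
    )

Recent SDK versions should also accept a (filename, bytes) tuple for file=, which avoids mutating the buffer.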
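
The comment dropped from the old /speak body ("this works for now but I need to find a way to stream this to response") points at the obvious follow-up. A hedged sketch of one way to do it, assuming the with_streaming_response helper available in recent openai-python releases plus FastAPI's StreamingResponse; the /speak-stream route and tts_stream name are illustrative, not part of this commit:

from fastapi.responses import StreamingResponse

@app.get("/speak-stream")
def tts_stream(text: str):
    def audio_chunks():
        # Keep the upstream OpenAI response open while chunks are relayed to the client.
        with openAI_clinet.audio.speech.with_streaming_response.create(
            model="tts-1", voice="nova", input=text, response_format="mp3"
        ) as res:
            yield from res.iter_bytes()

    return StreamingResponse(audio_chunks(), media_type="audio/mp3")

This way the first audio bytes reach the client without buffering the whole MP3 in memory first.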