awacke1 committed on
Commit
2c159cb
·
1 Parent(s): 5786d0f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +38 -0
app.py CHANGED
@@ -7,6 +7,8 @@ import json
7
  import mistune
8
  import pytz
9
  import math
 
 
10
  from datetime import datetime
11
  from openai import ChatCompletion
12
  from xml.etree import ElementTree as ET
@@ -20,6 +22,42 @@ menu = ["txt", "htm", "md", "py"]
20
  choice = st.sidebar.selectbox("Output File Type:", menu)
21
  model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))
22
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
  def chat_with_model(prompt, document_section):
24
  model = model_choice
25
  conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
 
7
  import mistune
8
  import pytz
9
  import math
10
+ import requests
11
+
12
  from datetime import datetime
13
  from openai import ChatCompletion
14
  from xml.etree import ElementTree as ET
 
22
  choice = st.sidebar.selectbox("Output File Type:", menu)
23
  model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))
24
 
25
def transcribe_audio_ui(openai_key):
    """Render a Streamlit UI that uploads an MP3 file and transcribes it
    via the OpenAI audio-transcription HTTP endpoint.

    Args:
        openai_key: OpenAI API key, used in the Authorization header.

    Side effects:
        Draws Streamlit widgets (title, file uploader, button) and writes
        the transcription text (or an error message) to the page.
    """
    OPENAI_API_URL = "https://api.openai.com/v1/audio/transcriptions"

    # NOTE: do NOT set "Content-Type: multipart/form-data" by hand (as the
    # original code did) — `requests` must generate that header itself so
    # the multipart boundary is included; a hand-written header breaks the
    # request on the server side.
    headers = {"Authorization": f"Bearer {openai_key}"}

    def transcribe_audio(audio_file, model):
        """POST an audio file to the transcription endpoint.

        Args:
            audio_file: file-like object (e.g. Streamlit UploadedFile) —
                the original accepted a path string but was called with a
                file object, which raised a TypeError in open().
            model: transcription model name, e.g. "whisper-1".

        Returns:
            The transcribed text on HTTP 200, otherwise None (an error
            message is shown in the UI).
        """
        # The model name is an ordinary form field, not a file part, so it
        # goes in `data=` rather than `files=`.
        files = {"file": audio_file}
        data = {"model": model}
        response = requests.post(OPENAI_API_URL, headers=headers,
                                 files=files, data=data)
        if response.status_code == 200:
            return response.json().get("text")
        st.error("Error in API call.")
        return None

    # --- Streamlit UI ---
    st.title("Audio Transcription Service")

    audio_file = st.file_uploader("Upload your audio file", type=["mp3"])

    if audio_file is not None:
        if st.button("Transcribe"):
            transcription = transcribe_audio(audio_file, "whisper-1")
            st.write(transcription)

transcribe_audio_ui(openai.api_key)
60
+
61
  def chat_with_model(prompt, document_section):
62
  model = model_choice
63
  conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]