Update app.py
app.py CHANGED
@@ -1,7 +1,7 @@
-from huggingfaceinferenceclient import HuggingFaceInferenceClient
-from outpaintprocessor import DynamicImageOutpainter
-from aivideopipeline import AIImageVideoPipeline
-from mmig import MultiModelImageGenerator
+#from huggingfaceinferenceclient import HuggingFaceInferenceClient
+#from outpaintprocessor import DynamicImageOutpainter
+#from aivideopipeline import AIImageVideoPipeline
+#from mmig import MultiModelImageGenerator
 
 
 import os
@@ -12,9 +12,19 @@ from huggingface_hub import InferenceClient
 from IPython.display import Audio, display
 import gradio as gr
 
+read_token = os.getenv('HF_READ')
+write_token = os.getenv('HF_WRITE')
+#chatmodel
+chatmodel="mistralai/Mistral-Nemo-Instruct-2407"
 # Whisper for Speech-to-Text
 WHISPER_API_URL = "https://api-inference.huggingface.co/models/distil-whisper/distil-large-v2"
-WHISPER_HEADERS = {"Authorization": "Bearer 
+WHISPER_HEADERS = {"Authorization": "Bearer " + read_token}
+# Bark for Text-to-Speech
+BARK_API_URL = "https://api-inference.huggingface.co/models/suno/bark"
+BARK_HEADERS = {"Authorization": "Bearer "+read_token}
+# Flux for Image Generation
+FLUX_API_URL = "https://api-inference.huggingface.co/models/enhanceaiteam/Flux-uncensored"
+FLUX_HEADERS = {"Authorization": "Bearer "+read_token}
 
 def speech_to_text(filename):
     with open(filename, "rb") as f:
@@ -27,13 +37,13 @@ def speech_to_text(filename):
     return None
 
 # Chatbot Logic with Hugging Face InferenceClient
-client = InferenceClient(api_key=
+client = InferenceClient(api_key=read_token)
 
 def chatbot_logic(input_text):
     messages = [{"role": "user", "content": input_text}]
     try:
         completion = client.chat.completions.create(
-            model=
+            model=chatmodel,
             messages=messages,
             max_tokens=500
         )
@@ -42,9 +52,6 @@ def chatbot_logic(input_text):
         print(f"Error: {e}")
         return None
 
-# Bark for Text-to-Speech
-BARK_API_URL = "https://api-inference.huggingface.co/models/suno/bark"
-BARK_HEADERS = {"Authorization": "Bearer hf_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"}
 
 def text_to_speech(text):
     payload = {"inputs": text}
@@ -55,10 +62,6 @@ def text_to_speech(text):
     print(f"Error: {response.status_code} - {response.text}")
     return None
 
-# Flux for Image Generation
-FLUX_API_URL = "https://api-inference.huggingface.co/models/enhanceaiteam/Flux-uncensored"
-FLUX_HEADERS = {"Authorization": "Bearer hf_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"}
-
 def generate_image(prompt):
     data = {"inputs": prompt}
     response = requests.post(FLUX_API_URL, headers=FLUX_HEADERS, json=data)
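
The main effect of this commit is that the hardcoded hf_... tokens and the model id are replaced by values read from the environment. A minimal sketch of that pattern, assuming the same HF_READ / HF_WRITE variable names used in the diff; the startup guard is an addition for illustration, not part of the commit:

import os

# Read the Hugging Face API tokens from the environment; on a Space these
# would typically be configured as repository secrets named HF_READ and HF_WRITE.
read_token = os.getenv("HF_READ")
write_token = os.getenv("HF_WRITE")

# Optional guard (not in the commit): fail fast if the read token is missing,
# since every Inference API call depends on it.
if not read_token:
    raise RuntimeError("HF_READ is not set; the Inference API calls will fail.")

WHISPER_HEADERS = {"Authorization": "Bearer " + read_token}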
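
For the chatbot path, the commit swaps the hardcoded api_key and model for read_token and chatmodel. A sketch of the full helper, assuming the reply is taken from the first choice of the completion (the return statement itself is elided in the hunk):

from huggingface_hub import InferenceClient

client = InferenceClient(api_key=read_token)

def chatbot_logic(input_text):
    messages = [{"role": "user", "content": input_text}]
    try:
        completion = client.chat.completions.create(
            model=chatmodel,  # "mistralai/Mistral-Nemo-Instruct-2407"
            messages=messages,
            max_tokens=500
        )
        # Assumed: the generated reply is the first choice's message content.
        return completion.choices[0].message.content
    except Exception as e:
        print(f"Error: {e}")
        return None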
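
The speech_to_text, text_to_speech, and generate_image helpers all follow the same requests-based pattern against the hosted Inference API, and only fragments of their bodies appear in the hunks. A hedged sketch of that pattern for generate_image, where the 200-status branch and the use of response.content are assumptions filled in around the error-handling lines that do appear:

import requests

def generate_image(prompt):
    # Send the prompt to the Flux endpoint using the shared read-token headers.
    data = {"inputs": prompt}
    response = requests.post(FLUX_API_URL, headers=FLUX_HEADERS, json=data)
    if response.status_code == 200:
        # Assumed: the endpoint returns the generated image as raw bytes.
        return response.content
    print(f"Error: {response.status_code} - {response.text}")
    return None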