BarBar288 committed on
Commit
ff68386
·
verified ·
1 Parent(s): b3db120

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -11
app.py CHANGED
@@ -2,20 +2,21 @@ import gradio as gr
2
  from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
3
  from diffusers import StableDiffusionPipeline
4
  import torch
5
- import requests
6
- from PIL import Image
7
- import io
8
- from huggingface_hub import login # Correct import for authentication
9
  import os
 
 
 
 
 
10
 
11
  # Read the Hugging Face access token from the environment variable
12
  read_token = os.getenv('AccToken')
13
  if not read_token:
14
  raise ValueError("Hugging Face access token not found. Please set the AccToken environment variable.")
 
15
  login(read_token)
16
 
17
  # Define a dictionary of conversational models
18
-
19
  conversational_models = {
20
  "Qwen": "Qwen/QwQ-32B",
21
  "DeepSeek R1": "deepseek-ai/DeepSeek-R1",
@@ -59,16 +60,15 @@ summarization_pipeline = pipeline("summarization", model="facebook/bart-large-cn
59
  try:
60
  text_to_audio_pipeline = pipeline("text-to-audio", model="stabilityai/stable-audio-open-1.0", use_auth_token=read_token)
61
  except ValueError as e:
62
- print(f"Error loading stabilityai/stable-audio-open-1.0: {e}")
63
- print("Falling back to a different text-to-audio model.")
64
  text_to_audio_pipeline = pipeline("text-to-audio", model="microsoft/speecht5_tts")
65
 
66
  audio_classification_pipeline = pipeline("audio-classification", model="facebook/wav2vec2-base")
67
 
68
-
69
  def load_conversational_model(model_name):
70
  if model_name not in conversational_models_loaded:
71
- print(f"Loading conversational model: {model_name}")
72
  tokenizer = AutoTokenizer.from_pretrained(conversational_models[model_name], use_auth_token=read_token)
73
  model = AutoModelForCausalLM.from_pretrained(conversational_models[model_name], use_auth_token=read_token)
74
  conversational_tokenizers[model_name] = tokenizer
@@ -97,7 +97,7 @@ def chat(model_name, user_input, history=[]):
97
 
98
  def generate_image(model_name, prompt):
99
  if model_name not in text_to_image_pipelines:
100
- print(f"Loading text-to-image model: {model_name}")
101
  text_to_image_pipelines[model_name] = StableDiffusionPipeline.from_pretrained(
102
  text_to_image_models[model_name], use_auth_token=read_token
103
  )
@@ -107,7 +107,7 @@ def generate_image(model_name, prompt):
107
 
108
  def generate_speech(model_name, text):
109
  if model_name not in text_to_speech_pipelines:
110
- print(f"Loading text-to-speech model: {model_name}")
111
  text_to_speech_pipelines[model_name] = pipeline(
112
  "text-to-speech", model=text_to_speech_models[model_name], use_auth_token=read_token
113
  )
 
2
  from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
3
  from diffusers import StableDiffusionPipeline
4
  import torch
 
 
 
 
5
  import os
6
+ import logging
7
+
8
+ # Set up logging
9
+ logging.basicConfig(level=logging.INFO)
10
+ logger = logging.getLogger(__name__)
11
 
12
  # Read the Hugging Face access token from the environment variable
13
  read_token = os.getenv('AccToken')
14
  if not read_token:
15
  raise ValueError("Hugging Face access token not found. Please set the AccToken environment variable.")
16
+ from huggingface_hub import login
17
  login(read_token)
18
 
19
  # Define a dictionary of conversational models
 
20
  conversational_models = {
21
  "Qwen": "Qwen/QwQ-32B",
22
  "DeepSeek R1": "deepseek-ai/DeepSeek-R1",
 
60
  try:
61
  text_to_audio_pipeline = pipeline("text-to-audio", model="stabilityai/stable-audio-open-1.0", use_auth_token=read_token)
62
  except ValueError as e:
63
+ logger.error(f"Error loading stabilityai/stable-audio-open-1.0: {e}")
64
+ logger.info("Falling back to a different text-to-audio model.")
65
  text_to_audio_pipeline = pipeline("text-to-audio", model="microsoft/speecht5_tts")
66
 
67
  audio_classification_pipeline = pipeline("audio-classification", model="facebook/wav2vec2-base")
68
 
 
69
  def load_conversational_model(model_name):
70
  if model_name not in conversational_models_loaded:
71
+ logger.info(f"Loading conversational model: {model_name}")
72
  tokenizer = AutoTokenizer.from_pretrained(conversational_models[model_name], use_auth_token=read_token)
73
  model = AutoModelForCausalLM.from_pretrained(conversational_models[model_name], use_auth_token=read_token)
74
  conversational_tokenizers[model_name] = tokenizer
 
97
 
98
  def generate_image(model_name, prompt):
99
  if model_name not in text_to_image_pipelines:
100
+ logger.info(f"Loading text-to-image model: {model_name}")
101
  text_to_image_pipelines[model_name] = StableDiffusionPipeline.from_pretrained(
102
  text_to_image_models[model_name], use_auth_token=read_token
103
  )
 
107
 
108
  def generate_speech(model_name, text):
109
  if model_name not in text_to_speech_pipelines:
110
+ logger.info(f"Loading text-to-speech model: {model_name}")
111
  text_to_speech_pipelines[model_name] = pipeline(
112
  "text-to-speech", model=text_to_speech_models[model_name], use_auth_token=read_token
113
  )