seawolf2357 committed on
Commit c034f68 · verified · 1 Parent(s): a73d28d

Update app.py

Files changed (1)
  1. app.py +7 -8
app.py CHANGED
@@ -1,22 +1,23 @@
  import gradio as gr
  from transformers import pipeline
  import requests
+ import os
+
+ # Load the Hugging Face API token from an environment variable
+ hugging_face_auth_token = os.getenv("HUGGING_FACE_AUTH_TOKEN")

  # Load the image-recognition pipeline
  image_model = pipeline("image-classification", model="google/vit-base-patch16-224")

- hugging_face_auth_token = os.getenv("HUGGING_FACE_AUTH_TOKEN")
-
  def get_audiogen(prompt):
      # Call the audio-generation model API
      response = requests.post(
          "https://api-inference.huggingface.co/models/fffiloni/audiogen",
-         headers={"Authorization": "Bearer os.getenv"},
+         headers={"Authorization": f"Bearer {hugging_face_auth_token}"},  # fixed
          json={"inputs": prompt, "parameters": {"length": 10}, "options": {"use_cache": False}}
      )
      result = response.json()
-     # Implement the result-handling logic here.
-     # e.g. return the URL of the generated audio file, or the audio data itself.
+     # Return the URL of the generated audio file, or the audio data itself
      return result

  def classify_and_generate_audio(uploaded_image):
@@ -27,8 +28,7 @@ def classify_and_generate_audio(uploaded_image):
      # Generate audio
      audio_result = get_audiogen(top_prediction)

-     # Process audio_result and return it in a format Gradio can play back.
-     # e.g. audio_result['url'] or audio_result['audio_data']
+     # Process audio_result and return it in a format Gradio can play back
      return top_prediction, audio_result

  # Create the Gradio interface
@@ -42,4 +42,3 @@ iface = gr.Interface(

  # Launch the interface
  iface.launch()
-
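
Both versions of app.py still leave the result handling open: response.json() assumes the endpoint answers with JSON, while the Hugging Face Inference API typically returns raw audio bytes for audio-generation models, and the comment above return result only says to hand back a URL or the audio data. A minimal sketch of one way to finish get_audiogen, assuming the endpoint does return raw audio bytes and that the interface's audio output is a gr.Audio(type="filepath") component (both assumptions, not part of this commit):

import os
import tempfile

import requests

def get_audiogen(prompt):
    # Sketch only: assumes the Inference API responds with raw audio bytes
    # (e.g. WAV), not JSON -- this commit does not confirm the response format.
    hugging_face_auth_token = os.getenv("HUGGING_FACE_AUTH_TOKEN")
    response = requests.post(
        "https://api-inference.huggingface.co/models/fffiloni/audiogen",
        headers={"Authorization": f"Bearer {hugging_face_auth_token}"},
        json={"inputs": prompt, "parameters": {"length": 10}, "options": {"use_cache": False}},
    )
    response.raise_for_status()

    # Write the bytes to a temporary file so a gr.Audio(type="filepath")
    # output component can play the result directly.
    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp:
        tmp.write(response.content)
        return tmp.name

Whether fffiloni/audiogen is actually served by the Inference API, and its exact response format, would need to be checked; if the endpoint instead returns JSON containing a URL, the function would download that URL before handing a file path to Gradio.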