dror201031 committed on
Commit
2f394e3
verified
1 Parent(s): f3dde0c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -45
app.py CHANGED
@@ -10,16 +10,13 @@ try:
10
  import transformers
11
  import huggingface_hub
12
  import torch
13
- import accelerate
14
- import bitsandbytes
15
  except ImportError:
16
  print("诪转拽讬谉 讞讘讬诇讜转 谞讚专砖讜转...")
17
  packages = [
18
  "transformers>=4.38.0",
19
  "huggingface_hub>=0.20.0",
20
  "torch>=2.0.0",
21
- "accelerate>=0.25.0",
22
- "bitsandbytes>=0.40.0"
23
  ]
24
  subprocess.check_call([sys.executable, "-m", "pip", "install"] + packages)
25
  print("讛转拽谞转 讛讞讘讬诇讜转 讛讜砖诇诪讛 讘讛爪诇讞讛!")
@@ -43,8 +40,8 @@ if token:
43
  else:
44
  print("讗讝讛专讛: 讟讜拽谉 HF_TOKEN 诇讗 诪讜讙讚专 讘住讘讬讘转 讛注讘讜讚讛. 讬讬转讻谉 砖诇讗 转讛讬讛 讙讬砖讛 诇诪讜讚诇讬诐 诪讜讙讘诇讬诐.")
45
 
46
- # 讛讙讚专转 砖诐 讛诪讜讚诇 - 诪砖转诪砖 讘诪讜讚诇 驻转讜讞 砖诇讗 讚讜专砖 讗讬砖讜专 诪讬讜讞讚
47
- model_name = "Fedifehmi/gemma3n-e2b-children-storyteller" # 诪讜讚诇 诪讘讜住住 Gemma 驻转讜讞
48
 
49
  # 谞讬住讬讜谉 讟注讬谞讛 砖诇 讛诪讜讚诇
50
  print(f"讟讜注谉 诪讜讚诇 {model_name}...")
@@ -54,9 +51,7 @@ try:
54
  "text-generation",
55
  model=model_name,
56
  device_map="auto",
57
- torch_dtype="auto",
58
- trust_remote_code=True, # 诇讛讜住讬祝 转诪讬讻讛 讘拽讜讚 诪专讜讞拽
59
- model_kwargs={"quantization_config": {"load_in_4bit": True}}
60
  )
61
  print("讟注讬谞转 讛诪讜讚诇 讛爪诇讬讞讛!")
62
  except Exception as e:
@@ -64,14 +59,12 @@ except Exception as e:
64
  # 谞讬住讬讜谉 诇讟注讜谉 诪讜讚诇 驻转讜讞 驻砖讜讟 讬讜转专 讘诪拽专讛 砖诇 讻讬砖诇讜谉
65
  try:
66
  print("诪谞住讛 诇讟注讜谉 诪讜讚诇 讞诇讜驻讬...")
67
- model_name = "mistralai/Mistral-7B-Instruct-v0.2"
68
  generator = transformers.pipeline(
69
  "text-generation",
70
  model=model_name,
71
  device_map="auto",
72
- torch_dtype="auto",
73
- trust_remote_code=True,
74
- model_kwargs={"quantization_config": {"load_in_4bit": True}}
75
  )
76
  print("讟注讬谞转 讛诪讜讚诇 讛讞诇讜驻讬 讛爪诇讬讞讛!")
77
  except Exception as e2:
@@ -82,40 +75,26 @@ def ask_model(prompt):
82
  return "讛诪讜讚诇 诇讗 谞讟注谉 讘讛爪诇讞讛. 讘讚讜拽 讗转 诇讜讙 讛砖讙讬讗讜转."
83
 
84
  try:
85
- # 讛转讗诪转 讛驻专讜诪驻讟 诇驻讜专诪讟 讛谞讻讜谉 诇驻讬 住讜讙 讛诪讜讚诇
86
- if "mistral" in model_name.lower():
87
- # 驻讜专诪讟 诇诪讜讚诇讬诐 诪住讜讙 Mistral
88
- formatted_prompt = f"<s>[INST] {prompt} [/INST]"
89
- outputs = generator(
90
- formatted_prompt,
91
- max_new_tokens=200,
92
- do_sample=True,
93
- temperature=0.7,
94
- top_p=0.95,
95
- return_full_text=False
96
- )
97
- else:
98
- # 驻讜专诪讟 诇诪讜讚诇讬诐 诪住讜讙 Gemma 讜讚讜诪讬诐
99
- outputs = generator(
100
- [{"role": "user", "content": prompt}],
101
- max_new_tokens=200,
102
- do_sample=True,
103
- temperature=0.7,
104
- top_p=0.95,
105
- return_full_text=False
106
- )
107
 
108
  # 诪讞讝讬专 讗转 讛讟拽住讟 砖谞讜爪专
109
- if isinstance(outputs, list) and "generated_text" in outputs[0]:
110
  return outputs[0]["generated_text"]
111
  else:
112
- return outputs[0]
113
  except Exception as e:
114
  return f"砖讙讬讗讛 讘讛驻注诇转 讛诪讜讚诇: {str(e)}"
115
 
116
  # 讬爪讬专转 诪诪砖拽 诪砖转诪砖
117
  with gr.Blocks() as demo:
118
- gr.Markdown("# 爪'讗讟 讞讻诐 注诐 讙'诪讛")
119
  gr.Markdown("讻转讜讘 砖讗诇讛 讗讜 讘拽砖讛 讜讛诪讜讚诇 讬注谞讛:")
120
 
121
  with gr.Row():
@@ -131,20 +110,20 @@ with gr.Blocks() as demo:
131
 
132
  gr.Examples(
133
  [
134
- "诪讛 讚注转讱 注诇 讘讬谞讛 诪诇讗讻讜转讬转?",
135
- "讻转讜讘 住讬驻讜专 拽爪专 注诇 讞转讜诇 讜讻诇讘 砖讛诐 讞讘专讬诐",
136
- "讛住讘专 诇讬 讗转 转讜专转 讛讬讞住讜转 讘爪讜专讛 驻砖讜讟讛",
137
- "诪讛诐 讛讬转专讜谞讜转 砖诇 诇诪讬讚转 诪讻讜谞讛?"
138
  ],
139
  input_text
140
  )
141
 
142
  gr.Markdown("""
143
  ### 讛注专讜转:
144
- - 讛诪注专讻转 诪砖转诪砖转 讘诪讜讚诇 砖驻讛 诪转拽讚诐 诇注谞讜转 注诇 砖讗诇讜转
145
  - 讛拽诇讬讚讜 砖讗诇讛 讗讜 讘拽砖讛 讜诇讞爪讜 注诇 '砖诇讞'
146
- - 讛转砖讜讘讜转 谞讜爪专讜转 讘讝诪谉 讗诪转 讜讻诇 转砖讜讘讛 注砖讜讬讛 诇讛讬讜转 砖讜谞讛
147
  """)
148
 
149
  # 讛驻注诇转 讛诪诪砖拽
150
- demo.launch()
 
10
  import transformers
11
  import huggingface_hub
12
  import torch
 
 
13
  except ImportError:
14
  print("诪转拽讬谉 讞讘讬诇讜转 谞讚专砖讜转...")
15
  packages = [
16
  "transformers>=4.38.0",
17
  "huggingface_hub>=0.20.0",
18
  "torch>=2.0.0",
19
+ "accelerate>=0.25.0"
 
20
  ]
21
  subprocess.check_call([sys.executable, "-m", "pip", "install"] + packages)
22
  print("讛转拽谞转 讛讞讘讬诇讜转 讛讜砖诇诪讛 讘讛爪诇讞讛!")
 
40
  else:
41
  print("讗讝讛专讛: 讟讜拽谉 HF_TOKEN 诇讗 诪讜讙讚专 讘住讘讬讘转 讛注讘讜讚讛. 讬讬转讻谉 砖诇讗 转讛讬讛 讙讬砖讛 诇诪讜讚诇讬诐 诪讜讙讘诇讬诐.")
42
 
43
+ # 讘讞讬专转 诪讜讚诇 驻转讜讞 诇讞诇讜讟讬谉
44
+ model_name = "facebook/opt-125m" # 诪讜讚诇 拽讟谉 讜驻转讜讞 诇讙诪专讬 诇诇讗 讛讙讘诇讜转 讙讬砖讛
45
 
46
  # 谞讬住讬讜谉 讟注讬谞讛 砖诇 讛诪讜讚诇
47
  print(f"讟讜注谉 诪讜讚诇 {model_name}...")
 
51
  "text-generation",
52
  model=model_name,
53
  device_map="auto",
54
+ torch_dtype="auto"
 
 
55
  )
56
  print("讟注讬谞转 讛诪讜讚诇 讛爪诇讬讞讛!")
57
  except Exception as e:
 
59
  # 谞讬住讬讜谉 诇讟注讜谉 诪讜讚诇 驻转讜讞 驻砖讜讟 讬讜转专 讘诪拽专讛 砖诇 讻讬砖诇讜谉
60
  try:
61
  print("诪谞住讛 诇讟注讜谉 诪讜讚诇 讞诇讜驻讬...")
62
+ model_name = "distilbert-base-uncased" # 诪讜讚诇 拽讟谉 注讜讚 讬讜转专 讜驻转讜讞
63
  generator = transformers.pipeline(
64
  "text-generation",
65
  model=model_name,
66
  device_map="auto",
67
+ torch_dtype="auto"
 
 
68
  )
69
  print("讟注讬谞转 讛诪讜讚诇 讛讞诇讜驻讬 讛爪诇讬讞讛!")
70
  except Exception as e2:
 
75
  return "讛诪讜讚诇 诇讗 谞讟注谉 讘讛爪诇讞讛. 讘讚讜拽 讗转 诇讜讙 讛砖讙讬讗讜转."
76
 
77
  try:
78
+ outputs = generator(
79
+ prompt,
80
+ max_new_tokens=100,
81
+ do_sample=True,
82
+ temperature=0.7,
83
+ top_p=0.95,
84
+ return_full_text=False
85
+ )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
86
 
87
  # 诪讞讝讬专 讗转 讛讟拽住讟 砖谞讜爪专
88
+ if isinstance(outputs, list) and len(outputs) > 0:
89
  return outputs[0]["generated_text"]
90
  else:
91
+ return str(outputs)
92
  except Exception as e:
93
  return f"砖讙讬讗讛 讘讛驻注诇转 讛诪讜讚诇: {str(e)}"
94
 
95
  # 讬爪讬专转 诪诪砖拽 诪砖转诪砖
96
  with gr.Blocks() as demo:
97
+ gr.Markdown("# 爪'讗讟 讞讻诐")
98
  gr.Markdown("讻转讜讘 砖讗诇讛 讗讜 讘拽砖讛 讜讛诪讜讚诇 讬注谞讛:")
99
 
100
  with gr.Row():
 
110
 
111
  gr.Examples(
112
  [
113
+ "住驻专 诇讬 注诇 讘讬谞讛 诪诇讗讻讜转讬转",
114
+ "讻转讜讘 住讬驻讜专 拽爪专",
115
+ "诪讛讜 驻讬讬转讜谉?",
116
+ "诪讛诐 讬转专讜谞讜转 砖诇 诇诪讬讚转 诪讻讜谞讛?"
117
  ],
118
  input_text
119
  )
120
 
121
  gr.Markdown("""
122
  ### 讛注专讜转:
123
+ - 讛诪注专讻转 诪砖转诪砖转 讘诪讜讚诇 砖驻讛 驻转讜讞 诇讞诇讜讟讬谉
124
  - 讛拽诇讬讚讜 砖讗诇讛 讗讜 讘拽砖讛 讜诇讞爪讜 注诇 '砖诇讞'
125
+ - 讛转砖讜讘讜转 谞讜爪专讜转 讘讝诪谉 讗诪转
126
  """)
127
 
128
  # 讛驻注诇转 讛诪诪砖拽
129
+ demo.launch()