Upload 15 files

- .gitignore +39 -0
- .gradio/certificate.pem +31 -0
- README.md +39 -14
- app.py +281 -0
- models/__pycache__/audio_gen.cpython-313.pyc +0 -0
- models/__pycache__/code_gen.cpython-313.pyc +0 -0
- models/__pycache__/image_gen.cpython-313.pyc +0 -0
- models/__pycache__/summarizer.cpython-313.pyc +0 -0
- models/__pycache__/text_gen.cpython-313.pyc +0 -0
- models/audio_gen.py +46 -0
- models/code_gen.py +191 -0
- models/image_gen.py +55 -0
- models/summarizer.py +37 -0
- models/text_gen.py +108 -0
- requirements.txt +14 -0
.gitignore
ADDED
@@ -0,0 +1,39 @@
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+env/
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# Virtual Environment
+venv/
+ENV/
+
+# IDE
+.idea/
+.vscode/
+*.swp
+*.swo
+
+# OS
+.DS_Store
+Thumbs.db
+
+# Hugging Face
+.cache/
+.huggingface/
.gradio/certificate.pem
ADDED
@@ -0,0 +1,31 @@
+-----BEGIN CERTIFICATE-----
+MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
+TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
+cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
+WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
+ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
+MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
+h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
+0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
+A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
+T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
+B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
+B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
+KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
+OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
+jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
+qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
+rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
+hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
+ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
+3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
+NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
+ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
+TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
+jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
+oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
+4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
+mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
+emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
+-----END CERTIFICATE-----
README.md
CHANGED
@@ -1,14 +1,39 @@
- (14 removed lines not rendered in this view)
+# GenAI Content Studio
+
+A free and open-source AI content generation studio with multiple features:
+
+- Text Generation
+- Text Summarization
+- Image Generation
+- Audio Generation
+- Code Generation
+
+## Features
+
+- **Text Generation**: Generate creative text using OPT-350M
+- **Text Summarization**: Summarize long texts using BART
+- **Image Generation**: Create images from text prompts using Stable Diffusion
+- **Audio Generation**: Convert text to speech using MMS-TTS
+- **Code Generation**: Generate Python code using CodeGPT
+
+## Setup
+
+1. Clone the repository
+2. Install dependencies:
+```bash
+pip install -r requirements.txt
+```
+
+3. Run the application:
+```bash
+python app.py
+```
+
+## Deployment
+
+This application is deployed on Hugging Face Spaces:
+[Link to be added after deployment]
+
+## License
+
+MIT License
app.py
ADDED
@@ -0,0 +1,281 @@
+import gradio as gr
+import torch
+from models.text_gen import TextGenerator
+from models.summarizer import TextSummarizer
+from models.image_gen import ImageGenerator
+from models.audio_gen import AudioGenerator
+from models.code_gen import CodeGenerator
+
+# Debug GPU availability
+print(f"CUDA available: {torch.cuda.is_available()}")
+if torch.cuda.is_available():
+    print(f"CUDA device count: {torch.cuda.device_count()}")
+    print(f"Current CUDA device: {torch.cuda.current_device()}")
+    print(f"CUDA device name: {torch.cuda.get_device_name(0)}")
+    print(f"CUDA device properties: {torch.cuda.get_device_properties(0)}")
+
+# Initialize the models
+text_generator = TextGenerator()
+text_summarizer = TextSummarizer()
+image_generator = ImageGenerator()
+audio_generator = AudioGenerator()
+code_generator = CodeGenerator()
+
+def generate_text(prompt, max_length, temperature, top_p):
+    try:
+        generated_text = text_generator.generate_text(
+            prompt=prompt,
+            max_length=max_length,
+            temperature=temperature,
+            top_p=top_p
+        )
+        return generated_text
+    except Exception as e:
+        return f"Error generating text: {str(e)}"
+
+def summarize_text(text, max_length, min_length):
+    try:
+        summary = text_summarizer.summarize(
+            text=text,
+            max_length=max_length,
+            min_length=min_length
+        )
+        return summary
+    except Exception as e:
+        return f"Error generating summary: {str(e)}"
+
+def generate_image(prompt, num_steps, guidance_scale):
+    try:
+        image = image_generator.generate_image(
+            prompt=prompt,
+            num_inference_steps=num_steps,
+            guidance_scale=guidance_scale
+        )
+        return image
+    except Exception as e:
+        return f"Error generating image: {str(e)}"
+
+def generate_audio(text):
+    try:
+        audio, sample_rate = audio_generator.generate_audio(
+            text=text
+        )
+        return (sample_rate, audio)
+    except Exception as e:
+        return f"Error generating audio: {str(e)}"
+
+def generate_code(prompt, max_length, temperature, top_p):
+    try:
+        code = code_generator.generate_code(
+            prompt=prompt,
+            max_length=max_length,
+            temperature=temperature,
+            top_p=top_p
+        )
+        return code
+    except Exception as e:
+        return f"Error generating code: {str(e)}"
+
+# Create the Gradio interface
+with gr.Blocks(title="GenAI Content Studio") as app:
+    gr.Markdown("# 🎨 GenAI Content Studio")
+    gr.Markdown("### Free and Open-Source AI Content Generation")
+
+    with gr.Tabs():
+        with gr.TabItem("Text Generation"):
+            with gr.Row():
+                with gr.Column():
+                    prompt = gr.Textbox(
+                        label="Enter your prompt",
+                        placeholder="Type your text here...",
+                        lines=3
+                    )
+
+                    with gr.Row():
+                        max_length = gr.Slider(
+                            minimum=50,
+                            maximum=500,
+                            value=100,
+                            step=50,
+                            label="Max Length"
+                        )
+                        temperature = gr.Slider(
+                            minimum=0.1,
+                            maximum=1.0,
+                            value=0.7,
+                            step=0.1,
+                            label="Temperature"
+                        )
+                        top_p = gr.Slider(
+                            minimum=0.1,
+                            maximum=1.0,
+                            value=0.9,
+                            step=0.1,
+                            label="Top P"
+                        )
+
+                    generate_btn = gr.Button("Generate Text")
+
+                with gr.Column():
+                    output = gr.Textbox(
+                        label="Generated Text",
+                        lines=10,
+                        interactive=False
+                    )
+
+            generate_btn.click(
+                fn=generate_text,
+                inputs=[prompt, max_length, temperature, top_p],
+                outputs=output
+            )
+
+        with gr.TabItem("Text Summarization"):
+            with gr.Row():
+                with gr.Column():
+                    text_input = gr.Textbox(
+                        label="Enter text to summarize",
+                        placeholder="Paste your text here...",
+                        lines=10
+                    )
+
+                    with gr.Row():
+                        max_summary_length = gr.Slider(
+                            minimum=50,
+                            maximum=200,
+                            value=130,
+                            step=10,
+                            label="Max Summary Length"
+                        )
+                        min_summary_length = gr.Slider(
+                            minimum=10,
+                            maximum=100,
+                            value=30,
+                            step=10,
+                            label="Min Summary Length"
+                        )
+
+                    summarize_btn = gr.Button("Summarize Text")
+
+                with gr.Column():
+                    summary_output = gr.Textbox(
+                        label="Generated Summary",
+                        lines=5,
+                        interactive=False
+                    )
+
+            summarize_btn.click(
+                fn=summarize_text,
+                inputs=[text_input, max_summary_length, min_summary_length],
+                outputs=summary_output
+            )
+
+        with gr.TabItem("Image Generation"):
+            with gr.Row():
+                with gr.Column():
+                    image_prompt = gr.Textbox(
+                        label="Enter your image prompt",
+                        placeholder="Describe the image you want to generate...",
+                        lines=3
+                    )
+
+                    with gr.Row():
+                        num_steps = gr.Slider(
+                            minimum=20,
+                            maximum=100,
+                            value=50,
+                            step=10,
+                            label="Number of Steps"
+                        )
+                        guidance_scale = gr.Slider(
+                            minimum=1.0,
+                            maximum=20.0,
+                            value=7.5,
+                            step=0.5,
+                            label="Guidance Scale"
+                        )
+
+                    generate_image_btn = gr.Button("Generate Image")
+
+                with gr.Column():
+                    image_output = gr.Image(
+                        label="Generated Image",
+                        type="pil"
+                    )
+
+            generate_image_btn.click(
+                fn=generate_image,
+                inputs=[image_prompt, num_steps, guidance_scale],
+                outputs=image_output
+            )
+
+        with gr.TabItem("Audio Generation"):
+            with gr.Row():
+                with gr.Column():
+                    audio_text = gr.Textbox(
+                        label="Enter text to convert to speech",
+                        placeholder="Type what you want to hear...",
+                        lines=3
+                    )
+
+                    generate_audio_btn = gr.Button("Generate Audio")
+
+                with gr.Column():
+                    audio_output = gr.Audio(
+                        label="Generated Audio",
+                        type="numpy"
+                    )
+
+            generate_audio_btn.click(
+                fn=generate_audio,
+                inputs=[audio_text],
+                outputs=audio_output
+            )
+
+        with gr.TabItem("Code Generation"):
+            with gr.Row():
+                with gr.Column():
+                    code_prompt = gr.Textbox(
+                        label="Enter your code prompt",
+                        placeholder="Describe the code you want to generate...",
+                        lines=3
+                    )
+
+                    with gr.Row():
+                        max_length = gr.Slider(
+                            minimum=50,
+                            maximum=500,
+                            value=100,
+                            step=50,
+                            label="Max Length"
+                        )
+                        temperature = gr.Slider(
+                            minimum=0.1,
+                            maximum=1.0,
+                            value=0.7,
+                            step=0.1,
+                            label="Temperature"
+                        )
+                        top_p = gr.Slider(
+                            minimum=0.1,
+                            maximum=1.0,
+                            value=0.9,
+                            step=0.1,
+                            label="Top P"
+                        )
+
+                    generate_code_btn = gr.Button("Generate Code")
+
+                with gr.Column():
+                    code_output = gr.Code(
+                        label="Generated Code",
+                        language="python"
+                    )
+
+            generate_code_btn.click(
+                fn=generate_code,
+                inputs=[code_prompt, max_length, temperature, top_p],
+                outputs=code_output
+            )
+
+if __name__ == "__main__":
+    app.launch()
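Note on app.py: Stable Diffusion requests can run for tens of seconds, longer than Gradio comfortably serves inline. A minimal sketch of an optional tweak (not in this commit; `max_size=8` is an illustrative value for `Blocks.queue()`, which the gradio>=3.50 pin in requirements.txt provides):

```python
# Hypothetical variant of the launch block, not part of this commit:
# queue long-running generation requests instead of serving them inline.
if __name__ == "__main__":
    app.queue(max_size=8)  # illustrative queue size; tune for the hardware
    app.launch()
```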
models/__pycache__/audio_gen.cpython-313.pyc
ADDED
Binary file (3 kB).

models/__pycache__/code_gen.cpython-313.pyc
ADDED
Binary file (7.38 kB).

models/__pycache__/image_gen.cpython-313.pyc
ADDED
Binary file (3.21 kB).

models/__pycache__/summarizer.cpython-313.pyc
ADDED
Binary file (2.42 kB).

models/__pycache__/text_gen.cpython-313.pyc
ADDED
Binary file (4.57 kB).
models/audio_gen.py
ADDED
@@ -0,0 +1,46 @@
+import torch
+from transformers import VitsModel, AutoTokenizer
+import numpy as np
+
+class AudioGenerator:
+    def __init__(self):
+        print("Initializing Audio Generator...")
+        self.device = "cuda" if torch.cuda.is_available() else "cpu"
+        print(f"Using device: {self.device}")
+
+        # Load model and tokenizer
+        self.model_name = "facebook/mms-tts-eng"
+        print(f"Loading model {self.model_name}...")
+
+        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
+        self.model = VitsModel.from_pretrained(self.model_name).to(self.device)
+        print(f"Model loaded and moved to {self.device}")
+
+    def generate_audio(self, text, voice_preset=None):
+        """
+        Generate audio from text using the MMS-TTS model
+
+        Args:
+            text (str): The text to convert to speech
+            voice_preset (str): Not used in this implementation
+
+        Returns:
+            tuple: (audio_array, sample_rate)
+        """
+        try:
+            print(f"Generating audio on {self.device}...")
+
+            # Tokenize the input text
+            inputs = self.tokenizer(text, return_tensors="pt").to(self.device)
+
+            with torch.no_grad():
+                output = self.model(**inputs).waveform
+
+            # Convert to numpy array and normalize
+            audio = output.cpu().numpy().squeeze()
+            audio = (audio * 32767).astype(np.int16)
+
+            return audio, self.model.config.sampling_rate
+
+        except Exception as e:
+            return f"Error generating audio: {str(e)}"
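For testing models/audio_gen.py outside the Gradio app, the int16 array it returns can be written straight to a WAV file. A usage sketch (the sample text and `speech.wav` filename are illustrative; scipy is already pinned in requirements.txt):

```python
from scipy.io import wavfile

from models.audio_gen import AudioGenerator

gen = AudioGenerator()
# On success this returns (int16 numpy array, sample rate);
# on failure it returns an error string instead of raising.
audio, sample_rate = gen.generate_audio("Hello from the content studio.")
wavfile.write("speech.wav", sample_rate, audio)  # mono 16-bit PCM
```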
models/code_gen.py
ADDED
@@ -0,0 +1,191 @@
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer
+import re
+import textwrap
+
+class CodeGenerator:
+    def __init__(self):
+        print("Initializing Code Generator...")
+        self.device = "cuda" if torch.cuda.is_available() else "cpu"
+        print(f"Using device: {self.device}")
+
+        # Load model and tokenizer
+        self.model_name = "microsoft/CodeGPT-small-py-adaptedGPT2"
+        print(f"Loading model {self.model_name}...")
+
+        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
+        self.model = AutoModelForCausalLM.from_pretrained(
+            self.model_name,
+            torch_dtype=torch.float16 if self.device == "cuda" else torch.float32
+        ).to(self.device)
+
+        print(f"Model loaded and moved to {self.device}")
+
+    def generate_code(self, prompt, max_length=150, temperature=0.7, top_p=0.95):
+        """
+        Generate code based on the given prompt
+
+        Args:
+            prompt (str): The prompt describing the code to generate
+            max_length (int): Maximum length of the generated code
+            temperature (float): Controls randomness in generation
+            top_p (float): Controls diversity of generation
+
+        Returns:
+            str: Generated code
+        """
+        try:
+            print(f"Generating code on {self.device}...")
+
+            # Format prompt for better code generation
+            formatted_prompt = f"# Python\n# Task: {prompt}\n# Solution:\n"
+
+            inputs = self.tokenizer(
+                formatted_prompt,
+                return_tensors="pt",
+                truncation=True,
+                max_length=512
+            ).to(self.device)
+
+            with torch.no_grad():
+                outputs = self.model.generate(
+                    **inputs,
+                    max_length=max_length + len(inputs["input_ids"][0]),
+                    temperature=temperature,
+                    top_p=top_p,
+                    num_return_sequences=1,
+                    pad_token_id=self.tokenizer.eos_token_id,
+                    do_sample=True,
+                    repetition_penalty=1.1,
+                    no_repeat_ngram_size=3
+                )
+
+            generated_code = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
+            # Remove the prompt from the generated code
+            generated_code = generated_code[len(formatted_prompt):]
+
+            # Format the code
+            formatted_code = self._format_code(generated_code)
+            return formatted_code
+
+        except Exception as e:
+            return f"Error generating code: {str(e)}"
+
+    def _format_code(self, code):
+        """
+        Format the generated code for better readability
+
+        Args:
+            code (str): The code to format
+
+        Returns:
+            str: Formatted code
+        """
+        # Remove any trailing whitespace
+        code = code.strip()
+
+        # Split into lines and remove duplicates
+        lines = code.split('\n')
+        unique_lines = []
+        seen_lines = set()
+
+        for line in lines:
+            stripped_line = line.strip()
+            if stripped_line and stripped_line not in seen_lines:
+                seen_lines.add(stripped_line)
+                unique_lines.append(line)
+
+        # Fix common indentation issues
+        formatted_lines = []
+
+        # Track indentation level
+        indent_level = 0
+        for line in unique_lines:
+            # Skip empty lines
+            if not line.strip():
+                formatted_lines.append('')
+                continue
+
+            # Calculate current indentation
+            current_indent = len(line) - len(line.lstrip())
+
+            # Handle indentation changes
+            if line.strip().endswith(':'):
+                # Increase indent after colons
+                indent_level = current_indent + 4
+            elif current_indent > indent_level:
+                # Decrease indent if too deep
+                indent_level = max(0, indent_level - 4)
+
+            # Apply proper indentation
+            formatted_line = ' ' * indent_level + line.lstrip()
+            formatted_lines.append(formatted_line)
+
+        # Join lines with proper spacing
+        formatted_code = '\n'.join(formatted_lines)
+
+        # Add docstrings if missing
+        if 'def ' in formatted_code and '"""' not in formatted_code:
+            formatted_code = self._add_docstrings(formatted_code)
+
+        # Ensure proper spacing between functions/classes
+        formatted_code = re.sub(r'\n{3,}', '\n\n', formatted_code)
+
+        # Remove any duplicate code blocks
+        formatted_code = self._remove_duplicate_blocks(formatted_code)
+
+        return formatted_code
+
+    def _remove_duplicate_blocks(self, code):
+        """
+        Remove duplicate code blocks
+
+        Args:
+            code (str): The code to clean
+
+        Returns:
+            str: Code with duplicates removed
+        """
+        # Split into blocks (functions/classes)
+        blocks = re.split(r'(?=\n\s*(?:def|class)\s)', code)
+        unique_blocks = []
+        seen_blocks = set()
+
+        for block in blocks:
+            # Normalize block by removing whitespace
+            normalized = re.sub(r'\s+', ' ', block.strip())
+            if normalized and normalized not in seen_blocks:
+                seen_blocks.add(normalized)
+                unique_blocks.append(block)
+
+        return ''.join(unique_blocks).strip()
+
+    def _add_docstrings(self, code):
+        """
+        Add docstrings to functions if missing
+
+        Args:
+            code (str): The code to add docstrings to
+
+        Returns:
+            str: Code with docstrings
+        """
+        lines = code.split('\n')
+        formatted_lines = []
+        i = 0
+
+        while i < len(lines):
+            line = lines[i]
+            formatted_lines.append(line)
+
+            # Check for function definition
+            if line.strip().startswith('def '):
+                # Add docstring if next line doesn't have one
+                if i + 1 < len(lines) and '"""' not in lines[i + 1]:
+                    indent = len(line) - len(line.lstrip())
+                    docstring = f'{(indent + 4) * " "}"""\n{(indent + 4) * " "}Docstring\n{(indent + 4) * " "}"""'  # indent one level past the def line
+                    formatted_lines.append(docstring)
+
+            i += 1
+
+        return '\n'.join(formatted_lines)
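A quick smoke test for models/code_gen.py (the prompt is illustrative; on failure `generate_code` returns an "Error generating code: ..." string rather than raising):

```python
from models.code_gen import CodeGenerator

gen = CodeGenerator()
# Prints formatted Python source produced from the natural-language prompt.
print(gen.generate_code("a function that reverses a string", max_length=150))
```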
models/image_gen.py
ADDED
@@ -0,0 +1,55 @@
+from diffusers import StableDiffusionPipeline
+import torch
+from PIL import Image
+import io
+
+class ImageGenerator:
+    def __init__(self, model_name="CompVis/stable-diffusion-v1-4"):
+        # Explicit GPU detection and setup
+        if torch.cuda.is_available():
+            self.device = torch.device("cuda")
+            print(f"Image Generator: Using GPU - {torch.cuda.get_device_name(0)}")
+            print(f"GPU Memory: {torch.cuda.get_device_properties(0).total_memory / 1024**3:.2f} GB")
+        else:
+            self.device = torch.device("cpu")
+            print("Image Generator: Using CPU")
+
+        print(f"Loading model {model_name}...")
+        self.pipe = StableDiffusionPipeline.from_pretrained(
+            model_name,
+            torch_dtype=torch.float16 if self.device.type == "cuda" else torch.float32,
+            safety_checker=None,  # Disable safety checker for better performance
+            variant="fp16" if self.device.type == "cuda" else None  # Use fp16 weights on GPU
+        ).to(self.device)
+        print(f"Model loaded and moved to {self.device}")
+
+    def generate_image(self, prompt, num_inference_steps=30, guidance_scale=7.0):
+        """
+        Generate an image based on the given prompt
+
+        Args:
+            prompt (str): The text prompt to generate from
+            num_inference_steps (int): Number of denoising steps
+            guidance_scale (float): Scale for classifier-free guidance
+
+        Returns:
+            PIL.Image: Generated image
+        """
+        try:
+            print(f"Generating image on {self.device}...")
+
+            # Add quality prompts
+            enhanced_prompt = f"{prompt}, high quality, detailed, 4k, professional photography"
+
+            image = self.pipe(
+                enhanced_prompt,
+                num_inference_steps=num_inference_steps,
+                guidance_scale=guidance_scale,
+                negative_prompt="blurry, low quality, distorted, deformed",
+                width=512,  # Reduced resolution for faster generation
+                height=512  # Reduced resolution for faster generation
+            ).images[0]
+
+            return image
+        except Exception as e:
+            return f"Error generating image: {str(e)}"
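Standalone usage sketch for models/image_gen.py (prompt and output filename are illustrative; `generate_image` returns a PIL image on success but an error string on failure, hence the type check):

```python
from models.image_gen import ImageGenerator

gen = ImageGenerator()  # first run downloads CompVis/stable-diffusion-v1-4
result = gen.generate_image("a watercolor lighthouse at dawn",
                            num_inference_steps=30)
if not isinstance(result, str):  # a str here means the error path was hit
    result.save("lighthouse.png")
```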
models/summarizer.py
ADDED
@@ -0,0 +1,37 @@
+import torch
+from transformers import BartForConditionalGeneration, BartTokenizer
+
+class TextSummarizer:
+    def __init__(self):
+        print("Initializing Text Summarizer...")
+        self.device = "cuda" if torch.cuda.is_available() else "cpu"
+        print(f"Using device: {self.device}")
+
+        # Load model and tokenizer
+        self.model_name = "facebook/bart-large-cnn"
+        self.tokenizer = BartTokenizer.from_pretrained(self.model_name)
+        self.model = BartForConditionalGeneration.from_pretrained(self.model_name).to(self.device)
+        print(f"Loaded {self.model_name} model and moved to {self.device}")
+
+    def summarize(self, text, max_length=130, min_length=30):
+        try:
+            # Tokenize the input text
+            inputs = self.tokenizer(text, return_tensors="pt", max_length=1024, truncation=True)
+            inputs = inputs.to(self.device)
+
+            # Generate summary
+            summary_ids = self.model.generate(
+                inputs["input_ids"],
+                max_length=max_length,
+                min_length=min_length,
+                num_beams=4,
+                length_penalty=2.0,
+                early_stopping=True
+            )
+
+            # Decode the generated summary
+            summary = self.tokenizer.decode(summary_ids[0], skip_special_tokens=True)
+            return summary
+
+        except Exception as e:
+            return f"Error generating summary: {str(e)}"
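Usage sketch for models/summarizer.py (the sample text is illustrative; inputs past 1024 tokens are truncated by the tokenizer call above):

```python
from models.summarizer import TextSummarizer

summarizer = TextSummarizer()
article = (
    "The James Webb Space Telescope, launched in December 2021, observes "
    "the universe in infrared. Its 6.5-meter primary mirror lets it study "
    "the earliest galaxies and the atmospheres of exoplanets."
)
# Beam search (num_beams=4) produces a short abstractive summary.
print(summarizer.summarize(article, max_length=60, min_length=20))
```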
models/text_gen.py
ADDED
@@ -0,0 +1,108 @@
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+class TextGenerator:
+    def __init__(self):
+        print("Initializing Text Generator...")
+        self.device = "cuda" if torch.cuda.is_available() else "cpu"
+        print(f"Using device: {self.device}")
+
+        # Load model and tokenizer
+        self.model_name = "facebook/opt-350m"
+        print(f"Loading model {self.model_name}...")
+
+        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
+        self.model = AutoModelForCausalLM.from_pretrained(
+            self.model_name,
+            torch_dtype=torch.float16 if self.device == "cuda" else torch.float32
+        ).to(self.device)
+
+        print(f"Model loaded and moved to {self.device}")
+
+    def generate_text(self, prompt, max_length=200, temperature=0.7, top_p=0.9):
+        """
+        Generate text based on the given prompt
+
+        Args:
+            prompt (str): The text generation prompt
+            max_length (int): Maximum length of the generated text
+            temperature (float): Controls randomness in generation
+            top_p (float): Controls diversity of generation
+
+        Returns:
+            str: Generated text
+        """
+        try:
+            print(f"Generating text on {self.device}...")
+
+            # Format prompt for better generation
+            formatted_prompt = f"Instruction: {prompt}\n\nResponse:"
+
+            inputs = self.tokenizer(
+                formatted_prompt,
+                return_tensors="pt",
+                truncation=True,
+                max_length=512
+            ).to(self.device)
+
+            with torch.no_grad():
+                outputs = self.model.generate(
+                    **inputs,
+                    max_length=max_length + len(inputs["input_ids"][0]),
+                    temperature=temperature,
+                    top_p=top_p,
+                    num_return_sequences=1,
+                    pad_token_id=self.tokenizer.eos_token_id,
+                    do_sample=True,
+                    repetition_penalty=1.2,
+                    no_repeat_ngram_size=3,
+                    num_beams=5,
+                    early_stopping=True
+                )
+
+            generated_text = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
+            # Remove the prompt from the generated text
+            generated_text = generated_text[len(formatted_prompt):]
+
+            # Format the text
+            formatted_text = self._format_text(generated_text)
+            return formatted_text
+
+        except Exception as e:
+            return f"Error generating text: {str(e)}"
+
+    def _format_text(self, text):
+        """
+        Format the generated text for better readability
+
+        Args:
+            text (str): The text to format
+
+        Returns:
+            str: Formatted text
+        """
+        # Split into paragraphs
+        paragraphs = text.split('\n\n')
+
+        # Format each paragraph
+        formatted_paragraphs = []
+        for para in paragraphs:
+            if para.strip():
+                # Capitalize first letter
+                para = para.strip()
+                if para:
+                    para = para[0].upper() + para[1:]
+
+                # Add proper spacing
+                para = ' '.join(para.split())
+
+                formatted_paragraphs.append(para)
+
+        # Join paragraphs with proper spacing
+        formatted_text = '\n\n'.join(formatted_paragraphs)
+
+        # Ensure proper punctuation
+        if formatted_text and formatted_text[-1] not in '.!?':
+            formatted_text += '.'
+
+        return formatted_text
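Usage sketch for models/text_gen.py (the prompt is illustrative; OPT-350M is a base language model, so the `Instruction:`/`Response:` template above only loosely steers it):

```python
from models.text_gen import TextGenerator

gen = TextGenerator()
print(gen.generate_text(
    "Write a two-sentence pitch for a reusable water bottle.",
    max_length=120, temperature=0.7, top_p=0.9
))
```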
requirements.txt
ADDED
@@ -0,0 +1,14 @@
+--find-links https://download.pytorch.org/whl/torch_stable.html
+torch>=2.0.0
+transformers>=4.30.0
+gradio>=3.50.0
+diffusers>=0.21.0
+accelerate>=0.21.0
+sentencepiece>=0.1.99
+protobuf>=3.20.0
+numpy>=1.24.0
+pillow>=9.5.0
+scipy>=1.10.0
+tqdm>=4.65.0
+requests>=2.31.0
+langchain>=0.1.0