Spaces: Running on Zero
added model download
app.py CHANGED
@@ -48,7 +48,7 @@ def load_model(fp):
 ]

 for prompt in prompts:
-    print('Making inference...')
+    print(f'Making inference... {prompt}')
     output = llm(
         prompt,
         max_tokens=512,
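For context, a minimal sketch of how app.py plausibly fits together around this hunk, assuming the Space uses llama-cpp-python with a GGUF model fetched via huggingface_hub (the repo id, filename, and prompt list are placeholders, not taken from the actual Space):

# Hypothetical reconstruction of the surrounding app.py; REPO_ID, the
# filename, and the prompts are placeholders, not from the real Space.
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

def load_model(fp):
    # Load a GGUF model from a local file path with llama-cpp-python.
    return Llama(model_path=fp, n_ctx=2048)

# "added model download": fetch the model file from the Hub before loading it.
model_path = hf_hub_download(repo_id="REPO_ID", filename="model.gguf")
llm = load_model(model_path)

prompts = [
    "Hello, how are you?",
]

for prompt in prompts:
    print(f'Making inference... {prompt}')
    output = llm(
        prompt,
        max_tokens=512,
    )
    print(output["choices"][0]["text"])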