use pipeline instead of endpoint
- .gitignore +2 -1
- inference.py +8 -1
.gitignore CHANGED
@@ -2,4 +2,5 @@
 *.pyc
 __pycache__
 .env
-*.env
+*.env
+.idea
inference.py CHANGED
@@ -4,6 +4,8 @@ import io
 import config
 import random
 
+from diffusers import AutoPipelineForText2Image
+import torch
 
 class DiffusionInference:
     def __init__(self, api_key=None):
@@ -64,7 +66,7 @@ class DiffusionInference:
 
         try:
             # Call the API with all parameters as kwargs
-            image = self.
+            image = self.run_text_to_image_pipeline(**params)
             return image
         except Exception as e:
             print(f"Error generating image: {e}")
@@ -159,3 +161,8 @@ class DiffusionInference:
                 os.remove(temp_file)
             except Exception as e:
                 print(f"Warning: Could not delete temporary file {temp_file}: {e}")
+
+    def run_text_to_image_pipeline(self, model_name, **kwargs):
+        pipeline = AutoPipelineForText2Image.from_pretrained(model=model_name, torch_dtype=torch.float16).to("cuda")
+        image = pipeline(**kwargs).images[0]
+        return image
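Note on the new helper: as committed, it passes the checkpoint via a model= keyword, but AutoPipelineForText2Image.from_pretrained expects the model id (or local path) as its first positional argument, and the call site self.run_text_to_image_pipeline(**params) only works if params carries a model_name key. Below is a minimal sketch of how the helper could look, written as a free function for brevity; it assumes a CUDA GPU, and "stabilityai/sdxl-turbo" is purely an illustrative model id, not one taken from this repo.

import torch
from diffusers import AutoPipelineForText2Image

# Simple module-level cache so repeated calls reuse the loaded weights
# instead of re-initializing the pipeline on every generation.
_PIPELINES = {}


def run_text_to_image_pipeline(model_name, **kwargs):
    """Generate one image with a diffusers text-to-image pipeline.

    model_name is a Hub model id or local path; the remaining kwargs
    (prompt, negative_prompt, num_inference_steps, ...) are passed
    straight to the pipeline call.
    """
    if model_name not in _PIPELINES:
        _PIPELINES[model_name] = AutoPipelineForText2Image.from_pretrained(
            model_name,                 # first positional argument, not model=
            torch_dtype=torch.float16,  # half precision; assumes a CUDA GPU below
        ).to("cuda")
    pipeline = _PIPELINES[model_name]
    return pipeline(**kwargs).images[0]


if __name__ == "__main__":
    # Mirrors the call site image = self.run_text_to_image_pipeline(**params):
    # because the method unpacks **params, the model name has to travel inside
    # the same dict as the generation parameters.
    params = {
        "model_name": "stabilityai/sdxl-turbo",   # illustrative model id only
        "prompt": "a watercolor fox in the snow",
        "num_inference_steps": 4,
        "guidance_scale": 0.0,
    }
    image = run_text_to_image_pipeline(**params)
    image.save("out.png")

Caching the loaded pipeline is a design choice, not part of the commit: the committed version loads the pipeline from scratch on every call, which is slow and memory-hungry if generate is invoked repeatedly with the same model.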