Update app.py
app.py CHANGED
@@ -403,6 +403,8 @@ def http_gen_edit_bot(state, temperature, top_k, top_p, image_gen_temperature,
     generated_image = None
     generated_text = ""
     try:
+        from transformers import set_seed
+        set_seed(42)
         with torch.inference_mode():  # Ensure no gradients are calculated
             output_ids = model.generate(
                 **inputs,
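The first hunk seeds the random number generators immediately before generation so that repeated requests with identical inputs produce the same sampled output. A minimal sketch of the pattern, assuming `model` and `inputs` exist as in app.py (the sampling arguments below are illustrative, not the app's actual defaults):

# Sketch: reproducible generation by seeding right before model.generate().
import torch
from transformers import set_seed

set_seed(42)  # seeds Python's random, NumPy and PyTorch RNGs in one call
with torch.inference_mode():  # no gradient tracking during inference
    output_ids = model.generate(
        **inputs,
        do_sample=True,   # with sampling enabled, the seed determines the draw
        temperature=0.7,  # illustrative values only
        top_p=0.9,
    )

One trade-off of a hard-coded seed is that every request reproduces the same sample for identical inputs; exposing the seed as a request parameter would keep runs reproducible without pinning all users to a single draw.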
@@ -456,7 +458,7 @@ def http_gen_edit_bot(state, temperature, top_k, top_p, image_gen_temperature,
                 original_image_sizes) == 1:  # editing task, unpad and resize image to original size
             original_size = original_image_sizes[0]
             logging.info(f"original size: {original_size}. Output Image size: {generated_image.size}")
-
+            generated_image = processor.unpad_and_resize_back(generated_image, original_size[0], original_size[1])
             logging.info(f"final image size: {generated_image.size}")
             logging.info("Image successfully generated.")
             # <image> is placeholder.
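The second hunk passes the generated image back through the processor so that, for editing tasks, the padding added before generation is removed and the result is resized to the input image's original resolution. `unpad_and_resize_back` is custom code shipped with the model repository (loaded via `trust_remote_code=True`), so the helper below is only a hedged approximation of what such a step typically does; it assumes aspect-preserving scaling with padding on the right/bottom edges and a (width, height) argument order.

from PIL import Image

def unpad_and_resize_back_sketch(image: Image.Image, orig_w: int, orig_h: int) -> Image.Image:
    # Approximation only: crop away the padding added before generation,
    # then resize the remaining content back to the original resolution.
    canvas_w, canvas_h = image.size
    scale = min(canvas_w / orig_w, canvas_h / orig_h)        # fit-inside scale assumed at padding time
    content_w = round(orig_w * scale)
    content_h = round(orig_h * scale)
    cropped = image.crop((0, 0, content_w, content_h))       # drop right/bottom padding
    return cropped.resize((orig_w, orig_h), Image.LANCZOS)   # restore original size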
@@ -830,9 +832,9 @@ if __name__ == "__main__":
     model = AutoModel.from_pretrained(
         args.model_name,
         torch_dtype=torch.bfloat16,
-        attn_implementation='flash_attention_2',  # OR 'sdpa' for Ascend NPUs
+        # attn_implementation='flash_attention_2', # OR 'sdpa' for Ascend NPUs
         # torch_dtype=args.torch_dtype,
-
+        attn_implementation='sdpa', # OR 'sdpa' for Ascend NPUs
         low_cpu_mem_usage=True,
         trust_remote_code=True).eval().cuda()
     processor = AutoProcessor.from_pretrained(args.model_name, trust_remote_code=True)
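The last hunk switches the attention backend used when the model is loaded: the `flash_attention_2` argument is kept as a comment and `attn_implementation='sdpa'` is passed instead. PyTorch's scaled-dot-product attention needs no extra package and also runs on hardware, such as Ascend NPUs, where FlashAttention 2 is not available. A hedged sketch of selecting the backend at load time, assuming a model that supports both (this fallback helper is not part of app.py):

import torch
from transformers import AutoModel

def load_model(model_name: str, prefer_flash: bool = False):
    # Use FlashAttention 2 only if it is requested and installed;
    # otherwise fall back to 'sdpa' (torch.nn.functional.scaled_dot_product_attention).
    attn = "sdpa"
    if prefer_flash:
        try:
            import flash_attn  # noqa: F401  # only checks availability
            attn = "flash_attention_2"
        except ImportError:
            pass
    return AutoModel.from_pretrained(
        model_name,
        torch_dtype=torch.bfloat16,
        attn_implementation=attn,
        low_cpu_mem_usage=True,
        trust_remote_code=True,
    ).eval().cuda()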