Nyandwi committed
Commit e8b30c3 · verified · 1 Parent(s): 92157d6

Update app.py

Files changed (1)
  1. app.py +3 -3
app.py CHANGED
@@ -712,11 +712,10 @@ if __name__ == "__main__":
     )
     # argparser.add_argument("--model-path", type=str, default="facebook/opt-350m")
     argparser.add_argument("--model-base", type=str, default=None)
-    argparser.add_argument("--device_map", type=str, default="auto")
     # argparser.add_argument("--num-gpus", type=int, default=1)
     argparser.add_argument("--conv-mode", type=str, default=None)
     argparser.add_argument("--temperature", type=float, default=0.7)
-    argparser.add_argument("--max-new-tokens", type=int, default=4096)
+    argparser.add_argument("--max-new-tokens", type=int, default=250)
     argparser.add_argument("--num_frames", type=int, default=16)
     argparser.add_argument("--load-8bit", action="store_true")
     argparser.add_argument("--load-4bit", action="store_true")
@@ -728,7 +727,8 @@ if __name__ == "__main__":
 
     model_path = args.model_path
     filt_invalid = "cut"
-    lava_kwargs = {"multimodal": True}
+    lava_kwargs = {"multimodal": True, "device_map": "cpu"
+    }
     #model_name = get_model_name_from_path(args.model_path)
     model_name = "pangea_llava_qwen"
     tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, None, model_name, args.load_8bit, args.load_4bit, **lava_kwargs)