Lemorra committed on
Commit
c257d4f
·
1 Parent(s): 2b7bc37

πŸ› Bug fix for no permission issue

Browse files
Files changed (2) hide show
  1. requirements.txt +1 -1
  2. src/utils/qwen_inference.py +2 -2
requirements.txt CHANGED
@@ -1,6 +1,6 @@
1
  fastapi
2
  uvicorn[standard]
3
- git+https://github.com/huggingface/transformers
4
  accelerate
5
  qwen-vl-utils[decord]==0.0.8
6
  python-dotenv
 
1
  fastapi
2
  uvicorn[standard]
3
+ transformers
4
  accelerate
5
  qwen-vl-utils[decord]==0.0.8
6
  python-dotenv
src/utils/qwen_inference.py CHANGED
@@ -1,5 +1,5 @@
1
  from .payload_model import SingleInferencePayload, VideoInferencePayload
2
- from transformers import Qwen2_5_VLForConditionalGeneration, AutoTokenizer, AutoProcessor
3
  from qwen_vl_utils import process_vision_info
4
  from pydantic import BaseModel
5
  import torch
@@ -7,7 +7,7 @@ import torch
7
 
8
  class Qwen2_5(BaseModel):
9
  def __init__(self, model_path: str):
10
- self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
11
  model_path, torch_dtype="auto", device_map="auto"
12
  )
13
  self.tokenizer = AutoTokenizer.from_pretrained(model_path)
 
1
  from .payload_model import SingleInferencePayload, VideoInferencePayload
2
+ from transformers import AutoModelForVision2Seq, AutoTokenizer, AutoProcessor
3
  from qwen_vl_utils import process_vision_info
4
  from pydantic import BaseModel
5
  import torch
 
7
 
8
  class Qwen2_5(BaseModel):
9
  def __init__(self, model_path: str):
10
+ self.model = AutoModelForVision2Seq.from_pretrained(
11
  model_path, torch_dtype="auto", device_map="auto"
12
  )
13
  self.tokenizer = AutoTokenizer.from_pretrained(model_path)