hank1229 committed
Commit f91e2fa · verified · 1 Parent(s): 5785668

Update llm.py

Files changed (1)
  1. models/llm.py +6 -8
models/llm.py CHANGED
@@ -1,4 +1,4 @@
-
+import json
 from transformers import pipeline
 
 class StyleSavvy:
@@ -8,7 +8,6 @@ class StyleSavvy:
         device: int = -1,   # -1 = CPU, or GPU index
         max_length: int = 150,
     ):
-        # A local instruction-tuned T5 model
         self.pipe = pipeline(
             "text2text-generation",
             model=model_name,
@@ -16,12 +15,11 @@ class StyleSavvy:
             device=device,
         )
         self.max_length = max_length
-        # TODO: Modification: Add more prompts to the advise function
-        # to make it more specific to the user's needs.
-        # The function now takes in the user's body type, face shape, and occasion
-        # and generates style tips accordingly.
 
     def advise(self, items, body_type, face_shape, occasion):
+        # If items is a JSON string, parse it back into a Python list first
+        items = json.loads(items) if isinstance(items, str) else items
+
         prompt = (
             f"The user is {body_type}-shaped with a {face_shape} face, "
             f"attending a {occasion}. They are wearing: "
@@ -29,7 +27,6 @@ class StyleSavvy:
             + ".\n\nPlease list 5 concise style tips as bullet points:"
         )
 
-        # Generate with supported args only
         result = self.pipe(
             prompt,
             max_length=self.max_length,
@@ -37,12 +34,13 @@
             early_stopping=True,
             do_sample=False
         )[0]["generated_text"].strip()
-
+
         return result
 
 
 
 
+
 # import torch
 
 # # models/llm.py
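
For context on what this change enables, here is a minimal caller sketch. It is not part of the commit; the models.llm import path and the StyleSavvy() constructor defaults are assumptions based on the file location and the visible __init__ parameters.

# Hypothetical usage sketch (assumptions: models/llm.py is importable as
# models.llm, and StyleSavvy's constructor defaults are acceptable here).
import json

from models.llm import StyleSavvy

savvy = StyleSavvy()

# advise() still accepts a plain Python list of detected items...
tips = savvy.advise(
    items=["white linen shirt", "navy chinos", "loafers"],
    body_type="pear",
    face_shape="oval",
    occasion="garden wedding",
)

# ...and, after this commit, also a JSON-encoded string (e.g. coming from a
# web form or an upstream API), which it now parses with json.loads first.
tips_from_json = savvy.advise(
    items=json.dumps(["white linen shirt", "navy chinos", "loafers"]),
    body_type="pear",
    face_shape="oval",
    occasion="garden wedding",
)

print(tips)
print(tips_from_json)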