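"""StyleSavvy (models/llm.py): turns detected outfit items into concise style
tips via a local Hugging Face text2text pipeline (FLAN-T5 by default)."""
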
import json
from transformers import pipeline

class StyleSavvy:
    """Generates style advice for an outfit using a local seq2seq model."""

    def __init__(
        self,
        model_name: str = "google/flan-t5-large",
        device: int = -1,       # -1 = CPU, or a GPU index
        max_length: int = 150,  # upper bound on generated output (tokens)
    ):
        # Instruction-tuned FLAN-T5 follows this prompt format without fine-tuning.
        self.pipe = pipeline(
            "text2text-generation",
            model=model_name,
            tokenizer=model_name,
            device=device,
        )
        self.max_length = max_length

    def advise(self, items, body_type: str, face_shape: str, occasion: str) -> str:
        # If items arrives as a JSON string, parse it back into a Python list first.
        items = json.loads(items) if isinstance(items, str) else items

        prompt = (
            f"The user is {body_type}-shaped with a {face_shape} face, "
            f"attending a {occasion}. They are wearing: "
            + ", ".join(i["label"] for i in items)
            + ".\n\nPlease list 5 concise style tips as bullet points:"
        )

        # Deterministic beam search keeps the advice stable across runs.
        result = self.pipe(
            prompt,
            max_length=self.max_length,
            num_beams=4,
            early_stopping=True,
            do_sample=False,
        )[0]["generated_text"].strip()

        return result


# import torch

# # models/llm.py

# import os
# from typing import List
# from transformers import pipeline, Pipeline

# # Force CPU modes (avoid any MPS/CUDA issues on macOS)
# os.environ["CUDA_VISIBLE_DEVICES"] = ""
# os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "0"

# class StyleSavvy:
#     def __init__(
#         self,
#         model_name: str = "openlm-research/open_llama_3b_v2",
#         device: int   = -1,        # -1 = CPU
#         max_new_tokens: int = 100,
#         temperature: float   = 0.7,
#         top_p: float         = 0.9,
#     ):
#         """
#         Uses OpenLLaMA-3B-v2 (≈3B params) for fast, local inference.
#         """
#         # Setup a causal text-generation pipeline
#         self.pipe: Pipeline = pipeline(
#             "text-generation",
#             model=model_name,
#             tokenizer=model_name,
#             device=device,
#         )
#         # GPT-style models need a pad token to avoid warnings
#         if self.pipe.tokenizer.pad_token_id is None:
#             self.pipe.tokenizer.pad_token = self.pipe.tokenizer.eos_token
        
#         self.max_new_tokens = max_new_tokens
#         self.temperature    = temperature
#         self.top_p          = top_p

#     def advise(
#         self,
#         items: List[str],
#         body_type: str,
#         face_shape: str,
#         occasion: str
#     ) -> List[str]:
#         """
#         Builds a strict instruction prompt and returns exactly five "- " bullets.
#         """
#         labels = ", ".join(items) if items else "an outfit"
#         prompt = (
#             "You are a professional fashion consultant.\n"
#             f"The user is {body_type}-shaped with a {face_shape} face, attending {occasion}.\n"
#             f"They are wearing: {labels}.\n\n"
#             "Please provide exactly five concise style tips, each on its own line, "
#             "and starting with \"- \". No extra text."
#         )

#         # Generate
#         output = self.pipe(
#             prompt,
#             max_new_tokens=self.max_new_tokens,
#             do_sample=True,
#             temperature=self.temperature,
#             top_p=self.top_p,
#             return_full_text=False,
#         )[0]["generated_text"]

#         # Extract bullets
#         tips = [ln.strip() for ln in output.splitlines() if ln.strip().startswith("- ")]
#         # Fallback: split on sentences if fewer than 5 bullets
#         if len(tips) < 5:
#             candidates = [s.strip() for s in output.replace("\n"," ").split(".") if s.strip()]
#             tips = [f"- {candidates[i]}" for i in range(min(5, len(candidates)))]
        
#         return tips[:5]
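

# Minimal usage sketch. The labels below are illustrative placeholders, not
# real detector output; the first call downloads the FLAN-T5 weights.
if __name__ == "__main__":
    advisor = StyleSavvy(device=-1)  # CPU; pass a GPU index to use CUDA
    demo_items = [{"label": "white shirt"}, {"label": "navy blazer"}]
    print(advisor.advise(demo_items, body_type="pear",
                         face_shape="oval", occasion="business dinner"))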