Upload inference.py with huggingface_hub

inference.py CHANGED  (+124 -2)
@@ -1,6 +1,11 @@
 import abc
+import os
+from dataclasses import dataclass
+from typing import List, Optional, Union
 
 from .artifact import Artifact
+from .operator import PackageRequirementsMixin
+from .settings_utils import get_settings
 
 
 class InferenceEngine(abc.ABC, Artifact):
@@ -11,12 +16,21 @@ class InferenceEngine(abc.ABC, Artifact):
         """Perform inference on the input dataset."""
         pass
 
+    @staticmethod
+    def _assert_allow_passing_data_to_remote_api(remote_api_label: str):
+        assert get_settings().allow_passing_data_to_remote_api, (
+            f"LlmAsJudge metric cannot send data to remote APIs ({remote_api_label}) when"
+            f" unitxt.settings.allow_passing_data_to_remote_api=False."
+            f" Set the UNITXT_ALLOW_PASSING_DATA_TO_REMOTE_API environment variable if you want to allow this."
+        )
 
-class HFPipelineBasedInferenceEngine(Artifact):
-    """Abstract base class for inference."""
 
+class HFPipelineBasedInferenceEngine(InferenceEngine, PackageRequirementsMixin):
     model_name: str
     max_new_tokens: int
+    _requirement = {
+        "transformers": "Install huggingface package using 'pip install --upgrade transformers'"
+    }
 
     def prepare(self):
         from transformers import pipeline
@@ -31,3 +45,111 @@ class HFPipelineBasedInferenceEngine(Artifact):
                 max_new_tokens=self.max_new_tokens,
             )
         ]
+
+
+@dataclass
+class IbmGenAiInferenceEngineParams:
+    decoding_method: Optional[str] = None
+    max_new_tokens: Optional[int] = None
+    min_new_tokens: Optional[int] = None
+    random_seed: Optional[int] = None
+    repetition_penalty: Optional[float] = None
+    stop_sequences: Optional[List[str]] = None
+    temperature: Optional[float] = None
+    top_k: Optional[int] = None
+    top_p: Optional[float] = None
+    typical_p: Optional[float] = None
+
+
+class IbmGenAiInferenceEngine(InferenceEngine, PackageRequirementsMixin):
+    label: str = "ibm_genai"
+    model_name: str
+    parameters: IbmGenAiInferenceEngineParams = IbmGenAiInferenceEngineParams()
+    _requirement = {
+        "genai": "Install ibm-genai package using 'pip install --upgrade ibm-generative-ai'"
+    }
+
+    def prepare(self):
+        from genai import Client, Credentials
+
+        api_key_env_var_name = "GENAI_KEY"
+        api_key = os.environ.get(api_key_env_var_name)
+        assert api_key is not None, (
+            f"Error while trying to run IbmGenAiInferenceEngine."
+            f" Please set the environment variable '{api_key_env_var_name}'."
+        )
+        api_endpoint = os.environ.get("GENAI_API")
+        credentials = Credentials(api_key=api_key, api_endpoint=api_endpoint)
+        self.client = Client(credentials=credentials)
+
+        self._assert_allow_passing_data_to_remote_api(self.label)
+
+    def infer(self, dataset):
+        from genai.schema import TextGenerationParameters
+
+        genai_params = TextGenerationParameters(**self.parameters.__dict__)
+        return list(
+            self.client.text.generation.create(
+                model_id=self.model_name,
+                inputs=[instance["source"] for instance in dataset],
+                parameters=genai_params,
+            )
+        )
+
+
+@dataclass
+class OpenAiInferenceEngineParams:
+    frequency_penalty: Optional[float] = None
+    presence_penalty: Optional[float] = None
+    max_tokens: Optional[int] = None
+    seed: Optional[int] = None
+    stop: Union[Optional[str], List[str]] = None
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+
+
+class OpenAiInferenceEngine(InferenceEngine, PackageRequirementsMixin):
+    label: str = "openai"
+    model_name: str
+    parameters: OpenAiInferenceEngineParams = OpenAiInferenceEngineParams()
+    _requirement = {
+        "openai": "Install openai package using 'pip install --upgrade openai'"
+    }
+
+    def prepare(self):
+        from openai import OpenAI
+
+        api_key_env_var_name = "OPENAI_API_KEY"
+        api_key = os.environ.get(api_key_env_var_name)
+        assert api_key is not None, (
+            f"Error while trying to run OpenAiInferenceEngine."
+            f" Please set the environment variable '{api_key_env_var_name}'."
+        )
+
+        self.client = OpenAI(api_key=api_key)
+        self._assert_allow_passing_data_to_remote_api(self.label)
+
+    def infer(self, dataset):
+        return [
+            self.client.chat.completions.create(
+                messages=[
+                    # {
+                    #     "role": "system",
+                    #     "content": self.system_prompt,
+                    # },
+                    {
+                        "role": "user",
+                        "content": instance["source"],
+                    }
+                ],
+                model=self.model_name,
+                frequency_penalty=self.parameters.frequency_penalty,
+                presence_penalty=self.parameters.presence_penalty,
+                max_tokens=self.parameters.max_tokens,
+                seed=self.parameters.seed,
+                stop=self.parameters.stop,
+                temperature=self.parameters.temperature,
+                top_p=self.parameters.top_p,
+            )
+            for instance in dataset
+        ]
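
For orientation, here is a minimal usage sketch of the local engine rewritten above. Everything outside the diff is an assumption: the unitxt.inference import path, keyword-argument construction, the explicit prepare() call (the Artifact lifecycle may invoke it automatically), and the placeholder model name.

```python
# Minimal sketch, NOT the documented unitxt API: the import path, kwargs
# construction, and the explicit prepare() call are all assumptions.
from unitxt.inference import HFPipelineBasedInferenceEngine

engine = HFPipelineBasedInferenceEngine(
    model_name="google/flan-t5-small",  # placeholder model
    max_new_tokens=32,
)
engine.prepare()  # builds the transformers pipeline

# infer() consumes a list of instances, each carrying a "source" field,
# matching the comprehensions in the diff above.
dataset = [{"source": "Translate to French: Hello"}]
print(engine.infer(dataset))
```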
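A matching sketch for the two remote engines, wiring up the environment variables the diff reads: GENAI_KEY (plus the GENAI_API endpoint, per the fix above) for IBM GenAI, OPENAI_API_KEY for OpenAI, and UNITXT_ALLOW_PASSING_DATA_TO_REMOTE_API for the new data-passing guard. Key values, model names, and the guard variable's accepted value format are placeholders or assumptions.

```python
# Sketch only; same construction/lifecycle assumptions as the block above.
import os

from unitxt.inference import (
    IbmGenAiInferenceEngine,
    IbmGenAiInferenceEngineParams,
    OpenAiInferenceEngine,
    OpenAiInferenceEngineParams,
)

# Without this, prepare() fails the _assert_allow_passing_data_to_remote_api
# check; the accepted value format here is an assumption.
os.environ["UNITXT_ALLOW_PASSING_DATA_TO_REMOTE_API"] = "True"

dataset = [{"source": "Say hello."}]

# IBM GenAI: prepare() requires GENAI_KEY and optionally an endpoint.
os.environ["GENAI_KEY"] = "<your-genai-key>"  # placeholder
genai_engine = IbmGenAiInferenceEngine(
    model_name="google/flan-t5-xl",  # placeholder model id
    parameters=IbmGenAiInferenceEngineParams(max_new_tokens=32),
)
genai_engine.prepare()
genai_responses = genai_engine.infer(dataset)  # raw generation responses

# OpenAI: prepare() requires OPENAI_API_KEY.
os.environ["OPENAI_API_KEY"] = "<your-openai-key>"  # placeholder
openai_engine = OpenAiInferenceEngine(
    model_name="gpt-3.5-turbo",
    parameters=OpenAiInferenceEngineParams(max_tokens=32, temperature=0.0),
)
openai_engine.prepare()
# As in the diff, infer() returns raw chat-completion objects, not text.
completions = openai_engine.infer(dataset)
print(completions[0].choices[0].message.content)
```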