Kevin Hu
committed on
Commit
·
c9d78b3
1
Parent(s):
ccb514f
Fix batch size issue. (#3675)
Browse files

### What problem does this PR solve?
#3657
### Type of change
- [x] Bug Fix (non-breaking change which fixes an issue)
- rag/llm/embedding_model.py +17 -17
- rag/nlp/query.py +1 -1
- rag/nlp/term_weight.py +2 -2
rag/llm/embedding_model.py
CHANGED
|
@@ -38,7 +38,7 @@ class Base(ABC):
|
|
| 38 |
def __init__(self, key, model_name):
|
| 39 |
pass
|
| 40 |
|
| 41 |
-
def encode(self, texts: list, batch_size=
|
| 42 |
raise NotImplementedError("Please implement encode method!")
|
| 43 |
|
| 44 |
def encode_queries(self, text: str):
|
|
@@ -78,7 +78,7 @@ class DefaultEmbedding(Base):
|
|
| 78 |
use_fp16=torch.cuda.is_available())
|
| 79 |
self._model = DefaultEmbedding._model
|
| 80 |
|
| 81 |
-
def encode(self, texts: list, batch_size=
|
| 82 |
texts = [truncate(t, 2048) for t in texts]
|
| 83 |
token_count = 0
|
| 84 |
for t in texts:
|
|
@@ -101,7 +101,7 @@ class OpenAIEmbed(Base):
|
|
| 101 |
self.client = OpenAI(api_key=key, base_url=base_url)
|
| 102 |
self.model_name = model_name
|
| 103 |
|
| 104 |
-
def encode(self, texts: list, batch_size=
|
| 105 |
texts = [truncate(t, 8191) for t in texts]
|
| 106 |
res = self.client.embeddings.create(input=texts,
|
| 107 |
model=self.model_name)
|
|
@@ -123,7 +123,7 @@ class LocalAIEmbed(Base):
|
|
| 123 |
self.client = OpenAI(api_key="empty", base_url=base_url)
|
| 124 |
self.model_name = model_name.split("___")[0]
|
| 125 |
|
| 126 |
-
def encode(self, texts: list, batch_size=
|
| 127 |
res = self.client.embeddings.create(input=texts, model=self.model_name)
|
| 128 |
return (
|
| 129 |
np.array([d.embedding for d in res.data]),
|
|
@@ -200,7 +200,7 @@ class ZhipuEmbed(Base):
|
|
| 200 |
self.client = ZhipuAI(api_key=key)
|
| 201 |
self.model_name = model_name
|
| 202 |
|
| 203 |
-
def encode(self, texts: list, batch_size=
|
| 204 |
arr = []
|
| 205 |
tks_num = 0
|
| 206 |
for txt in texts:
|
|
@@ -221,7 +221,7 @@ class OllamaEmbed(Base):
|
|
| 221 |
self.client = Client(host=kwargs["base_url"])
|
| 222 |
self.model_name = model_name
|
| 223 |
|
| 224 |
-
def encode(self, texts: list, batch_size=
|
| 225 |
arr = []
|
| 226 |
tks_num = 0
|
| 227 |
for txt in texts:
|
|
@@ -252,7 +252,7 @@ class FastEmbed(Base):
|
|
| 252 |
from fastembed import TextEmbedding
|
| 253 |
self._model = TextEmbedding(model_name, cache_dir, threads, **kwargs)
|
| 254 |
|
| 255 |
-
def encode(self, texts: list, batch_size=
|
| 256 |
# Using the internal tokenizer to encode the texts and get the total
|
| 257 |
# number of tokens
|
| 258 |
encodings = self._model.model.tokenizer.encode_batch(texts)
|
|
@@ -278,7 +278,7 @@ class XinferenceEmbed(Base):
|
|
| 278 |
self.client = OpenAI(api_key=key, base_url=base_url)
|
| 279 |
self.model_name = model_name
|
| 280 |
|
| 281 |
-
def encode(self, texts: list, batch_size=
|
| 282 |
res = self.client.embeddings.create(input=texts,
|
| 283 |
model=self.model_name)
|
| 284 |
return np.array([d.embedding for d in res.data]
|
|
@@ -394,7 +394,7 @@ class MistralEmbed(Base):
|
|
| 394 |
self.client = MistralClient(api_key=key)
|
| 395 |
self.model_name = model_name
|
| 396 |
|
| 397 |
-
def encode(self, texts: list, batch_size=
|
| 398 |
texts = [truncate(t, 8196) for t in texts]
|
| 399 |
res = self.client.embeddings(input=texts,
|
| 400 |
model=self.model_name)
|
|
@@ -418,7 +418,7 @@ class BedrockEmbed(Base):
|
|
| 418 |
self.client = boto3.client(service_name='bedrock-runtime', region_name=self.bedrock_region,
|
| 419 |
aws_access_key_id=self.bedrock_ak, aws_secret_access_key=self.bedrock_sk)
|
| 420 |
|
| 421 |
-
def encode(self, texts: list, batch_size=
|
| 422 |
texts = [truncate(t, 8196) for t in texts]
|
| 423 |
embeddings = []
|
| 424 |
token_count = 0
|
|
@@ -456,7 +456,7 @@ class GeminiEmbed(Base):
|
|
| 456 |
genai.configure(api_key=key)
|
| 457 |
self.model_name = 'models/' + model_name
|
| 458 |
|
| 459 |
-
def encode(self, texts: list, batch_size=
|
| 460 |
texts = [truncate(t, 2048) for t in texts]
|
| 461 |
token_count = sum(num_tokens_from_string(text) for text in texts)
|
| 462 |
result = genai.embed_content(
|
|
@@ -541,7 +541,7 @@ class CoHereEmbed(Base):
|
|
| 541 |
self.client = Client(api_key=key)
|
| 542 |
self.model_name = model_name
|
| 543 |
|
| 544 |
-
def encode(self, texts: list, batch_size=
|
| 545 |
res = self.client.embed(
|
| 546 |
texts=texts,
|
| 547 |
model=self.model_name,
|
|
@@ -599,7 +599,7 @@ class SILICONFLOWEmbed(Base):
|
|
| 599 |
self.base_url = base_url
|
| 600 |
self.model_name = model_name
|
| 601 |
|
| 602 |
-
def encode(self, texts: list, batch_size=
|
| 603 |
payload = {
|
| 604 |
"model": self.model_name,
|
| 605 |
"input": texts,
|
|
@@ -628,7 +628,7 @@ class ReplicateEmbed(Base):
|
|
| 628 |
self.model_name = model_name
|
| 629 |
self.client = Client(api_token=key)
|
| 630 |
|
| 631 |
-
def encode(self, texts: list, batch_size=
|
| 632 |
res = self.client.run(self.model_name, input={"texts": json.dumps(texts)})
|
| 633 |
return np.array(res), sum([num_tokens_from_string(text) for text in texts])
|
| 634 |
|
|
@@ -647,7 +647,7 @@ class BaiduYiyanEmbed(Base):
|
|
| 647 |
self.client = qianfan.Embedding(ak=ak, sk=sk)
|
| 648 |
self.model_name = model_name
|
| 649 |
|
| 650 |
-
def encode(self, texts: list, batch_size=
|
| 651 |
res = self.client.do(model=self.model_name, texts=texts).body
|
| 652 |
return (
|
| 653 |
np.array([r["embedding"] for r in res["data"]]),
|
|
@@ -669,7 +669,7 @@ class VoyageEmbed(Base):
|
|
| 669 |
self.client = voyageai.Client(api_key=key)
|
| 670 |
self.model_name = model_name
|
| 671 |
|
| 672 |
-
def encode(self, texts: list, batch_size=
|
| 673 |
res = self.client.embed(
|
| 674 |
texts=texts, model=self.model_name, input_type="document"
|
| 675 |
)
|
|
@@ -691,7 +691,7 @@ class HuggingFaceEmbed(Base):
|
|
| 691 |
self.model_name = model_name
|
| 692 |
self.base_url = base_url or "http://127.0.0.1:8080"
|
| 693 |
|
| 694 |
-
def encode(self, texts: list, batch_size=
|
| 695 |
embeddings = []
|
| 696 |
for text in texts:
|
| 697 |
response = requests.post(
|
|
|
|
| 38 |
def __init__(self, key, model_name):
|
| 39 |
pass
|
| 40 |
|
| 41 |
+
def encode(self, texts: list, batch_size=16):
|
| 42 |
raise NotImplementedError("Please implement encode method!")
|
| 43 |
|
| 44 |
def encode_queries(self, text: str):
|
|
|
|
| 78 |
use_fp16=torch.cuda.is_available())
|
| 79 |
self._model = DefaultEmbedding._model
|
| 80 |
|
| 81 |
+
def encode(self, texts: list, batch_size=16):
|
| 82 |
texts = [truncate(t, 2048) for t in texts]
|
| 83 |
token_count = 0
|
| 84 |
for t in texts:
|
|
|
|
| 101 |
self.client = OpenAI(api_key=key, base_url=base_url)
|
| 102 |
self.model_name = model_name
|
| 103 |
|
| 104 |
+
def encode(self, texts: list, batch_size=16):
|
| 105 |
texts = [truncate(t, 8191) for t in texts]
|
| 106 |
res = self.client.embeddings.create(input=texts,
|
| 107 |
model=self.model_name)
|
|
|
|
| 123 |
self.client = OpenAI(api_key="empty", base_url=base_url)
|
| 124 |
self.model_name = model_name.split("___")[0]
|
| 125 |
|
| 126 |
+
def encode(self, texts: list, batch_size=16):
|
| 127 |
res = self.client.embeddings.create(input=texts, model=self.model_name)
|
| 128 |
return (
|
| 129 |
np.array([d.embedding for d in res.data]),
|
|
|
|
| 200 |
self.client = ZhipuAI(api_key=key)
|
| 201 |
self.model_name = model_name
|
| 202 |
|
| 203 |
+
def encode(self, texts: list, batch_size=16):
|
| 204 |
arr = []
|
| 205 |
tks_num = 0
|
| 206 |
for txt in texts:
|
|
|
|
| 221 |
self.client = Client(host=kwargs["base_url"])
|
| 222 |
self.model_name = model_name
|
| 223 |
|
| 224 |
+
def encode(self, texts: list, batch_size=16):
|
| 225 |
arr = []
|
| 226 |
tks_num = 0
|
| 227 |
for txt in texts:
|
|
|
|
| 252 |
from fastembed import TextEmbedding
|
| 253 |
self._model = TextEmbedding(model_name, cache_dir, threads, **kwargs)
|
| 254 |
|
| 255 |
+
def encode(self, texts: list, batch_size=16):
|
| 256 |
# Using the internal tokenizer to encode the texts and get the total
|
| 257 |
# number of tokens
|
| 258 |
encodings = self._model.model.tokenizer.encode_batch(texts)
|
|
|
|
| 278 |
self.client = OpenAI(api_key=key, base_url=base_url)
|
| 279 |
self.model_name = model_name
|
| 280 |
|
| 281 |
+
def encode(self, texts: list, batch_size=16):
|
| 282 |
res = self.client.embeddings.create(input=texts,
|
| 283 |
model=self.model_name)
|
| 284 |
return np.array([d.embedding for d in res.data]
|
|
|
|
| 394 |
self.client = MistralClient(api_key=key)
|
| 395 |
self.model_name = model_name
|
| 396 |
|
| 397 |
+
def encode(self, texts: list, batch_size=16):
|
| 398 |
texts = [truncate(t, 8196) for t in texts]
|
| 399 |
res = self.client.embeddings(input=texts,
|
| 400 |
model=self.model_name)
|
|
|
|
| 418 |
self.client = boto3.client(service_name='bedrock-runtime', region_name=self.bedrock_region,
|
| 419 |
aws_access_key_id=self.bedrock_ak, aws_secret_access_key=self.bedrock_sk)
|
| 420 |
|
| 421 |
+
def encode(self, texts: list, batch_size=16):
|
| 422 |
texts = [truncate(t, 8196) for t in texts]
|
| 423 |
embeddings = []
|
| 424 |
token_count = 0
|
|
|
|
| 456 |
genai.configure(api_key=key)
|
| 457 |
self.model_name = 'models/' + model_name
|
| 458 |
|
| 459 |
+
def encode(self, texts: list, batch_size=16):
|
| 460 |
texts = [truncate(t, 2048) for t in texts]
|
| 461 |
token_count = sum(num_tokens_from_string(text) for text in texts)
|
| 462 |
result = genai.embed_content(
|
|
|
|
| 541 |
self.client = Client(api_key=key)
|
| 542 |
self.model_name = model_name
|
| 543 |
|
| 544 |
+
def encode(self, texts: list, batch_size=16):
|
| 545 |
res = self.client.embed(
|
| 546 |
texts=texts,
|
| 547 |
model=self.model_name,
|
|
|
|
| 599 |
self.base_url = base_url
|
| 600 |
self.model_name = model_name
|
| 601 |
|
| 602 |
+
def encode(self, texts: list, batch_size=16):
|
| 603 |
payload = {
|
| 604 |
"model": self.model_name,
|
| 605 |
"input": texts,
|
|
|
|
| 628 |
self.model_name = model_name
|
| 629 |
self.client = Client(api_token=key)
|
| 630 |
|
| 631 |
+
def encode(self, texts: list, batch_size=16):
|
| 632 |
res = self.client.run(self.model_name, input={"texts": json.dumps(texts)})
|
| 633 |
return np.array(res), sum([num_tokens_from_string(text) for text in texts])
|
| 634 |
|
|
|
|
| 647 |
self.client = qianfan.Embedding(ak=ak, sk=sk)
|
| 648 |
self.model_name = model_name
|
| 649 |
|
| 650 |
+
def encode(self, texts: list, batch_size=16):
|
| 651 |
res = self.client.do(model=self.model_name, texts=texts).body
|
| 652 |
return (
|
| 653 |
np.array([r["embedding"] for r in res["data"]]),
|
|
|
|
| 669 |
self.client = voyageai.Client(api_key=key)
|
| 670 |
self.model_name = model_name
|
| 671 |
|
| 672 |
+
def encode(self, texts: list, batch_size=16):
|
| 673 |
res = self.client.embed(
|
| 674 |
texts=texts, model=self.model_name, input_type="document"
|
| 675 |
)
|
|
|
|
| 691 |
self.model_name = model_name
|
| 692 |
self.base_url = base_url or "http://127.0.0.1:8080"
|
| 693 |
|
| 694 |
+
def encode(self, texts: list, batch_size=16):
|
| 695 |
embeddings = []
|
| 696 |
for text in texts:
|
| 697 |
response = requests.post(
|
rag/nlp/query.py
CHANGED
|
@@ -54,7 +54,7 @@ class FulltextQueryer:
|
|
| 54 |
def rmWWW(txt):
|
| 55 |
patts = [
|
| 56 |
(
|
| 57 |
-
r"是*(
|
| 58 |
"",
|
| 59 |
),
|
| 60 |
(r"(^| )(what|who|how|which|where|why)('re|'s)? ", " "),
|
|
|
|
| 54 |
def rmWWW(txt):
|
| 55 |
patts = [
|
| 56 |
(
|
| 57 |
+
r"是*(什么样的|哪家|一下|那家|请问|啥样|咋样了|什么时候|何时|何地|何人|是否|是不是|多少|哪里|怎么|哪儿|怎么样|如何|哪些|是啥|啥是|啊|吗|呢|吧|咋|什么|有没有|呀|谁|哪位|哪个)是*",
|
| 58 |
"",
|
| 59 |
),
|
| 60 |
(r"(^| )(what|who|how|which|where|why)('re|'s)? ", " "),
|
rag/nlp/term_weight.py
CHANGED
|
@@ -228,7 +228,7 @@ class Dealer:
|
|
| 228 |
idf2 = np.array([idf(df(t), 1000000000) for t in tks])
|
| 229 |
wts = (0.3 * idf1 + 0.7 * idf2) * \
|
| 230 |
np.array([ner(t) * postag(t) for t in tks])
|
| 231 |
-
wts = [
|
| 232 |
tw = list(zip(tks, wts))
|
| 233 |
else:
|
| 234 |
for tk in tks:
|
|
@@ -237,7 +237,7 @@ class Dealer:
|
|
| 237 |
idf2 = np.array([idf(df(t), 1000000000) for t in tt])
|
| 238 |
wts = (0.3 * idf1 + 0.7 * idf2) * \
|
| 239 |
np.array([ner(t) * postag(t) for t in tt])
|
| 240 |
-
wts = [
|
| 241 |
tw.extend(zip(tt, wts))
|
| 242 |
|
| 243 |
S = np.sum([s for _, s in tw])
|
|
|
|
| 228 |
idf2 = np.array([idf(df(t), 1000000000) for t in tks])
|
| 229 |
wts = (0.3 * idf1 + 0.7 * idf2) * \
|
| 230 |
np.array([ner(t) * postag(t) for t in tks])
|
| 231 |
+
wts = [s for s in wts]
|
| 232 |
tw = list(zip(tks, wts))
|
| 233 |
else:
|
| 234 |
for tk in tks:
|
|
|
|
| 237 |
idf2 = np.array([idf(df(t), 1000000000) for t in tt])
|
| 238 |
wts = (0.3 * idf1 + 0.7 * idf2) * \
|
| 239 |
np.array([ner(t) * postag(t) for t in tt])
|
| 240 |
+
wts = [s for s in wts]
|
| 241 |
tw.extend(zip(tt, wts))
|
| 242 |
|
| 243 |
S = np.sum([s for _, s in tw])
|