from unittest.mock import MagicMock, patch
import pytest
try:
import google.ai.generativelanguage as genai
has_google = True
except ImportError:
has_google = False
from llama_index.legacy.response_synthesizers.google.generativeai import (
GoogleTextSynthesizer,
set_google_config,
)
from llama_index.legacy.schema import NodeWithScore, TextNode
SKIP_TEST_REASON = "Google GenerativeAI is not installed"
if has_google:
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
set_google_config(
api_endpoint="No-such-endpoint-to-prevent-hitting-real-backend",
testing=True,
)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.auth.credentials.Credentials")
def test_set_google_config(mock_credentials: MagicMock) -> None:
set_google_config(auth_credentials=mock_credentials)
config = genaix.get_config()
assert config.auth_credentials == mock_credentials
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
def test_get_response(mock_generate_answer: MagicMock) -> None:
# Arrange
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[genai.Part(text="42")]),
grounding_attributions=[
genai.GroundingAttribution(
content=genai.Content(
parts=[genai.Part(text="Meaning of life is 42.")]
),
source_id=genai.AttributionSourceId(
grounding_passage=genai.AttributionSourceId.GroundingPassageId(
passage_id="corpora/123/documents/456/chunks/789",
part_index=0,
)
),
),
],
finish_reason=genai.Candidate.FinishReason.STOP,
),
answerable_probability=0.7,
)
# Act
synthesizer = GoogleTextSynthesizer.from_defaults(
temperature=0.5,
answer_style=genai.GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE,
safety_setting=[
genai.SafetySetting(
category=genai.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold=genai.SafetySetting.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
)
],
)
response = synthesizer.get_response(
query_str="What is the meaning of life?",
text_chunks=[
"It's 42",
],
)
# Assert
assert response.answer == "42"
assert response.attributed_passages == ["Meaning of life is 42."]
assert response.answerable_probability == pytest.approx(0.7)
assert mock_generate_answer.call_count == 1
request = mock_generate_answer.call_args.args[0]
assert request.contents[0].parts[0].text == "What is the meaning of life?"
assert request.answer_style == genai.GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE
assert len(request.safety_settings) == 1
assert (
request.safety_settings[0].category
== genai.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT
)
assert (
request.safety_settings[0].threshold
== genai.SafetySetting.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
)
assert request.temperature == 0.5
passages = request.inline_passages.passages
assert len(passages) == 1
passage = passages[0]
assert passage.content.parts[0].text == "It's 42"
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
def test_synthesize(mock_generate_answer: MagicMock) -> None:
# Arrange
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[genai.Part(text="42")]),
grounding_attributions=[
genai.GroundingAttribution(
content=genai.Content(
parts=[genai.Part(text="Meaning of life is 42")]
),
source_id=genai.AttributionSourceId(
grounding_passage=genai.AttributionSourceId.GroundingPassageId(
passage_id="corpora/123/documents/456/chunks/777",
part_index=0,
)
),
),
genai.GroundingAttribution(
content=genai.Content(parts=[genai.Part(text="Or maybe not")]),
source_id=genai.AttributionSourceId(
grounding_passage=genai.AttributionSourceId.GroundingPassageId(
passage_id="corpora/123/documents/456/chunks/888",
part_index=0,
)
),
),
],
finish_reason=genai.Candidate.FinishReason.STOP,
),
answerable_probability=0.9,
)
# Act
synthesizer = GoogleTextSynthesizer.from_defaults()
response = synthesizer.synthesize(
query="What is the meaning of life?",
nodes=[
NodeWithScore(
node=TextNode(text="It's 42"),
score=0.5,
),
],
additional_source_nodes=[
NodeWithScore(
node=TextNode(text="Additional node"),
score=0.4,
),
],
)
# Assert
assert response.response == "42"
assert len(response.source_nodes) == 4
first_attributed_source = response.source_nodes[0]
assert first_attributed_source.node.text == "Meaning of life is 42"
assert first_attributed_source.score is None
second_attributed_source = response.source_nodes[1]
assert second_attributed_source.node.text == "Or maybe not"
assert second_attributed_source.score is None
first_input_source = response.source_nodes[2]
assert first_input_source.node.text == "It's 42"
assert first_input_source.score == pytest.approx(0.5)
first_additional_source = response.source_nodes[3]
assert first_additional_source.node.text == "Additional node"
assert first_additional_source.score == pytest.approx(0.4)
assert response.metadata is not None
assert response.metadata.get("answerable_probability", None) == pytest.approx(0.9)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
def test_synthesize_with_max_token_blocking(mock_generate_answer: MagicMock) -> None:
# Arrange
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[]),
grounding_attributions=[],
finish_reason=genai.Candidate.FinishReason.MAX_TOKENS,
),
)
# Act
synthesizer = GoogleTextSynthesizer.from_defaults()
with pytest.raises(Exception) as e:
synthesizer.synthesize(
query="What is the meaning of life?",
nodes=[
NodeWithScore(
node=TextNode(text="It's 42"),
score=0.5,
),
],
)
# Assert
assert "Maximum token" in str(e.value)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
def test_synthesize_with_safety_blocking(mock_generate_answer: MagicMock) -> None:
# Arrange
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[]),
grounding_attributions=[],
finish_reason=genai.Candidate.FinishReason.SAFETY,
),
)
# Act
synthesizer = GoogleTextSynthesizer.from_defaults()
with pytest.raises(Exception) as e:
synthesizer.synthesize(
query="What is the meaning of life?",
nodes=[
NodeWithScore(
node=TextNode(text="It's 42"),
score=0.5,
),
],
)
# Assert
assert "safety" in str(e.value)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
def test_synthesize_with_recitation_blocking(mock_generate_answer: MagicMock) -> None:
# Arrange
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[]),
grounding_attributions=[],
finish_reason=genai.Candidate.FinishReason.RECITATION,
),
)
# Act
synthesizer = GoogleTextSynthesizer.from_defaults()
with pytest.raises(Exception) as e:
synthesizer.synthesize(
query="What is the meaning of life?",
nodes=[
NodeWithScore(
node=TextNode(text="It's 42"),
score=0.5,
),
],
)
# Assert
assert "recitation" in str(e.value)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
def test_synthesize_with_unknown_blocking(mock_generate_answer: MagicMock) -> None:
# Arrange
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[]),
grounding_attributions=[],
finish_reason=genai.Candidate.FinishReason.OTHER,
),
)
# Act
synthesizer = GoogleTextSynthesizer.from_defaults()
with pytest.raises(Exception) as e:
synthesizer.synthesize(
query="What is the meaning of life?",
nodes=[
NodeWithScore(
node=TextNode(text="It's 42"),
score=0.5,
),
],
)
# Assert
assert "Unexpected" in str(e.value)
"""Global eval handlers."""
from typing import Any
from llama_index.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler
from llama_index.callbacks.base_handler import BaseCallbackHandler
from llama_index.callbacks.deepeval_callback import deepeval_callback_handler
from llama_index.callbacks.honeyhive_callback import honeyhive_callback_handler
from llama_index.callbacks.open_inference_callback import OpenInferenceCallbackHandler
from llama_index.callbacks.promptlayer_handler import PromptLayerHandler
from llama_index.callbacks.simple_llm_handler import SimpleLLMHandler
from llama_index.callbacks.wandb_callback import WandbCallbackHandler
def set_global_handler(eval_mode: str, **eval_params: Any) -> None:
"""Set global eval handlers."""
import llama_index
llama_index.global_handler = create_global_handler(eval_mode, **eval_params)
def create_global_handler(eval_mode: str, **eval_params: Any) -> BaseCallbackHandler:
"""Get global eval handler."""
if eval_mode == "wandb":
handler: BaseCallbackHandler = WandbCallbackHandler(**eval_params)
elif eval_mode == "openinference":
handler = OpenInferenceCallbackHandler(**eval_params)
elif eval_mode == "arize_phoenix":
handler = arize_phoenix_callback_handler(**eval_params)
elif eval_mode == "honeyhive":
handler = honeyhive_callback_handler(**eval_params)
elif eval_mode == "promptlayer":
handler = PromptLayerHandler(**eval_params)
elif eval_mode == "deepeval":
handler = deepeval_callback_handler(**eval_params)
elif eval_mode == "simple":
handler = SimpleLLMHandler(**eval_params)
else:
raise ValueError(f"Eval mode {eval_mode} not supported.")
return handler
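# --- Usage sketch (illustrative, not part of the module) ---
# The handler is typically installed once at startup. "simple" is the only
# mode above that needs no third-party credentials, so it is used here.
if __name__ == "__main__":
    import llama_index

    set_global_handler("simple")
    assert isinstance(llama_index.global_handler, SimpleLLMHandler)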
"""Global eval handlers."""
from typing import Any
from llama_index.callbacks.argilla_callback import argilla_callback_handler
from llama_index.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler
from llama_index.callbacks.base_handler import BaseCallbackHandler
from llama_index.callbacks.deepeval_callback import deepeval_callback_handler
from llama_index.callbacks.honeyhive_callback import honeyhive_callback_handler
from llama_index.callbacks.open_inference_callback import OpenInferenceCallbackHandler
from llama_index.callbacks.promptlayer_handler import PromptLayerHandler
from llama_index.callbacks.simple_llm_handler import SimpleLLMHandler
from llama_index.callbacks.wandb_callback import WandbCallbackHandler
def set_global_handler(eval_mode: str, **eval_params: Any) -> None:
"""Set global eval handlers."""
import llama_index
llama_index.global_handler = create_global_handler(eval_mode, **eval_params)
def create_global_handler(eval_mode: str, **eval_params: Any) -> BaseCallbackHandler:
"""Get global eval handler."""
if eval_mode == "wandb":
handler: BaseCallbackHandler = WandbCallbackHandler(**eval_params)
elif eval_mode == "openinference":
handler = OpenInferenceCallbackHandler(**eval_params)
elif eval_mode == "arize_phoenix":
handler = arize_phoenix_callback_handler(**eval_params)
elif eval_mode == "honeyhive":
handler = honeyhive_callback_handler(**eval_params)
elif eval_mode == "promptlayer":
handler = PromptLayerHandler(**eval_params)
elif eval_mode == "deepeval":
handler = deepeval_callback_handler(**eval_params)
elif eval_mode == "simple":
handler = SimpleLLMHandler(**eval_params)
elif eval_mode == "argilla":
handler = argilla_callback_handler(**eval_params)
else:
raise ValueError(f"Eval mode {eval_mode} not supported.")
return handler
| [
"llama_index.callbacks.open_inference_callback.OpenInferenceCallbackHandler",
"llama_index.callbacks.simple_llm_handler.SimpleLLMHandler",
"llama_index.callbacks.deepeval_callback.deepeval_callback_handler",
"llama_index.callbacks.wandb_callback.WandbCallbackHandler",
"llama_index.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler",
"llama_index.callbacks.honeyhive_callback.honeyhive_callback_handler",
"llama_index.callbacks.argilla_callback.argilla_callback_handler",
"llama_index.callbacks.promptlayer_handler.PromptLayerHandler"
] | [((1144, 1179), 'llama_index.callbacks.wandb_callback.WandbCallbackHandler', 'WandbCallbackHandler', ([], {}), '(**eval_params)\n', (1164, 1179), False, 'from llama_index.callbacks.wandb_callback import WandbCallbackHandler\n'), ((1237, 1280), 'llama_index.callbacks.open_inference_callback.OpenInferenceCallbackHandler', 'OpenInferenceCallbackHandler', ([], {}), '(**eval_params)\n', (1265, 1280), False, 'from llama_index.callbacks.open_inference_callback import OpenInferenceCallbackHandler\n'), ((1338, 1383), 'llama_index.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler', 'arize_phoenix_callback_handler', ([], {}), '(**eval_params)\n', (1368, 1383), False, 'from llama_index.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler\n'), ((1437, 1478), 'llama_index.callbacks.honeyhive_callback.honeyhive_callback_handler', 'honeyhive_callback_handler', ([], {}), '(**eval_params)\n', (1463, 1478), False, 'from llama_index.callbacks.honeyhive_callback import honeyhive_callback_handler\n'), ((1534, 1567), 'llama_index.callbacks.promptlayer_handler.PromptLayerHandler', 'PromptLayerHandler', ([], {}), '(**eval_params)\n', (1552, 1567), False, 'from llama_index.callbacks.promptlayer_handler import PromptLayerHandler\n'), ((1620, 1660), 'llama_index.callbacks.deepeval_callback.deepeval_callback_handler', 'deepeval_callback_handler', ([], {}), '(**eval_params)\n', (1645, 1660), False, 'from llama_index.callbacks.deepeval_callback import deepeval_callback_handler\n'), ((1711, 1742), 'llama_index.callbacks.simple_llm_handler.SimpleLLMHandler', 'SimpleLLMHandler', ([], {}), '(**eval_params)\n', (1727, 1742), False, 'from llama_index.callbacks.simple_llm_handler import SimpleLLMHandler\n'), ((1794, 1833), 'llama_index.callbacks.argilla_callback.argilla_callback_handler', 'argilla_callback_handler', ([], {}), '(**eval_params)\n', (1818, 1833), False, 'from llama_index.callbacks.argilla_callback import argilla_callback_handler\n')] |
"""Elasticsearch vector store."""
import asyncio
import uuid
from logging import getLogger
from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast
import nest_asyncio
import numpy as np
from llama_index.schema import BaseNode, MetadataMode, TextNode
from llama_index.vector_stores.types import (
MetadataFilters,
VectorStore,
VectorStoreQuery,
VectorStoreQueryMode,
VectorStoreQueryResult,
)
from llama_index.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict
logger = getLogger(__name__)
DISTANCE_STRATEGIES = Literal[
"COSINE",
"DOT_PRODUCT",
"EUCLIDEAN_DISTANCE",
]
def _get_elasticsearch_client(
*,
es_url: Optional[str] = None,
cloud_id: Optional[str] = None,
api_key: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
) -> Any:
"""Get AsyncElasticsearch client.
Args:
es_url: Elasticsearch URL.
cloud_id: Elasticsearch cloud ID.
api_key: Elasticsearch API key.
username: Elasticsearch username.
password: Elasticsearch password.
Returns:
AsyncElasticsearch client.
Raises:
ConnectionError: If Elasticsearch client cannot connect to Elasticsearch.
"""
try:
import elasticsearch
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
if es_url and cloud_id:
raise ValueError(
"Both es_url and cloud_id are defined. Please provide only one."
)
connection_params: Dict[str, Any] = {}
if es_url:
connection_params["hosts"] = [es_url]
elif cloud_id:
connection_params["cloud_id"] = cloud_id
else:
raise ValueError("Please provide either elasticsearch_url or cloud_id.")
if api_key:
connection_params["api_key"] = api_key
elif username and password:
connection_params["basic_auth"] = (username, password)
sync_es_client = elasticsearch.Elasticsearch(
**connection_params, headers={"user-agent": ElasticsearchStore.get_user_agent()}
)
async_es_client = elasticsearch.AsyncElasticsearch(**connection_params)
try:
        sync_es_client.info()  # use the sync client so the connection can be validated without awaiting
except Exception as e:
logger.error(f"Error connecting to Elasticsearch: {e}")
raise
return async_es_client
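# --- Connection sketch (illustrative; the endpoint is a placeholder) ---
def _example_local_client() -> Any:
    """Illustrative helper, not part of the store's API.

    Shows the minimal arguments for an unsecured local cluster; the URL is a
    placeholder, and the call raises if nothing is listening there. `api_key`
    takes precedence over `username`/`password`, mirroring the checks above.
    """
    return _get_elasticsearch_client(es_url="http://localhost:9200")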
def _to_elasticsearch_filter(standard_filters: MetadataFilters) -> Dict[str, Any]:
"""Convert standard filters to Elasticsearch filter.
Args:
standard_filters: Standard Llama-index filters.
Returns:
Elasticsearch filter.
"""
if len(standard_filters.legacy_filters()) == 1:
filter = standard_filters.legacy_filters()[0]
return {
"term": {
f"metadata.{filter.key}.keyword": {
"value": filter.value,
}
}
}
else:
operands = []
for filter in standard_filters.legacy_filters():
operands.append(
{
"term": {
f"metadata.{filter.key}.keyword": {
"value": filter.value,
}
}
}
)
return {"bool": {"must": operands}}
def _to_llama_similarities(scores: List[float]) -> List[float]:
    """Map raw scores to (0, 1] with a max-shifted exponential (top score -> 1.0)."""
    if scores is None or len(scores) == 0:
        return []
    scores_to_norm: np.ndarray = np.array(scores)
    # Subtracting the max keeps exp() numerically stable and bounds results at 1.0.
    return np.exp(scores_to_norm - np.max(scores_to_norm)).tolist()
class ElasticsearchStore(VectorStore):
"""Elasticsearch vector store.
Args:
index_name: Name of the Elasticsearch index.
es_client: Optional. Pre-existing AsyncElasticsearch client.
es_url: Optional. Elasticsearch URL.
es_cloud_id: Optional. Elasticsearch cloud ID.
es_api_key: Optional. Elasticsearch API key.
es_user: Optional. Elasticsearch username.
es_password: Optional. Elasticsearch password.
text_field: Optional. Name of the Elasticsearch field that stores the text.
vector_field: Optional. Name of the Elasticsearch field that stores the
embedding.
batch_size: Optional. Batch size for bulk indexing. Defaults to 200.
distance_strategy: Optional. Distance strategy to use for similarity search.
Defaults to "COSINE".
Raises:
ConnectionError: If AsyncElasticsearch client cannot connect to Elasticsearch.
ValueError: If neither es_client nor es_url nor es_cloud_id is provided.
"""
stores_text: bool = True
def __init__(
self,
index_name: str,
es_client: Optional[Any] = None,
es_url: Optional[str] = None,
es_cloud_id: Optional[str] = None,
es_api_key: Optional[str] = None,
es_user: Optional[str] = None,
es_password: Optional[str] = None,
text_field: str = "content",
vector_field: str = "embedding",
batch_size: int = 200,
distance_strategy: Optional[DISTANCE_STRATEGIES] = "COSINE",
) -> None:
nest_asyncio.apply()
self.index_name = index_name
self.text_field = text_field
self.vector_field = vector_field
self.batch_size = batch_size
self.distance_strategy = distance_strategy
if es_client is not None:
self._client = es_client.options(
headers={"user-agent": self.get_user_agent()}
)
elif es_url is not None or es_cloud_id is not None:
self._client = _get_elasticsearch_client(
es_url=es_url,
username=es_user,
password=es_password,
cloud_id=es_cloud_id,
api_key=es_api_key,
)
else:
raise ValueError(
"""Either provide a pre-existing AsyncElasticsearch or valid \
credentials for creating a new connection."""
)
@property
def client(self) -> Any:
"""Get async elasticsearch client."""
return self._client
@staticmethod
def get_user_agent() -> str:
"""Get user agent for elasticsearch client."""
import llama_index
return f"llama_index-py-vs/{llama_index.__version__}"
async def _create_index_if_not_exists(
self, index_name: str, dims_length: Optional[int] = None
) -> None:
"""Create the AsyncElasticsearch index if it doesn't already exist.
Args:
index_name: Name of the AsyncElasticsearch index to create.
dims_length: Length of the embedding vectors.
"""
if await self.client.indices.exists(index=index_name):
logger.debug(f"Index {index_name} already exists. Skipping creation.")
else:
if dims_length is None:
raise ValueError(
"Cannot create index without specifying dims_length "
"when the index doesn't already exist. We infer "
"dims_length from the first embedding. Check that "
"you have provided an embedding function."
)
if self.distance_strategy == "COSINE":
similarityAlgo = "cosine"
elif self.distance_strategy == "EUCLIDEAN_DISTANCE":
similarityAlgo = "l2_norm"
elif self.distance_strategy == "DOT_PRODUCT":
similarityAlgo = "dot_product"
else:
raise ValueError(f"Similarity {self.distance_strategy} not supported.")
index_settings = {
"mappings": {
"properties": {
self.vector_field: {
"type": "dense_vector",
"dims": dims_length,
"index": True,
"similarity": similarityAlgo,
},
self.text_field: {"type": "text"},
"metadata": {
"properties": {
"document_id": {"type": "keyword"},
"doc_id": {"type": "keyword"},
"ref_doc_id": {"type": "keyword"},
}
},
}
}
}
logger.debug(
f"Creating index {index_name} with mappings {index_settings['mappings']}"
)
await self.client.indices.create(index=index_name, **index_settings)
def add(
self,
nodes: List[BaseNode],
*,
create_index_if_not_exists: bool = True,
**add_kwargs: Any,
) -> List[str]:
"""Add nodes to Elasticsearch index.
Args:
nodes: List of nodes with embeddings.
create_index_if_not_exists: Optional. Whether to create
the Elasticsearch index if it
doesn't already exist.
Defaults to True.
Returns:
List of node IDs that were added to the index.
Raises:
ImportError: If elasticsearch['async'] python package is not installed.
BulkIndexError: If AsyncElasticsearch async_bulk indexing fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.async_add(nodes, create_index_if_not_exists=create_index_if_not_exists)
)
async def async_add(
self,
nodes: List[BaseNode],
*,
create_index_if_not_exists: bool = True,
**add_kwargs: Any,
) -> List[str]:
"""Asynchronous method to add nodes to Elasticsearch index.
Args:
nodes: List of nodes with embeddings.
create_index_if_not_exists: Optional. Whether to create
the AsyncElasticsearch index if it
doesn't already exist.
Defaults to True.
Returns:
List of node IDs that were added to the index.
Raises:
ImportError: If elasticsearch python package is not installed.
BulkIndexError: If AsyncElasticsearch async_bulk indexing fails.
"""
try:
from elasticsearch.helpers import BulkIndexError, async_bulk
except ImportError:
raise ImportError(
"Could not import elasticsearch[async] python package. "
"Please install it with `pip install 'elasticsearch[async]'`."
)
if len(nodes) == 0:
return []
if create_index_if_not_exists:
dims_length = len(nodes[0].get_embedding())
await self._create_index_if_not_exists(
index_name=self.index_name, dims_length=dims_length
)
embeddings: List[List[float]] = []
texts: List[str] = []
metadatas: List[dict] = []
ids: List[str] = []
for node in nodes:
ids.append(node.node_id)
embeddings.append(node.get_embedding())
texts.append(node.get_content(metadata_mode=MetadataMode.NONE))
metadatas.append(node_to_metadata_dict(node, remove_text=True))
requests = []
return_ids = []
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
_id = ids[i] if ids else str(uuid.uuid4())
request = {
"_op_type": "index",
"_index": self.index_name,
self.vector_field: embeddings[i],
self.text_field: text,
"metadata": metadata,
"_id": _id,
}
requests.append(request)
return_ids.append(_id)
        try:
            # Bulk-index the requests once; stats_only returns (success, failed)
            # counts instead of accumulating per-item responses.
            success, failed = await async_bulk(
                self.client,
                requests,
                chunk_size=self.batch_size,
                stats_only=True,
                refresh=True,
            )
logger.debug(f"Added {success} and failed to add {failed} texts to index")
logger.debug(f"added texts {ids} to index")
return return_ids
except BulkIndexError as e:
logger.error(f"Error adding texts: {e}")
firstError = e.errors[0].get("index", {}).get("error", {})
logger.error(f"First error reason: {firstError.get('reason')}")
raise
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Delete node from Elasticsearch index.
Args:
ref_doc_id: ID of the node to delete.
delete_kwargs: Optional. Additional arguments to
pass to Elasticsearch delete_by_query.
Raises:
Exception: If Elasticsearch delete_by_query fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.adelete(ref_doc_id, **delete_kwargs)
)
async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Async delete node from Elasticsearch index.
Args:
ref_doc_id: ID of the node to delete.
delete_kwargs: Optional. Additional arguments to
pass to AsyncElasticsearch delete_by_query.
Raises:
Exception: If AsyncElasticsearch delete_by_query fails.
"""
try:
async with self.client as client:
res = await client.delete_by_query(
index=self.index_name,
query={"term": {"metadata.ref_doc_id": ref_doc_id}},
refresh=True,
**delete_kwargs,
)
if res["deleted"] == 0:
logger.warning(f"Could not find text {ref_doc_id} to delete")
else:
logger.debug(f"Deleted text {ref_doc_id} from index")
except Exception:
logger.error(f"Error deleting text: {ref_doc_id}")
raise
def query(
self,
query: VectorStoreQuery,
custom_query: Optional[
Callable[[Dict, Union[VectorStoreQuery, None]], Dict]
] = None,
es_filter: Optional[List[Dict]] = None,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Query index for top k most similar nodes.
Args:
            query (VectorStoreQuery): query with embedding and search parameters
custom_query: Optional. custom query function that takes in the es query
body and returns a modified query body.
This can be used to add additional query
parameters to the Elasticsearch query.
es_filter: Optional. Elasticsearch filter to apply to the
query. If filter is provided in the query,
this filter will be ignored.
Returns:
VectorStoreQueryResult: Result of the query.
Raises:
Exception: If Elasticsearch query fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.aquery(query, custom_query, es_filter, **kwargs)
)
async def aquery(
self,
query: VectorStoreQuery,
custom_query: Optional[
Callable[[Dict, Union[VectorStoreQuery, None]], Dict]
] = None,
es_filter: Optional[List[Dict]] = None,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Asynchronous query index for top k most similar nodes.
Args:
            query (VectorStoreQuery): query with embedding and search parameters
custom_query: Optional. custom query function that takes in the es query
body and returns a modified query body.
This can be used to add additional query
parameters to the AsyncElasticsearch query.
es_filter: Optional. AsyncElasticsearch filter to apply to the
query. If filter is provided in the query,
this filter will be ignored.
Returns:
VectorStoreQueryResult: Result of the query.
Raises:
Exception: If AsyncElasticsearch query fails.
"""
query_embedding = cast(List[float], query.query_embedding)
es_query = {}
if query.filters is not None and len(query.filters.legacy_filters()) > 0:
filter = [_to_elasticsearch_filter(query.filters)]
else:
filter = es_filter or []
if query.mode in (
VectorStoreQueryMode.DEFAULT,
VectorStoreQueryMode.HYBRID,
):
es_query["knn"] = {
"filter": filter,
"field": self.vector_field,
"query_vector": query_embedding,
"k": query.similarity_top_k,
"num_candidates": query.similarity_top_k * 10,
}
if query.mode in (
VectorStoreQueryMode.TEXT_SEARCH,
VectorStoreQueryMode.HYBRID,
):
es_query["query"] = {
"bool": {
"must": {"match": {self.text_field: {"query": query.query_str}}},
"filter": filter,
}
}
if query.mode == VectorStoreQueryMode.HYBRID:
es_query["rank"] = {"rrf": {}}
if custom_query is not None:
es_query = custom_query(es_query, query)
logger.debug(f"Calling custom_query, Query body now: {es_query}")
async with self.client as client:
response = await client.search(
index=self.index_name,
**es_query,
size=query.similarity_top_k,
_source={"excludes": [self.vector_field]},
)
top_k_nodes = []
top_k_ids = []
top_k_scores = []
hits = response["hits"]["hits"]
for hit in hits:
source = hit["_source"]
metadata = source.get("metadata", None)
text = source.get(self.text_field, None)
node_id = hit["_id"]
try:
node = metadata_dict_to_node(metadata)
node.text = text
except Exception:
# Legacy support for old metadata format
logger.warning(
f"Could not parse metadata from hit {hit['_source']['metadata']}"
)
node_info = source.get("node_info")
relationships = source.get("relationships")
start_char_idx = None
end_char_idx = None
if isinstance(node_info, dict):
start_char_idx = node_info.get("start", None)
end_char_idx = node_info.get("end", None)
node = TextNode(
text=text,
metadata=metadata,
id_=node_id,
start_char_idx=start_char_idx,
end_char_idx=end_char_idx,
relationships=relationships,
)
top_k_nodes.append(node)
top_k_ids.append(node_id)
top_k_scores.append(hit.get("_rank", hit["_score"]))
        if query.mode == VectorStoreQueryMode.HYBRID:
            # RRF returns ranks (lower is better); invert and normalize so that
            # higher values mean more similar before the final normalization.
            total_rank = sum(top_k_scores)
            top_k_scores = [(total_rank - rank) / total_rank for rank in top_k_scores]
return VectorStoreQueryResult(
nodes=top_k_nodes,
ids=top_k_ids,
similarities=_to_llama_similarities(top_k_scores),
)
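# --- Usage sketch (illustrative; endpoint, index name, and vectors are placeholders) ---
# Requires a reachable Elasticsearch cluster; the three-dimensional embedding
# is only for demonstration.
if __name__ == "__main__":
    store = ElasticsearchStore(
        index_name="demo-index",
        es_url="http://localhost:9200",
    )
    store.add([TextNode(text="hello world", embedding=[0.1, 0.2, 0.3])])
    result = store.query(
        VectorStoreQuery(query_embedding=[0.1, 0.2, 0.3], similarity_top_k=1)
    )
    print(result.ids, result.similarities)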
"""Elasticsearch vector store."""
import asyncio
import uuid
from logging import getLogger
from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast
import nest_asyncio
import numpy as np
from llama_index.schema import BaseNode, MetadataMode, TextNode
from llama_index.vector_stores.types import (
MetadataFilters,
VectorStore,
VectorStoreQuery,
VectorStoreQueryMode,
VectorStoreQueryResult,
)
from llama_index.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict
logger = getLogger(__name__)
DISTANCE_STRATEGIES = Literal[
"COSINE",
"DOT_PRODUCT",
"EUCLIDEAN_DISTANCE",
]
def _get_elasticsearch_client(
*,
es_url: Optional[str] = None,
cloud_id: Optional[str] = None,
api_key: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
) -> Any:
"""Get AsyncElasticsearch client.
Args:
es_url: Elasticsearch URL.
cloud_id: Elasticsearch cloud ID.
api_key: Elasticsearch API key.
username: Elasticsearch username.
password: Elasticsearch password.
Returns:
AsyncElasticsearch client.
Raises:
ConnectionError: If Elasticsearch client cannot connect to Elasticsearch.
"""
try:
import elasticsearch
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
if es_url and cloud_id:
raise ValueError(
"Both es_url and cloud_id are defined. Please provide only one."
)
if es_url and cloud_id:
raise ValueError(
"Both es_url and cloud_id are defined. Please provide only one."
)
connection_params: Dict[str, Any] = {}
if es_url:
connection_params["hosts"] = [es_url]
elif cloud_id:
connection_params["cloud_id"] = cloud_id
else:
raise ValueError("Please provide either elasticsearch_url or cloud_id.")
if api_key:
connection_params["api_key"] = api_key
elif username and password:
connection_params["basic_auth"] = (username, password)
sync_es_client = elasticsearch.Elasticsearch(
**connection_params, headers={"user-agent": ElasticsearchStore.get_user_agent()}
)
async_es_client = elasticsearch.AsyncElasticsearch(**connection_params)
try:
sync_es_client.info() # so don't have to 'await' to just get info
except Exception as e:
logger.error(f"Error connecting to Elasticsearch: {e}")
raise
return async_es_client
def _to_elasticsearch_filter(standard_filters: MetadataFilters) -> Dict[str, Any]:
"""Convert standard filters to Elasticsearch filter.
Args:
standard_filters: Standard Llama-index filters.
Returns:
Elasticsearch filter.
"""
if len(standard_filters.legacy_filters()) == 1:
filter = standard_filters.legacy_filters()[0]
return {
"term": {
f"metadata.{filter.key}.keyword": {
"value": filter.value,
}
}
}
else:
operands = []
for filter in standard_filters.legacy_filters():
operands.append(
{
"term": {
f"metadata.{filter.key}.keyword": {
"value": filter.value,
}
}
}
)
return {"bool": {"must": operands}}
def _to_llama_similarities(scores: List[float]) -> List[float]:
if scores is None or len(scores) == 0:
return []
scores_to_norm: np.ndarray = np.array(scores)
return np.exp(scores_to_norm - np.max(scores_to_norm)).tolist()
class ElasticsearchStore(VectorStore):
"""Elasticsearch vector store.
Args:
index_name: Name of the Elasticsearch index.
es_client: Optional. Pre-existing AsyncElasticsearch client.
es_url: Optional. Elasticsearch URL.
es_cloud_id: Optional. Elasticsearch cloud ID.
es_api_key: Optional. Elasticsearch API key.
es_user: Optional. Elasticsearch username.
es_password: Optional. Elasticsearch password.
text_field: Optional. Name of the Elasticsearch field that stores the text.
vector_field: Optional. Name of the Elasticsearch field that stores the
embedding.
batch_size: Optional. Batch size for bulk indexing. Defaults to 200.
distance_strategy: Optional. Distance strategy to use for similarity search.
Defaults to "COSINE".
Raises:
ConnectionError: If AsyncElasticsearch client cannot connect to Elasticsearch.
ValueError: If neither es_client nor es_url nor es_cloud_id is provided.
"""
stores_text: bool = True
def __init__(
self,
index_name: str,
es_client: Optional[Any] = None,
es_url: Optional[str] = None,
es_cloud_id: Optional[str] = None,
es_api_key: Optional[str] = None,
es_user: Optional[str] = None,
es_password: Optional[str] = None,
text_field: str = "content",
vector_field: str = "embedding",
batch_size: int = 200,
distance_strategy: Optional[DISTANCE_STRATEGIES] = "COSINE",
) -> None:
nest_asyncio.apply()
self.index_name = index_name
self.text_field = text_field
self.vector_field = vector_field
self.batch_size = batch_size
self.distance_strategy = distance_strategy
if es_client is not None:
self._client = es_client.options(
headers={"user-agent": self.get_user_agent()}
)
elif es_url is not None or es_cloud_id is not None:
self._client = _get_elasticsearch_client(
es_url=es_url,
username=es_user,
password=es_password,
cloud_id=es_cloud_id,
api_key=es_api_key,
)
else:
raise ValueError(
"""Either provide a pre-existing AsyncElasticsearch or valid \
credentials for creating a new connection."""
)
@property
def client(self) -> Any:
"""Get async elasticsearch client."""
return self._client
@staticmethod
def get_user_agent() -> str:
"""Get user agent for elasticsearch client."""
import llama_index
return f"llama_index-py-vs/{llama_index.__version__}"
async def _create_index_if_not_exists(
self, index_name: str, dims_length: Optional[int] = None
) -> None:
"""Create the AsyncElasticsearch index if it doesn't already exist.
Args:
index_name: Name of the AsyncElasticsearch index to create.
dims_length: Length of the embedding vectors.
"""
if await self.client.indices.exists(index=index_name):
logger.debug(f"Index {index_name} already exists. Skipping creation.")
else:
if dims_length is None:
raise ValueError(
"Cannot create index without specifying dims_length "
"when the index doesn't already exist. We infer "
"dims_length from the first embedding. Check that "
"you have provided an embedding function."
)
if self.distance_strategy == "COSINE":
similarityAlgo = "cosine"
elif self.distance_strategy == "EUCLIDEAN_DISTANCE":
similarityAlgo = "l2_norm"
elif self.distance_strategy == "DOT_PRODUCT":
similarityAlgo = "dot_product"
else:
raise ValueError(f"Similarity {self.distance_strategy} not supported.")
index_settings = {
"mappings": {
"properties": {
self.vector_field: {
"type": "dense_vector",
"dims": dims_length,
"index": True,
"similarity": similarityAlgo,
},
self.text_field: {"type": "text"},
"metadata": {
"properties": {
"document_id": {"type": "keyword"},
"doc_id": {"type": "keyword"},
"ref_doc_id": {"type": "keyword"},
}
},
}
}
}
logger.debug(
f"Creating index {index_name} with mappings {index_settings['mappings']}"
)
await self.client.indices.create(index=index_name, **index_settings)
def add(
self,
nodes: List[BaseNode],
*,
create_index_if_not_exists: bool = True,
**add_kwargs: Any,
) -> List[str]:
"""Add nodes to Elasticsearch index.
Args:
nodes: List of nodes with embeddings.
create_index_if_not_exists: Optional. Whether to create
the Elasticsearch index if it
doesn't already exist.
Defaults to True.
Returns:
List of node IDs that were added to the index.
Raises:
ImportError: If elasticsearch['async'] python package is not installed.
BulkIndexError: If AsyncElasticsearch async_bulk indexing fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.async_add(nodes, create_index_if_not_exists=create_index_if_not_exists)
)
async def async_add(
self,
nodes: List[BaseNode],
*,
create_index_if_not_exists: bool = True,
**add_kwargs: Any,
) -> List[str]:
"""Asynchronous method to add nodes to Elasticsearch index.
Args:
nodes: List of nodes with embeddings.
create_index_if_not_exists: Optional. Whether to create
the AsyncElasticsearch index if it
doesn't already exist.
Defaults to True.
Returns:
List of node IDs that were added to the index.
Raises:
ImportError: If elasticsearch python package is not installed.
BulkIndexError: If AsyncElasticsearch async_bulk indexing fails.
"""
try:
from elasticsearch.helpers import BulkIndexError, async_bulk
except ImportError:
raise ImportError(
"Could not import elasticsearch[async] python package. "
"Please install it with `pip install 'elasticsearch[async]'`."
)
if len(nodes) == 0:
return []
if create_index_if_not_exists:
dims_length = len(nodes[0].get_embedding())
await self._create_index_if_not_exists(
index_name=self.index_name, dims_length=dims_length
)
embeddings: List[List[float]] = []
texts: List[str] = []
metadatas: List[dict] = []
ids: List[str] = []
for node in nodes:
ids.append(node.node_id)
embeddings.append(node.get_embedding())
texts.append(node.get_content(metadata_mode=MetadataMode.NONE))
metadatas.append(node_to_metadata_dict(node, remove_text=True))
requests = []
return_ids = []
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
_id = ids[i] if ids else str(uuid.uuid4())
request = {
"_op_type": "index",
"_index": self.index_name,
self.vector_field: embeddings[i],
self.text_field: text,
"metadata": metadata,
"_id": _id,
}
requests.append(request)
return_ids.append(_id)
await async_bulk(
self.client, requests, chunk_size=self.batch_size, refresh=True
)
try:
success, failed = await async_bulk(
self.client, requests, stats_only=True, refresh=True
)
logger.debug(f"Added {success} and failed to add {failed} texts to index")
logger.debug(f"added texts {ids} to index")
return return_ids
except BulkIndexError as e:
logger.error(f"Error adding texts: {e}")
firstError = e.errors[0].get("index", {}).get("error", {})
logger.error(f"First error reason: {firstError.get('reason')}")
raise
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Delete node from Elasticsearch index.
Args:
ref_doc_id: ID of the node to delete.
delete_kwargs: Optional. Additional arguments to
pass to Elasticsearch delete_by_query.
Raises:
Exception: If Elasticsearch delete_by_query fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.adelete(ref_doc_id, **delete_kwargs)
)
async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Async delete node from Elasticsearch index.
Args:
ref_doc_id: ID of the node to delete.
delete_kwargs: Optional. Additional arguments to
pass to AsyncElasticsearch delete_by_query.
Raises:
Exception: If AsyncElasticsearch delete_by_query fails.
"""
try:
async with self.client as client:
res = await client.delete_by_query(
index=self.index_name,
query={"term": {"metadata.ref_doc_id": ref_doc_id}},
refresh=True,
**delete_kwargs,
)
if res["deleted"] == 0:
logger.warning(f"Could not find text {ref_doc_id} to delete")
else:
logger.debug(f"Deleted text {ref_doc_id} from index")
except Exception:
logger.error(f"Error deleting text: {ref_doc_id}")
raise
def query(
self,
query: VectorStoreQuery,
custom_query: Optional[
Callable[[Dict, Union[VectorStoreQuery, None]], Dict]
] = None,
es_filter: Optional[List[Dict]] = None,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Query index for top k most similar nodes.
Args:
query_embedding (List[float]): query embedding
custom_query: Optional. custom query function that takes in the es query
body and returns a modified query body.
This can be used to add additional query
parameters to the Elasticsearch query.
es_filter: Optional. Elasticsearch filter to apply to the
query. If filter is provided in the query,
this filter will be ignored.
Returns:
VectorStoreQueryResult: Result of the query.
Raises:
Exception: If Elasticsearch query fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.aquery(query, custom_query, es_filter, **kwargs)
)
async def aquery(
self,
query: VectorStoreQuery,
custom_query: Optional[
Callable[[Dict, Union[VectorStoreQuery, None]], Dict]
] = None,
es_filter: Optional[List[Dict]] = None,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Asynchronous query index for top k most similar nodes.
Args:
            query (VectorStoreQuery): the query, including its embedding
custom_query: Optional. custom query function that takes in the es query
body and returns a modified query body.
This can be used to add additional query
parameters to the AsyncElasticsearch query.
es_filter: Optional. AsyncElasticsearch filter to apply to the
query. If filter is provided in the query,
this filter will be ignored.
Returns:
VectorStoreQueryResult: Result of the query.
Raises:
Exception: If AsyncElasticsearch query fails.
"""
query_embedding = cast(List[float], query.query_embedding)
es_query = {}
if query.filters is not None and len(query.filters.legacy_filters()) > 0:
filter = [_to_elasticsearch_filter(query.filters)]
else:
filter = es_filter or []
if query.mode in (
VectorStoreQueryMode.DEFAULT,
VectorStoreQueryMode.HYBRID,
):
es_query["knn"] = {
"filter": filter,
"field": self.vector_field,
"query_vector": query_embedding,
"k": query.similarity_top_k,
"num_candidates": query.similarity_top_k * 10,
}
if query.mode in (
VectorStoreQueryMode.TEXT_SEARCH,
VectorStoreQueryMode.HYBRID,
):
es_query["query"] = {
"bool": {
"must": {"match": {self.text_field: {"query": query.query_str}}},
"filter": filter,
}
}
if query.mode == VectorStoreQueryMode.HYBRID:
es_query["rank"] = {"rrf": {}}
if custom_query is not None:
es_query = custom_query(es_query, query)
logger.debug(f"Calling custom_query, Query body now: {es_query}")
async with self.client as client:
response = await client.search(
index=self.index_name,
**es_query,
size=query.similarity_top_k,
_source={"excludes": [self.vector_field]},
)
top_k_nodes = []
top_k_ids = []
top_k_scores = []
hits = response["hits"]["hits"]
for hit in hits:
source = hit["_source"]
metadata = source.get("metadata", None)
text = source.get(self.text_field, None)
node_id = hit["_id"]
try:
node = metadata_dict_to_node(metadata)
node.text = text
except Exception:
# Legacy support for old metadata format
logger.warning(
f"Could not parse metadata from hit {hit['_source']['metadata']}"
)
node_info = source.get("node_info")
                relationships = source.get("relationships") or {}
start_char_idx = None
end_char_idx = None
if isinstance(node_info, dict):
start_char_idx = node_info.get("start", None)
end_char_idx = node_info.get("end", None)
node = TextNode(
text=text,
metadata=metadata,
id_=node_id,
start_char_idx=start_char_idx,
end_char_idx=end_char_idx,
relationships=relationships,
)
top_k_nodes.append(node)
top_k_ids.append(node_id)
top_k_scores.append(hit.get("_rank", hit["_score"]))
        if query.mode == VectorStoreQueryMode.HYBRID:
            total_rank = sum(top_k_scores)
            # Invert the RRF ranks (lower rank = better) into descending,
            # normalized scores before the rescaling in _to_llama_similarities.
            top_k_scores = [
                (total_rank - rank) / total_rank for rank in top_k_scores
            ]
return VectorStoreQueryResult(
nodes=top_k_nodes,
ids=top_k_ids,
similarities=_to_llama_similarities(top_k_scores),
)
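    # Illustrative query sketch (hypothetical embedding size and query text):
    #
    #     query = VectorStoreQuery(
    #         query_embedding=[0.1] * 1536,
    #         query_str="what is a vector store?",
    #         similarity_top_k=5,
    #         mode=VectorStoreQueryMode.HYBRID,
    #     )
    #     result = store.query(query)  # hybrid: kNN + keyword match, RRF-ranked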
| [
"llama_index.schema.TextNode",
"llama_index.vector_stores.utils.metadata_dict_to_node",
"llama_index.vector_stores.utils.node_to_metadata_dict"
] | [((534, 553), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (543, 553), False, 'from logging import getLogger\n'), ((2379, 2432), 'elasticsearch.AsyncElasticsearch', 'elasticsearch.AsyncElasticsearch', ([], {}), '(**connection_params)\n', (2411, 2432), False, 'import elasticsearch\n'), ((3755, 3771), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (3763, 3771), True, 'import numpy as np\n'), ((5436, 5456), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (5454, 5456), False, 'import nest_asyncio\n'), ((16834, 16874), 'typing.cast', 'cast', (['List[float]', 'query.query_embedding'], {}), '(List[float], query.query_embedding)\n', (16838, 16874), False, 'from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast\n'), ((12320, 12395), 'elasticsearch.helpers.async_bulk', 'async_bulk', (['self.client', 'requests'], {'chunk_size': 'self.batch_size', 'refresh': '(True)'}), '(self.client, requests, chunk_size=self.batch_size, refresh=True)\n', (12330, 12395), False, 'from elasticsearch.helpers import BulkIndexError, async_bulk\n'), ((9785, 9809), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (9807, 9809), False, 'import asyncio\n'), ((11712, 11757), 'llama_index.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(True)'}), '(node, remove_text=True)\n', (11733, 11757), False, 'from llama_index.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((12467, 12531), 'elasticsearch.helpers.async_bulk', 'async_bulk', (['self.client', 'requests'], {'stats_only': '(True)', 'refresh': '(True)'}), '(self.client, requests, stats_only=True, refresh=True)\n', (12477, 12531), False, 'from elasticsearch.helpers import BulkIndexError, async_bulk\n'), ((13405, 13429), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (13427, 13429), False, 'import asyncio\n'), ((15612, 15636), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (15634, 15636), False, 'import asyncio\n'), ((18734, 18765), 'llama_index.vector_stores.utils.metadata_dict_to_node', 'metadata_dict_to_node', (['metadata'], {}), '(metadata)\n', (18755, 18765), False, 'from llama_index.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((3807, 3829), 'numpy.max', 'np.max', (['scores_to_norm'], {}), '(scores_to_norm)\n', (3813, 3829), True, 'import numpy as np\n'), ((11946, 11958), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (11956, 11958), False, 'import uuid\n'), ((19408, 19551), 'llama_index.schema.TextNode', 'TextNode', ([], {'text': 'text', 'metadata': 'metadata', 'id_': 'node_id', 'start_char_idx': 'start_char_idx', 'end_char_idx': 'end_char_idx', 'relationships': 'relationships'}), '(text=text, metadata=metadata, id_=node_id, start_char_idx=\n start_char_idx, end_char_idx=end_char_idx, relationships=relationships)\n', (19416, 19551), False, 'from llama_index.schema import BaseNode, MetadataMode, TextNode\n')] |
"""Global eval handlers."""
from typing import Any
from llama_index.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler
from llama_index.callbacks.base_handler import BaseCallbackHandler
from llama_index.callbacks.honeyhive_callback import honeyhive_callback_handler
from llama_index.callbacks.open_inference_callback import OpenInferenceCallbackHandler
from llama_index.callbacks.promptlayer_handler import PromptLayerHandler
from llama_index.callbacks.simple_llm_handler import SimpleLLMHandler
from llama_index.callbacks.wandb_callback import WandbCallbackHandler
def set_global_handler(eval_mode: str, **eval_params: Any) -> None:
"""Set global eval handlers."""
import llama_index
llama_index.global_handler = create_global_handler(eval_mode, **eval_params)
def create_global_handler(eval_mode: str, **eval_params: Any) -> BaseCallbackHandler:
"""Get global eval handler."""
if eval_mode == "wandb":
handler: BaseCallbackHandler = WandbCallbackHandler(**eval_params)
elif eval_mode == "openinference":
handler = OpenInferenceCallbackHandler(**eval_params)
elif eval_mode == "arize_phoenix":
handler = arize_phoenix_callback_handler(**eval_params)
elif eval_mode == "honeyhive":
handler = honeyhive_callback_handler(**eval_params)
elif eval_mode == "promptlayer":
handler = PromptLayerHandler(**eval_params)
elif eval_mode == "simple":
handler = SimpleLLMHandler(**eval_params)
else:
raise ValueError(f"Eval mode {eval_mode} not supported.")
return handler
| [
"llama_index.callbacks.open_inference_callback.OpenInferenceCallbackHandler",
"llama_index.callbacks.simple_llm_handler.SimpleLLMHandler",
"llama_index.callbacks.wandb_callback.WandbCallbackHandler",
"llama_index.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler",
"llama_index.callbacks.honeyhive_callback.honeyhive_callback_handler",
"llama_index.callbacks.promptlayer_handler.PromptLayerHandler"
] | [((990, 1025), 'llama_index.callbacks.wandb_callback.WandbCallbackHandler', 'WandbCallbackHandler', ([], {}), '(**eval_params)\n', (1010, 1025), False, 'from llama_index.callbacks.wandb_callback import WandbCallbackHandler\n'), ((1083, 1126), 'llama_index.callbacks.open_inference_callback.OpenInferenceCallbackHandler', 'OpenInferenceCallbackHandler', ([], {}), '(**eval_params)\n', (1111, 1126), False, 'from llama_index.callbacks.open_inference_callback import OpenInferenceCallbackHandler\n'), ((1184, 1229), 'llama_index.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler', 'arize_phoenix_callback_handler', ([], {}), '(**eval_params)\n', (1214, 1229), False, 'from llama_index.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler\n'), ((1283, 1324), 'llama_index.callbacks.honeyhive_callback.honeyhive_callback_handler', 'honeyhive_callback_handler', ([], {}), '(**eval_params)\n', (1309, 1324), False, 'from llama_index.callbacks.honeyhive_callback import honeyhive_callback_handler\n'), ((1380, 1413), 'llama_index.callbacks.promptlayer_handler.PromptLayerHandler', 'PromptLayerHandler', ([], {}), '(**eval_params)\n', (1398, 1413), False, 'from llama_index.callbacks.promptlayer_handler import PromptLayerHandler\n'), ((1464, 1495), 'llama_index.callbacks.simple_llm_handler.SimpleLLMHandler', 'SimpleLLMHandler', ([], {}), '(**eval_params)\n', (1480, 1495), False, 'from llama_index.callbacks.simple_llm_handler import SimpleLLMHandler\n')] |
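# Illustrative usage sketch (not part of the original source; assumes the
# usual top-level re-export of set_global_handler from the llama_index
# package):
#
#     import llama_index
#
#     llama_index.set_global_handler("simple")  # console tracing
#     assert llama_index.global_handler is not None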
"""Global eval handlers."""
from typing import Any
from llama_index.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler
from llama_index.callbacks.base_handler import BaseCallbackHandler
from llama_index.callbacks.honeyhive_callback import honeyhive_callback_handler
from llama_index.callbacks.open_inference_callback import OpenInferenceCallbackHandler
from llama_index.callbacks.promptlayer_handler import PromptLayerHandler
from llama_index.callbacks.simple_llm_handler import SimpleLLMHandler
from llama_index.callbacks.wandb_callback import WandbCallbackHandler
def set_global_handler(eval_mode: str, **eval_params: Any) -> None:
"""Set global eval handlers."""
import llama_index
llama_index.global_handler = create_global_handler(eval_mode, **eval_params)
def create_global_handler(eval_mode: str, **eval_params: Any) -> BaseCallbackHandler:
"""Get global eval handler."""
if eval_mode == "wandb":
handler: BaseCallbackHandler = WandbCallbackHandler(**eval_params)
elif eval_mode == "openinference":
handler = OpenInferenceCallbackHandler(**eval_params)
elif eval_mode == "arize_phoenix":
handler = arize_phoenix_callback_handler(**eval_params)
elif eval_mode == "honeyhive":
handler = honeyhive_callback_handler(**eval_params)
elif eval_mode == "promptlayer":
handler = PromptLayerHandler(**eval_params)
elif eval_mode == "simple":
handler = SimpleLLMHandler(**eval_params)
else:
raise ValueError(f"Eval mode {eval_mode} not supported.")
return handler
| [
"llama_index.callbacks.open_inference_callback.OpenInferenceCallbackHandler",
"llama_index.callbacks.simple_llm_handler.SimpleLLMHandler",
"llama_index.callbacks.wandb_callback.WandbCallbackHandler",
"llama_index.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler",
"llama_index.callbacks.honeyhive_callback.honeyhive_callback_handler",
"llama_index.callbacks.promptlayer_handler.PromptLayerHandler"
] | [((990, 1025), 'llama_index.callbacks.wandb_callback.WandbCallbackHandler', 'WandbCallbackHandler', ([], {}), '(**eval_params)\n', (1010, 1025), False, 'from llama_index.callbacks.wandb_callback import WandbCallbackHandler\n'), ((1083, 1126), 'llama_index.callbacks.open_inference_callback.OpenInferenceCallbackHandler', 'OpenInferenceCallbackHandler', ([], {}), '(**eval_params)\n', (1111, 1126), False, 'from llama_index.callbacks.open_inference_callback import OpenInferenceCallbackHandler\n'), ((1184, 1229), 'llama_index.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler', 'arize_phoenix_callback_handler', ([], {}), '(**eval_params)\n', (1214, 1229), False, 'from llama_index.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler\n'), ((1283, 1324), 'llama_index.callbacks.honeyhive_callback.honeyhive_callback_handler', 'honeyhive_callback_handler', ([], {}), '(**eval_params)\n', (1309, 1324), False, 'from llama_index.callbacks.honeyhive_callback import honeyhive_callback_handler\n'), ((1380, 1413), 'llama_index.callbacks.promptlayer_handler.PromptLayerHandler', 'PromptLayerHandler', ([], {}), '(**eval_params)\n', (1398, 1413), False, 'from llama_index.callbacks.promptlayer_handler import PromptLayerHandler\n'), ((1464, 1495), 'llama_index.callbacks.simple_llm_handler.SimpleLLMHandler', 'SimpleLLMHandler', ([], {}), '(**eval_params)\n', (1480, 1495), False, 'from llama_index.callbacks.simple_llm_handler import SimpleLLMHandler\n')] |
"""Google GenerativeAI Attributed Question and Answering (AQA) service.
The GenAI Semantic AQA API is a managed end-to-end service that allows
developers to generate responses to a user query that are grounded in
specified passages. For more information visit:
https://developers.generativeai.google/guide
"""
import logging
from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast
from llama_index.legacy.bridge.pydantic import BaseModel # type: ignore
from llama_index.legacy.callbacks.schema import CBEventType, EventPayload
from llama_index.legacy.core.response.schema import Response
from llama_index.legacy.indices.query.schema import QueryBundle
from llama_index.legacy.prompts.mixin import PromptDictType
from llama_index.legacy.response_synthesizers.base import BaseSynthesizer, QueryTextType
from llama_index.legacy.schema import MetadataMode, NodeWithScore, TextNode
from llama_index.legacy.types import RESPONSE_TEXT_TYPE
from llama_index.legacy.vector_stores.google.generativeai import google_service_context
if TYPE_CHECKING:
import google.ai.generativelanguage as genai
_logger = logging.getLogger(__name__)
_import_err_msg = "`google.generativeai` package not found, please run `pip install google-generativeai`"
_separator = "\n\n"
class SynthesizedResponse(BaseModel):
"""Response of `GoogleTextSynthesizer.get_response`."""
answer: str
"""The grounded response to the user's question."""
attributed_passages: List[str]
"""The list of passages the AQA model used for its response."""
answerable_probability: float
"""The model's estimate of the probability that its answer is correct and grounded in the input passages."""
class GoogleTextSynthesizer(BaseSynthesizer):
"""Google's Attributed Question and Answering service.
Given a user's query and a list of passages, Google's server will return
    a response that is grounded in the provided list of passages. It will not
base the response on parametric memory.
"""
_client: Any
_temperature: float
_answer_style: Any
_safety_setting: List[Any]
def __init__(
self,
*,
temperature: float,
answer_style: Any,
safety_setting: List[Any],
**kwargs: Any,
):
"""Create a new Google AQA.
Prefer to use the factory `from_defaults` instead for type safety.
See `from_defaults` for more documentation.
"""
try:
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
super().__init__(
service_context=google_service_context,
output_cls=SynthesizedResponse,
**kwargs,
)
self._client = genaix.build_generative_service()
self._temperature = temperature
self._answer_style = answer_style
self._safety_setting = safety_setting
# Type safe factory that is only available if Google is installed.
@classmethod
def from_defaults(
cls,
temperature: float = 0.7,
answer_style: int = 1,
safety_setting: List["genai.SafetySetting"] = [],
) -> "GoogleTextSynthesizer":
"""Create a new Google AQA.
Example:
responder = GoogleTextSynthesizer.create(
temperature=0.7,
answer_style=AnswerStyle.ABSTRACTIVE,
safety_setting=[
SafetySetting(
category=HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold=HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
),
]
)
Args:
temperature: 0.0 to 1.0.
answer_style: See `google.ai.generativelanguage.GenerateAnswerRequest.AnswerStyle`
The default is ABSTRACTIVE (1).
safety_setting: See `google.ai.generativelanguage.SafetySetting`.
Returns:
an instance of GoogleTextSynthesizer.
"""
return cls(
temperature=temperature,
answer_style=answer_style,
safety_setting=safety_setting,
)
def get_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> SynthesizedResponse:
"""Generate a grounded response on provided passages.
Args:
query_str: The user's question.
text_chunks: A list of passages that should be used to answer the
question.
Returns:
A `SynthesizedResponse` object.
"""
try:
import google.ai.generativelanguage as genai
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
client = cast(genai.GenerativeServiceClient, self._client)
response = genaix.generate_answer(
prompt=query_str,
passages=list(text_chunks),
answer_style=self._answer_style,
safety_settings=self._safety_setting,
temperature=self._temperature,
client=client,
)
return SynthesizedResponse(
answer=response.answer,
attributed_passages=[
passage.text for passage in response.attributed_passages
],
answerable_probability=response.answerable_probability,
)
async def aget_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
# TODO: Implement a true async version.
return self.get_response(query_str, text_chunks, **response_kwargs)
def synthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> Response:
"""Returns a grounded response based on provided passages.
Returns:
Response's `source_nodes` will begin with a list of attributed
passages. These passages are the ones that were used to construct
            the grounded response. These passages will always have no score,
            which is the only way to mark them as attributed passages. The list
            then continues with the originally provided passages, which will
            have a score from the retrieval.
            Response's `metadata` may also have an entry with key
`answerable_probability`, which is the model's estimate of the
probability that its answer is correct and grounded in the input
passages.
"""
if len(nodes) == 0:
return Response("Empty Response")
if isinstance(query, str):
query = QueryBundle(query_str=query)
with self._callback_manager.event(
CBEventType.SYNTHESIZE, payload={EventPayload.QUERY_STR: query.query_str}
) as event:
internal_response = self.get_response(
query_str=query.query_str,
text_chunks=[
n.node.get_content(metadata_mode=MetadataMode.LLM) for n in nodes
],
**response_kwargs,
)
additional_source_nodes = list(additional_source_nodes or [])
external_response = self._prepare_external_response(
internal_response, nodes + additional_source_nodes
)
event.on_end(payload={EventPayload.RESPONSE: external_response})
return external_response
async def asynthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> Response:
# TODO: Implement a true async version.
return self.synthesize(query, nodes, additional_source_nodes, **response_kwargs)
def _prepare_external_response(
self,
response: SynthesizedResponse,
source_nodes: List[NodeWithScore],
) -> Response:
return Response(
response=response.answer,
source_nodes=[
NodeWithScore(node=TextNode(text=passage))
for passage in response.attributed_passages
]
+ source_nodes,
metadata={
"answerable_probability": response.answerable_probability,
},
)
def _get_prompts(self) -> PromptDictType:
# Not used.
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
# Not used.
...
| [
"llama_index.legacy.schema.TextNode",
"llama_index.legacy.indices.query.schema.QueryBundle",
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.build_generative_service",
"llama_index.legacy.core.response.schema.Response"
] | [((1114, 1141), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1131, 1141), False, 'import logging\n'), ((2809, 2842), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.build_generative_service', 'genaix.build_generative_service', ([], {}), '()\n', (2840, 2842), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((4901, 4950), 'typing.cast', 'cast', (['genai.GenerativeServiceClient', 'self._client'], {}), '(genai.GenerativeServiceClient, self._client)\n', (4905, 4950), False, 'from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast\n'), ((6844, 6870), 'llama_index.legacy.core.response.schema.Response', 'Response', (['"""Empty Response"""'], {}), "('Empty Response')\n", (6852, 6870), False, 'from llama_index.legacy.core.response.schema import Response\n'), ((6927, 6955), 'llama_index.legacy.indices.query.schema.QueryBundle', 'QueryBundle', ([], {'query_str': 'query'}), '(query_str=query)\n', (6938, 6955), False, 'from llama_index.legacy.indices.query.schema import QueryBundle\n'), ((8366, 8388), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': 'passage'}), '(text=passage)\n', (8374, 8388), False, 'from llama_index.legacy.schema import MetadataMode, NodeWithScore, TextNode\n')] |
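# Illustrative end-to-end sketch (hypothetical query and passage; requires the
# google-generativeai package and access to the AQA API):
#
#     synthesizer = GoogleTextSynthesizer.from_defaults(temperature=0.7)
#     response = synthesizer.get_response(
#         query_str="What is the meaning of life?",
#         text_chunks=["The meaning of life is 42."],
#     )
#     print(response.answer, response.answerable_probability)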
"""Google GenerativeAI Attributed Question and Answering (AQA) service.
The GenAI Semantic AQA API is a managed end to end service that allows
developers to create responses grounded on specified passages based on
a user query. For more information visit:
https://developers.generativeai.google/guide
"""
import logging
from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast
from llama_index.legacy.bridge.pydantic import BaseModel # type: ignore
from llama_index.legacy.callbacks.schema import CBEventType, EventPayload
from llama_index.legacy.core.response.schema import Response
from llama_index.legacy.indices.query.schema import QueryBundle
from llama_index.legacy.prompts.mixin import PromptDictType
from llama_index.legacy.response_synthesizers.base import BaseSynthesizer, QueryTextType
from llama_index.legacy.schema import MetadataMode, NodeWithScore, TextNode
from llama_index.legacy.types import RESPONSE_TEXT_TYPE
from llama_index.legacy.vector_stores.google.generativeai import google_service_context
if TYPE_CHECKING:
import google.ai.generativelanguage as genai
_logger = logging.getLogger(__name__)
_import_err_msg = "`google.generativeai` package not found, please run `pip install google-generativeai`"
_separator = "\n\n"
class SynthesizedResponse(BaseModel):
"""Response of `GoogleTextSynthesizer.get_response`."""
answer: str
"""The grounded response to the user's question."""
attributed_passages: List[str]
"""The list of passages the AQA model used for its response."""
answerable_probability: float
"""The model's estimate of the probability that its answer is correct and grounded in the input passages."""
class GoogleTextSynthesizer(BaseSynthesizer):
"""Google's Attributed Question and Answering service.
Given a user's query and a list of passages, Google's server will return
a response that is grounded to the provided list of passages. It will not
base the response on parametric memory.
"""
_client: Any
_temperature: float
_answer_style: Any
_safety_setting: List[Any]
def __init__(
self,
*,
temperature: float,
answer_style: Any,
safety_setting: List[Any],
**kwargs: Any,
):
"""Create a new Google AQA.
Prefer to use the factory `from_defaults` instead for type safety.
See `from_defaults` for more documentation.
"""
try:
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
super().__init__(
service_context=google_service_context,
output_cls=SynthesizedResponse,
**kwargs,
)
self._client = genaix.build_generative_service()
self._temperature = temperature
self._answer_style = answer_style
self._safety_setting = safety_setting
# Type safe factory that is only available if Google is installed.
@classmethod
def from_defaults(
cls,
temperature: float = 0.7,
answer_style: int = 1,
safety_setting: List["genai.SafetySetting"] = [],
) -> "GoogleTextSynthesizer":
"""Create a new Google AQA.
Example:
responder = GoogleTextSynthesizer.create(
temperature=0.7,
answer_style=AnswerStyle.ABSTRACTIVE,
safety_setting=[
SafetySetting(
category=HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold=HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
),
]
)
Args:
temperature: 0.0 to 1.0.
answer_style: See `google.ai.generativelanguage.GenerateAnswerRequest.AnswerStyle`
The default is ABSTRACTIVE (1).
safety_setting: See `google.ai.generativelanguage.SafetySetting`.
Returns:
an instance of GoogleTextSynthesizer.
"""
return cls(
temperature=temperature,
answer_style=answer_style,
safety_setting=safety_setting,
)
def get_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> SynthesizedResponse:
"""Generate a grounded response on provided passages.
Args:
query_str: The user's question.
text_chunks: A list of passages that should be used to answer the
question.
Returns:
A `SynthesizedResponse` object.
"""
try:
import google.ai.generativelanguage as genai
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
client = cast(genai.GenerativeServiceClient, self._client)
response = genaix.generate_answer(
prompt=query_str,
passages=list(text_chunks),
answer_style=self._answer_style,
safety_settings=self._safety_setting,
temperature=self._temperature,
client=client,
)
return SynthesizedResponse(
answer=response.answer,
attributed_passages=[
passage.text for passage in response.attributed_passages
],
answerable_probability=response.answerable_probability,
)
async def aget_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
# TODO: Implement a true async version.
return self.get_response(query_str, text_chunks, **response_kwargs)
def synthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> Response:
"""Returns a grounded response based on provided passages.
Returns:
Response's `source_nodes` will begin with a list of attributed
passages. These passages are the ones that were used to construct
the grounded response. These passages will always have no score,
the only way to mark them as attributed passages. Then, the list
will follow with the originally provided passages, which will have
a score from the retrieval.
Response's `metadata` may also have have an entry with key
`answerable_probability`, which is the model's estimate of the
probability that its answer is correct and grounded in the input
passages.
"""
if len(nodes) == 0:
return Response("Empty Response")
if isinstance(query, str):
query = QueryBundle(query_str=query)
with self._callback_manager.event(
CBEventType.SYNTHESIZE, payload={EventPayload.QUERY_STR: query.query_str}
) as event:
internal_response = self.get_response(
query_str=query.query_str,
text_chunks=[
n.node.get_content(metadata_mode=MetadataMode.LLM) for n in nodes
],
**response_kwargs,
)
additional_source_nodes = list(additional_source_nodes or [])
external_response = self._prepare_external_response(
internal_response, nodes + additional_source_nodes
)
event.on_end(payload={EventPayload.RESPONSE: external_response})
return external_response
async def asynthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> Response:
# TODO: Implement a true async version.
return self.synthesize(query, nodes, additional_source_nodes, **response_kwargs)
def _prepare_external_response(
self,
response: SynthesizedResponse,
source_nodes: List[NodeWithScore],
) -> Response:
return Response(
response=response.answer,
source_nodes=[
NodeWithScore(node=TextNode(text=passage))
for passage in response.attributed_passages
]
+ source_nodes,
metadata={
"answerable_probability": response.answerable_probability,
},
)
def _get_prompts(self) -> PromptDictType:
# Not used.
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
# Not used.
...
| [
"llama_index.legacy.schema.TextNode",
"llama_index.legacy.indices.query.schema.QueryBundle",
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.build_generative_service",
"llama_index.legacy.core.response.schema.Response"
] | [((1114, 1141), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1131, 1141), False, 'import logging\n'), ((2809, 2842), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.build_generative_service', 'genaix.build_generative_service', ([], {}), '()\n', (2840, 2842), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((4901, 4950), 'typing.cast', 'cast', (['genai.GenerativeServiceClient', 'self._client'], {}), '(genai.GenerativeServiceClient, self._client)\n', (4905, 4950), False, 'from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast\n'), ((6844, 6870), 'llama_index.legacy.core.response.schema.Response', 'Response', (['"""Empty Response"""'], {}), "('Empty Response')\n", (6852, 6870), False, 'from llama_index.legacy.core.response.schema import Response\n'), ((6927, 6955), 'llama_index.legacy.indices.query.schema.QueryBundle', 'QueryBundle', ([], {'query_str': 'query'}), '(query_str=query)\n', (6938, 6955), False, 'from llama_index.legacy.indices.query.schema import QueryBundle\n'), ((8366, 8388), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': 'passage'}), '(text=passage)\n', (8374, 8388), False, 'from llama_index.legacy.schema import MetadataMode, NodeWithScore, TextNode\n')] |
"""Elasticsearch vector store."""
import asyncio
import uuid
from logging import getLogger
from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast
import nest_asyncio
import numpy as np
from llama_index.legacy.bridge.pydantic import PrivateAttr
from llama_index.legacy.schema import BaseNode, MetadataMode, TextNode
from llama_index.legacy.vector_stores.types import (
BasePydanticVectorStore,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryMode,
VectorStoreQueryResult,
)
from llama_index.legacy.vector_stores.utils import (
metadata_dict_to_node,
node_to_metadata_dict,
)
logger = getLogger(__name__)
DISTANCE_STRATEGIES = Literal[
"COSINE",
"DOT_PRODUCT",
"EUCLIDEAN_DISTANCE",
]
def _get_elasticsearch_client(
*,
es_url: Optional[str] = None,
cloud_id: Optional[str] = None,
api_key: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
) -> Any:
"""Get AsyncElasticsearch client.
Args:
es_url: Elasticsearch URL.
cloud_id: Elasticsearch cloud ID.
api_key: Elasticsearch API key.
username: Elasticsearch username.
password: Elasticsearch password.
Returns:
AsyncElasticsearch client.
Raises:
ConnectionError: If Elasticsearch client cannot connect to Elasticsearch.
"""
try:
import elasticsearch
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
if es_url and cloud_id:
raise ValueError(
"Both es_url and cloud_id are defined. Please provide only one."
)
connection_params: Dict[str, Any] = {}
if es_url:
connection_params["hosts"] = [es_url]
elif cloud_id:
connection_params["cloud_id"] = cloud_id
else:
raise ValueError("Please provide either elasticsearch_url or cloud_id.")
if api_key:
connection_params["api_key"] = api_key
elif username and password:
connection_params["basic_auth"] = (username, password)
sync_es_client = elasticsearch.Elasticsearch(
**connection_params, headers={"user-agent": ElasticsearchStore.get_user_agent()}
)
async_es_client = elasticsearch.AsyncElasticsearch(**connection_params)
try:
sync_es_client.info() # so don't have to 'await' to just get info
except Exception as e:
logger.error(f"Error connecting to Elasticsearch: {e}")
raise
return async_es_client
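# Illustrative connection sketches (hypothetical endpoint, cloud ID, and key):
#
#     client = _get_elasticsearch_client(es_url="http://localhost:9200")
#     client = _get_elasticsearch_client(
#         cloud_id="my-deployment:abc123", api_key="my-api-key"
#     )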
def _to_elasticsearch_filter(standard_filters: MetadataFilters) -> Dict[str, Any]:
"""Convert standard filters to Elasticsearch filter.
Args:
standard_filters: Standard Llama-index filters.
Returns:
Elasticsearch filter.
"""
if len(standard_filters.legacy_filters()) == 1:
filter = standard_filters.legacy_filters()[0]
return {
"term": {
f"metadata.{filter.key}.keyword": {
"value": filter.value,
}
}
}
else:
operands = []
for filter in standard_filters.legacy_filters():
operands.append(
{
"term": {
f"metadata.{filter.key}.keyword": {
"value": filter.value,
}
}
}
)
return {"bool": {"must": operands}}
def _to_llama_similarities(scores: List[float]) -> List[float]:
if scores is None or len(scores) == 0:
return []
scores_to_norm: np.ndarray = np.array(scores)
return np.exp(scores_to_norm - np.max(scores_to_norm)).tolist()
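# Worked example: scores [1.0, 0.5] become [exp(0.0), exp(-0.5)] ~= [1.0, 0.61];
# subtracting the max caps the best similarity at 1.0 while preserving order.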
class ElasticsearchStore(BasePydanticVectorStore):
"""Elasticsearch vector store.
Args:
index_name: Name of the Elasticsearch index.
es_client: Optional. Pre-existing AsyncElasticsearch client.
es_url: Optional. Elasticsearch URL.
es_cloud_id: Optional. Elasticsearch cloud ID.
es_api_key: Optional. Elasticsearch API key.
es_user: Optional. Elasticsearch username.
es_password: Optional. Elasticsearch password.
text_field: Optional. Name of the Elasticsearch field that stores the text.
vector_field: Optional. Name of the Elasticsearch field that stores the
embedding.
batch_size: Optional. Batch size for bulk indexing. Defaults to 200.
distance_strategy: Optional. Distance strategy to use for similarity search.
Defaults to "COSINE".
Raises:
ConnectionError: If AsyncElasticsearch client cannot connect to Elasticsearch.
ValueError: If neither es_client nor es_url nor es_cloud_id is provided.
"""
stores_text: bool = True
index_name: str
es_client: Optional[Any]
es_url: Optional[str]
es_cloud_id: Optional[str]
es_api_key: Optional[str]
es_user: Optional[str]
es_password: Optional[str]
text_field: str = "content"
vector_field: str = "embedding"
batch_size: int = 200
distance_strategy: Optional[DISTANCE_STRATEGIES] = "COSINE"
_client = PrivateAttr()
def __init__(
self,
index_name: str,
es_client: Optional[Any] = None,
es_url: Optional[str] = None,
es_cloud_id: Optional[str] = None,
es_api_key: Optional[str] = None,
es_user: Optional[str] = None,
es_password: Optional[str] = None,
text_field: str = "content",
vector_field: str = "embedding",
batch_size: int = 200,
distance_strategy: Optional[DISTANCE_STRATEGIES] = "COSINE",
) -> None:
nest_asyncio.apply()
if es_client is not None:
self._client = es_client.options(
headers={"user-agent": self.get_user_agent()}
)
elif es_url is not None or es_cloud_id is not None:
self._client = _get_elasticsearch_client(
es_url=es_url,
username=es_user,
password=es_password,
cloud_id=es_cloud_id,
api_key=es_api_key,
)
else:
raise ValueError(
"""Either provide a pre-existing AsyncElasticsearch or valid \
credentials for creating a new connection."""
)
super().__init__(
index_name=index_name,
es_client=es_client,
es_url=es_url,
es_cloud_id=es_cloud_id,
es_api_key=es_api_key,
es_user=es_user,
es_password=es_password,
text_field=text_field,
vector_field=vector_field,
batch_size=batch_size,
distance_strategy=distance_strategy,
)
@property
def client(self) -> Any:
"""Get async elasticsearch client."""
return self._client
@staticmethod
def get_user_agent() -> str:
"""Get user agent for elasticsearch client."""
import llama_index.legacy
return f"llama_index-py-vs/{llama_index.legacy.__version__}"
async def _create_index_if_not_exists(
self, index_name: str, dims_length: Optional[int] = None
) -> None:
"""Create the AsyncElasticsearch index if it doesn't already exist.
Args:
index_name: Name of the AsyncElasticsearch index to create.
dims_length: Length of the embedding vectors.
"""
        if await self.client.indices.exists(index=index_name):
logger.debug(f"Index {index_name} already exists. Skipping creation.")
else:
if dims_length is None:
raise ValueError(
"Cannot create index without specifying dims_length "
"when the index doesn't already exist. We infer "
"dims_length from the first embedding. Check that "
"you have provided an embedding function."
)
if self.distance_strategy == "COSINE":
similarityAlgo = "cosine"
elif self.distance_strategy == "EUCLIDEAN_DISTANCE":
similarityAlgo = "l2_norm"
elif self.distance_strategy == "DOT_PRODUCT":
similarityAlgo = "dot_product"
else:
raise ValueError(f"Similarity {self.distance_strategy} not supported.")
index_settings = {
"mappings": {
"properties": {
self.vector_field: {
"type": "dense_vector",
"dims": dims_length,
"index": True,
"similarity": similarityAlgo,
},
self.text_field: {"type": "text"},
"metadata": {
"properties": {
"document_id": {"type": "keyword"},
"doc_id": {"type": "keyword"},
"ref_doc_id": {"type": "keyword"},
}
},
}
}
}
logger.debug(
f"Creating index {index_name} with mappings {index_settings['mappings']}"
)
await self.client.indices.create(index=index_name, **index_settings)
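        # For example, with a (hypothetical) dims_length=384 and the default
        # "COSINE" strategy, the vector field above is mapped as:
        #     {"type": "dense_vector", "dims": 384, "index": True,
        #      "similarity": "cosine"}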
def add(
self,
nodes: List[BaseNode],
*,
create_index_if_not_exists: bool = True,
**add_kwargs: Any,
) -> List[str]:
"""Add nodes to Elasticsearch index.
Args:
nodes: List of nodes with embeddings.
create_index_if_not_exists: Optional. Whether to create
the Elasticsearch index if it
doesn't already exist.
Defaults to True.
Returns:
List of node IDs that were added to the index.
Raises:
ImportError: If elasticsearch['async'] python package is not installed.
BulkIndexError: If AsyncElasticsearch async_bulk indexing fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.async_add(nodes, create_index_if_not_exists=create_index_if_not_exists)
)
async def async_add(
self,
nodes: List[BaseNode],
*,
create_index_if_not_exists: bool = True,
**add_kwargs: Any,
) -> List[str]:
"""Asynchronous method to add nodes to Elasticsearch index.
Args:
nodes: List of nodes with embeddings.
create_index_if_not_exists: Optional. Whether to create
the AsyncElasticsearch index if it
doesn't already exist.
Defaults to True.
Returns:
List of node IDs that were added to the index.
Raises:
            ImportError: If elasticsearch[async] python package is not installed.
BulkIndexError: If AsyncElasticsearch async_bulk indexing fails.
"""
try:
from elasticsearch.helpers import BulkIndexError, async_bulk
except ImportError:
raise ImportError(
"Could not import elasticsearch[async] python package. "
"Please install it with `pip install 'elasticsearch[async]'`."
)
if len(nodes) == 0:
return []
if create_index_if_not_exists:
dims_length = len(nodes[0].get_embedding())
await self._create_index_if_not_exists(
index_name=self.index_name, dims_length=dims_length
)
embeddings: List[List[float]] = []
texts: List[str] = []
metadatas: List[dict] = []
ids: List[str] = []
for node in nodes:
ids.append(node.node_id)
embeddings.append(node.get_embedding())
texts.append(node.get_content(metadata_mode=MetadataMode.NONE))
metadatas.append(node_to_metadata_dict(node, remove_text=True))
requests = []
return_ids = []
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
_id = ids[i] if ids else str(uuid.uuid4())
request = {
"_op_type": "index",
"_index": self.index_name,
self.vector_field: embeddings[i],
self.text_field: text,
"metadata": metadata,
"_id": _id,
}
requests.append(request)
return_ids.append(_id)
        try:
            # stats_only=True makes async_bulk return (success, failed) counts
            # while still sending every request in chunks of self.batch_size.
            success, failed = await async_bulk(
                self.client,
                requests,
                chunk_size=self.batch_size,
                stats_only=True,
                refresh=True,
            )
logger.debug(f"Added {success} and failed to add {failed} texts to index")
logger.debug(f"added texts {ids} to index")
return return_ids
except BulkIndexError as e:
logger.error(f"Error adding texts: {e}")
firstError = e.errors[0].get("index", {}).get("error", {})
logger.error(f"First error reason: {firstError.get('reason')}")
raise
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Delete node from Elasticsearch index.
Args:
ref_doc_id: ID of the node to delete.
delete_kwargs: Optional. Additional arguments to
pass to Elasticsearch delete_by_query.
Raises:
Exception: If Elasticsearch delete_by_query fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.adelete(ref_doc_id, **delete_kwargs)
)
async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Async delete node from Elasticsearch index.
Args:
ref_doc_id: ID of the node to delete.
delete_kwargs: Optional. Additional arguments to
pass to AsyncElasticsearch delete_by_query.
Raises:
Exception: If AsyncElasticsearch delete_by_query fails.
"""
try:
async with self.client as client:
res = await client.delete_by_query(
index=self.index_name,
query={"term": {"metadata.ref_doc_id": ref_doc_id}},
refresh=True,
**delete_kwargs,
)
if res["deleted"] == 0:
logger.warning(f"Could not find text {ref_doc_id} to delete")
else:
logger.debug(f"Deleted text {ref_doc_id} from index")
except Exception:
logger.error(f"Error deleting text: {ref_doc_id}")
raise
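    # Illustrative usage sketch (hypothetical document ID): deleting by
    # ref_doc_id removes every chunk that was indexed from that document.
    #
    #     store.delete("my-ref-doc-id")
    #     # or, from inside a running event loop:
    #     await store.adelete("my-ref-doc-id")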
def query(
self,
query: VectorStoreQuery,
custom_query: Optional[
Callable[[Dict, Union[VectorStoreQuery, None]], Dict]
] = None,
es_filter: Optional[List[Dict]] = None,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Query index for top k most similar nodes.
Args:
            query (VectorStoreQuery): the query, including its embedding
custom_query: Optional. custom query function that takes in the es query
body and returns a modified query body.
This can be used to add additional query
parameters to the Elasticsearch query.
es_filter: Optional. Elasticsearch filter to apply to the
query. If filter is provided in the query,
this filter will be ignored.
Returns:
VectorStoreQueryResult: Result of the query.
Raises:
Exception: If Elasticsearch query fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.aquery(query, custom_query, es_filter, **kwargs)
)
async def aquery(
self,
query: VectorStoreQuery,
custom_query: Optional[
Callable[[Dict, Union[VectorStoreQuery, None]], Dict]
] = None,
es_filter: Optional[List[Dict]] = None,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Asynchronous query index for top k most similar nodes.
Args:
            query (VectorStoreQuery): the query, including its embedding
custom_query: Optional. custom query function that takes in the es query
body and returns a modified query body.
This can be used to add additional query
parameters to the AsyncElasticsearch query.
es_filter: Optional. AsyncElasticsearch filter to apply to the
query. If filter is provided in the query,
this filter will be ignored.
Returns:
VectorStoreQueryResult: Result of the query.
Raises:
Exception: If AsyncElasticsearch query fails.
"""
query_embedding = cast(List[float], query.query_embedding)
es_query = {}
if query.filters is not None and len(query.filters.legacy_filters()) > 0:
filter = [_to_elasticsearch_filter(query.filters)]
else:
filter = es_filter or []
if query.mode in (
VectorStoreQueryMode.DEFAULT,
VectorStoreQueryMode.HYBRID,
):
es_query["knn"] = {
"filter": filter,
"field": self.vector_field,
"query_vector": query_embedding,
"k": query.similarity_top_k,
"num_candidates": query.similarity_top_k * 10,
}
if query.mode in (
VectorStoreQueryMode.TEXT_SEARCH,
VectorStoreQueryMode.HYBRID,
):
es_query["query"] = {
"bool": {
"must": {"match": {self.text_field: {"query": query.query_str}}},
"filter": filter,
}
}
if query.mode == VectorStoreQueryMode.HYBRID:
es_query["rank"] = {"rrf": {}}
if custom_query is not None:
es_query = custom_query(es_query, query)
logger.debug(f"Calling custom_query, Query body now: {es_query}")
async with self.client as client:
response = await client.search(
index=self.index_name,
**es_query,
size=query.similarity_top_k,
_source={"excludes": [self.vector_field]},
)
top_k_nodes = []
top_k_ids = []
top_k_scores = []
hits = response["hits"]["hits"]
for hit in hits:
source = hit["_source"]
metadata = source.get("metadata", None)
text = source.get(self.text_field, None)
node_id = hit["_id"]
try:
node = metadata_dict_to_node(metadata)
node.text = text
except Exception:
# Legacy support for old metadata format
logger.warning(
f"Could not parse metadata from hit {hit['_source']['metadata']}"
)
node_info = source.get("node_info")
relationships = source.get("relationships") or {}
start_char_idx = None
end_char_idx = None
if isinstance(node_info, dict):
start_char_idx = node_info.get("start", None)
end_char_idx = node_info.get("end", None)
node = TextNode(
text=text,
metadata=metadata,
id_=node_id,
start_char_idx=start_char_idx,
end_char_idx=end_char_idx,
relationships=relationships,
)
top_k_nodes.append(node)
top_k_ids.append(node_id)
top_k_scores.append(hit.get("_rank", hit["_score"]))
        if query.mode == VectorStoreQueryMode.HYBRID:
            total_rank = sum(top_k_scores)
            # Invert the RRF ranks (lower rank = better) into descending,
            # normalized scores before the rescaling in _to_llama_similarities.
            top_k_scores = [
                (total_rank - rank) / total_rank for rank in top_k_scores
            ]
return VectorStoreQueryResult(
nodes=top_k_nodes,
ids=top_k_ids,
similarities=_to_llama_similarities(top_k_scores),
)
| [
"llama_index.legacy.vector_stores.utils.metadata_dict_to_node",
"llama_index.legacy.bridge.pydantic.PrivateAttr",
"llama_index.legacy.schema.TextNode",
"llama_index.legacy.vector_stores.utils.node_to_metadata_dict"
] | [((640, 659), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (649, 659), False, 'from logging import getLogger\n'), ((2343, 2396), 'elasticsearch.AsyncElasticsearch', 'elasticsearch.AsyncElasticsearch', ([], {}), '(**connection_params)\n', (2375, 2396), False, 'import elasticsearch\n'), ((3719, 3735), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (3727, 3735), True, 'import numpy as np\n'), ((5274, 5287), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (5285, 5287), False, 'from llama_index.legacy.bridge.pydantic import PrivateAttr\n'), ((5793, 5813), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (5811, 5813), False, 'import nest_asyncio\n'), ((17423, 17463), 'typing.cast', 'cast', (['List[float]', 'query.query_embedding'], {}), '(List[float], query.query_embedding)\n', (17427, 17463), False, 'from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast\n'), ((12909, 12984), 'elasticsearch.helpers.async_bulk', 'async_bulk', (['self.client', 'requests'], {'chunk_size': 'self.batch_size', 'refresh': '(True)'}), '(self.client, requests, chunk_size=self.batch_size, refresh=True)\n', (12919, 12984), False, 'from elasticsearch.helpers import BulkIndexError, async_bulk\n'), ((10374, 10398), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (10396, 10398), False, 'import asyncio\n'), ((12301, 12346), 'llama_index.legacy.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(True)'}), '(node, remove_text=True)\n', (12322, 12346), False, 'from llama_index.legacy.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((13056, 13120), 'elasticsearch.helpers.async_bulk', 'async_bulk', (['self.client', 'requests'], {'stats_only': '(True)', 'refresh': '(True)'}), '(self.client, requests, stats_only=True, refresh=True)\n', (13066, 13120), False, 'from elasticsearch.helpers import BulkIndexError, async_bulk\n'), ((13994, 14018), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (14016, 14018), False, 'import asyncio\n'), ((16201, 16225), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (16223, 16225), False, 'import asyncio\n'), ((19323, 19354), 'llama_index.legacy.vector_stores.utils.metadata_dict_to_node', 'metadata_dict_to_node', (['metadata'], {}), '(metadata)\n', (19344, 19354), False, 'from llama_index.legacy.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((3771, 3793), 'numpy.max', 'np.max', (['scores_to_norm'], {}), '(scores_to_norm)\n', (3777, 3793), True, 'import numpy as np\n'), ((12535, 12547), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (12545, 12547), False, 'import uuid\n'), ((20003, 20146), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': 'text', 'metadata': 'metadata', 'id_': 'node_id', 'start_char_idx': 'start_char_idx', 'end_char_idx': 'end_char_idx', 'relationships': 'relationships'}), '(text=text, metadata=metadata, id_=node_id, start_char_idx=\n start_char_idx, end_char_idx=end_char_idx, relationships=relationships)\n', (20011, 20146), False, 'from llama_index.legacy.schema import BaseNode, MetadataMode, TextNode\n')] |
"""Elasticsearch vector store."""
import asyncio
import uuid
from logging import getLogger
from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast
import nest_asyncio
import numpy as np
from llama_index.legacy.bridge.pydantic import PrivateAttr
from llama_index.legacy.schema import BaseNode, MetadataMode, TextNode
from llama_index.legacy.vector_stores.types import (
BasePydanticVectorStore,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryMode,
VectorStoreQueryResult,
)
from llama_index.legacy.vector_stores.utils import (
metadata_dict_to_node,
node_to_metadata_dict,
)
logger = getLogger(__name__)
DISTANCE_STRATEGIES = Literal[
"COSINE",
"DOT_PRODUCT",
"EUCLIDEAN_DISTANCE",
]
def _get_elasticsearch_client(
*,
es_url: Optional[str] = None,
cloud_id: Optional[str] = None,
api_key: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
) -> Any:
"""Get AsyncElasticsearch client.
Args:
es_url: Elasticsearch URL.
cloud_id: Elasticsearch cloud ID.
api_key: Elasticsearch API key.
username: Elasticsearch username.
password: Elasticsearch password.
Returns:
AsyncElasticsearch client.
Raises:
ConnectionError: If Elasticsearch client cannot connect to Elasticsearch.
"""
try:
import elasticsearch
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
if es_url and cloud_id:
raise ValueError(
"Both es_url and cloud_id are defined. Please provide only one."
)
connection_params: Dict[str, Any] = {}
if es_url:
connection_params["hosts"] = [es_url]
elif cloud_id:
connection_params["cloud_id"] = cloud_id
else:
raise ValueError("Please provide either elasticsearch_url or cloud_id.")
if api_key:
connection_params["api_key"] = api_key
elif username and password:
connection_params["basic_auth"] = (username, password)
sync_es_client = elasticsearch.Elasticsearch(
**connection_params, headers={"user-agent": ElasticsearchStore.get_user_agent()}
)
async_es_client = elasticsearch.AsyncElasticsearch(**connection_params)
try:
sync_es_client.info() # so don't have to 'await' to just get info
except Exception as e:
logger.error(f"Error connecting to Elasticsearch: {e}")
raise
return async_es_client
def _to_elasticsearch_filter(standard_filters: MetadataFilters) -> Dict[str, Any]:
"""Convert standard filters to Elasticsearch filter.
Args:
standard_filters: Standard Llama-index filters.
Returns:
Elasticsearch filter.
"""
if len(standard_filters.legacy_filters()) == 1:
filter = standard_filters.legacy_filters()[0]
return {
"term": {
f"metadata.{filter.key}.keyword": {
"value": filter.value,
}
}
}
else:
operands = []
for filter in standard_filters.legacy_filters():
operands.append(
{
"term": {
f"metadata.{filter.key}.keyword": {
"value": filter.value,
}
}
}
)
return {"bool": {"must": operands}}
def _to_llama_similarities(scores: List[float]) -> List[float]:
if scores is None or len(scores) == 0:
return []
scores_to_norm: np.ndarray = np.array(scores)
return np.exp(scores_to_norm - np.max(scores_to_norm)).tolist()
class ElasticsearchStore(BasePydanticVectorStore):
"""Elasticsearch vector store.
Args:
index_name: Name of the Elasticsearch index.
es_client: Optional. Pre-existing AsyncElasticsearch client.
es_url: Optional. Elasticsearch URL.
es_cloud_id: Optional. Elasticsearch cloud ID.
es_api_key: Optional. Elasticsearch API key.
es_user: Optional. Elasticsearch username.
es_password: Optional. Elasticsearch password.
text_field: Optional. Name of the Elasticsearch field that stores the text.
vector_field: Optional. Name of the Elasticsearch field that stores the
embedding.
batch_size: Optional. Batch size for bulk indexing. Defaults to 200.
distance_strategy: Optional. Distance strategy to use for similarity search.
Defaults to "COSINE".
Raises:
ConnectionError: If AsyncElasticsearch client cannot connect to Elasticsearch.
ValueError: If neither es_client nor es_url nor es_cloud_id is provided.
"""
stores_text: bool = True
index_name: str
es_client: Optional[Any]
es_url: Optional[str]
es_cloud_id: Optional[str]
es_api_key: Optional[str]
es_user: Optional[str]
es_password: Optional[str]
text_field: str = "content"
vector_field: str = "embedding"
batch_size: int = 200
distance_strategy: Optional[DISTANCE_STRATEGIES] = "COSINE"
_client = PrivateAttr()
def __init__(
self,
index_name: str,
es_client: Optional[Any] = None,
es_url: Optional[str] = None,
es_cloud_id: Optional[str] = None,
es_api_key: Optional[str] = None,
es_user: Optional[str] = None,
es_password: Optional[str] = None,
text_field: str = "content",
vector_field: str = "embedding",
batch_size: int = 200,
distance_strategy: Optional[DISTANCE_STRATEGIES] = "COSINE",
) -> None:
nest_asyncio.apply()
if es_client is not None:
self._client = es_client.options(
headers={"user-agent": self.get_user_agent()}
)
elif es_url is not None or es_cloud_id is not None:
self._client = _get_elasticsearch_client(
es_url=es_url,
username=es_user,
password=es_password,
cloud_id=es_cloud_id,
api_key=es_api_key,
)
else:
raise ValueError(
"""Either provide a pre-existing AsyncElasticsearch or valid \
credentials for creating a new connection."""
)
super().__init__(
index_name=index_name,
es_client=es_client,
es_url=es_url,
es_cloud_id=es_cloud_id,
es_api_key=es_api_key,
es_user=es_user,
es_password=es_password,
text_field=text_field,
vector_field=vector_field,
batch_size=batch_size,
distance_strategy=distance_strategy,
)
@property
def client(self) -> Any:
"""Get async elasticsearch client."""
return self._client
@staticmethod
def get_user_agent() -> str:
"""Get user agent for elasticsearch client."""
import llama_index.legacy
return f"llama_index-py-vs/{llama_index.legacy.__version__}"
async def _create_index_if_not_exists(
self, index_name: str, dims_length: Optional[int] = None
) -> None:
"""Create the AsyncElasticsearch index if it doesn't already exist.
Args:
index_name: Name of the AsyncElasticsearch index to create.
dims_length: Length of the embedding vectors.
"""
if self.client.indices.exists(index=index_name):
logger.debug(f"Index {index_name} already exists. Skipping creation.")
else:
if dims_length is None:
raise ValueError(
"Cannot create index without specifying dims_length "
"when the index doesn't already exist. We infer "
"dims_length from the first embedding. Check that "
"you have provided an embedding function."
)
if self.distance_strategy == "COSINE":
similarityAlgo = "cosine"
elif self.distance_strategy == "EUCLIDEAN_DISTANCE":
similarityAlgo = "l2_norm"
elif self.distance_strategy == "DOT_PRODUCT":
similarityAlgo = "dot_product"
else:
raise ValueError(f"Similarity {self.distance_strategy} not supported.")
index_settings = {
"mappings": {
"properties": {
self.vector_field: {
"type": "dense_vector",
"dims": dims_length,
"index": True,
"similarity": similarityAlgo,
},
self.text_field: {"type": "text"},
"metadata": {
"properties": {
"document_id": {"type": "keyword"},
"doc_id": {"type": "keyword"},
"ref_doc_id": {"type": "keyword"},
}
},
}
}
}
logger.debug(
f"Creating index {index_name} with mappings {index_settings['mappings']}"
)
await self.client.indices.create(index=index_name, **index_settings)
def add(
self,
nodes: List[BaseNode],
*,
create_index_if_not_exists: bool = True,
**add_kwargs: Any,
) -> List[str]:
"""Add nodes to Elasticsearch index.
Args:
nodes: List of nodes with embeddings.
create_index_if_not_exists: Optional. Whether to create
the Elasticsearch index if it
doesn't already exist.
Defaults to True.
Returns:
List of node IDs that were added to the index.
Raises:
ImportError: If elasticsearch['async'] python package is not installed.
BulkIndexError: If AsyncElasticsearch async_bulk indexing fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.async_add(nodes, create_index_if_not_exists=create_index_if_not_exists)
)
async def async_add(
self,
nodes: List[BaseNode],
*,
create_index_if_not_exists: bool = True,
**add_kwargs: Any,
) -> List[str]:
"""Asynchronous method to add nodes to Elasticsearch index.
Args:
nodes: List of nodes with embeddings.
create_index_if_not_exists: Optional. Whether to create
the AsyncElasticsearch index if it
doesn't already exist.
Defaults to True.
Returns:
List of node IDs that were added to the index.
Raises:
ImportError: If elasticsearch python package is not installed.
BulkIndexError: If AsyncElasticsearch async_bulk indexing fails.
"""
try:
from elasticsearch.helpers import BulkIndexError, async_bulk
except ImportError:
raise ImportError(
"Could not import elasticsearch[async] python package. "
"Please install it with `pip install 'elasticsearch[async]'`."
)
if len(nodes) == 0:
return []
if create_index_if_not_exists:
dims_length = len(nodes[0].get_embedding())
await self._create_index_if_not_exists(
index_name=self.index_name, dims_length=dims_length
)
embeddings: List[List[float]] = []
texts: List[str] = []
metadatas: List[dict] = []
ids: List[str] = []
for node in nodes:
ids.append(node.node_id)
embeddings.append(node.get_embedding())
texts.append(node.get_content(metadata_mode=MetadataMode.NONE))
metadatas.append(node_to_metadata_dict(node, remove_text=True))
requests = []
return_ids = []
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
_id = ids[i] if ids else str(uuid.uuid4())
request = {
"_op_type": "index",
"_index": self.index_name,
self.vector_field: embeddings[i],
self.text_field: text,
"metadata": metadata,
"_id": _id,
}
requests.append(request)
return_ids.append(_id)
        try:
            # A single bulk pass indexes all requests; stats_only makes
            # async_bulk return (success_count, failure_count) instead of
            # per-item details.
            success, failed = await async_bulk(
                self.client,
                requests,
                chunk_size=self.batch_size,
                stats_only=True,
                refresh=True,
            )
            logger.debug(f"Added {success} texts to index; {failed} failed")
            logger.debug(f"Added texts with ids {ids} to index")
return return_ids
except BulkIndexError as e:
logger.error(f"Error adding texts: {e}")
firstError = e.errors[0].get("index", {}).get("error", {})
logger.error(f"First error reason: {firstError.get('reason')}")
raise
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Delete node from Elasticsearch index.
Args:
ref_doc_id: ID of the node to delete.
delete_kwargs: Optional. Additional arguments to
pass to Elasticsearch delete_by_query.
Raises:
Exception: If Elasticsearch delete_by_query fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.adelete(ref_doc_id, **delete_kwargs)
)
async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Async delete node from Elasticsearch index.
Args:
ref_doc_id: ID of the node to delete.
delete_kwargs: Optional. Additional arguments to
pass to AsyncElasticsearch delete_by_query.
Raises:
Exception: If AsyncElasticsearch delete_by_query fails.
"""
try:
async with self.client as client:
res = await client.delete_by_query(
index=self.index_name,
query={"term": {"metadata.ref_doc_id": ref_doc_id}},
refresh=True,
**delete_kwargs,
)
if res["deleted"] == 0:
logger.warning(f"Could not find text {ref_doc_id} to delete")
else:
logger.debug(f"Deleted text {ref_doc_id} from index")
except Exception:
logger.error(f"Error deleting text: {ref_doc_id}")
raise
def query(
self,
query: VectorStoreQuery,
custom_query: Optional[
Callable[[Dict, Union[VectorStoreQuery, None]], Dict]
] = None,
es_filter: Optional[List[Dict]] = None,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Query index for top k most similar nodes.
Args:
            query (VectorStoreQuery): the vector store query, including the
                query embedding and similarity_top_k.
custom_query: Optional. custom query function that takes in the es query
body and returns a modified query body.
This can be used to add additional query
parameters to the Elasticsearch query.
es_filter: Optional. Elasticsearch filter to apply to the
query. If filter is provided in the query,
this filter will be ignored.
Returns:
VectorStoreQueryResult: Result of the query.
Raises:
Exception: If Elasticsearch query fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.aquery(query, custom_query, es_filter, **kwargs)
)
async def aquery(
self,
query: VectorStoreQuery,
custom_query: Optional[
Callable[[Dict, Union[VectorStoreQuery, None]], Dict]
] = None,
es_filter: Optional[List[Dict]] = None,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Asynchronous query index for top k most similar nodes.
Args:
            query (VectorStoreQuery): the vector store query, including the
                query embedding and similarity_top_k.
custom_query: Optional. custom query function that takes in the es query
body and returns a modified query body.
This can be used to add additional query
parameters to the AsyncElasticsearch query.
es_filter: Optional. AsyncElasticsearch filter to apply to the
query. If filter is provided in the query,
this filter will be ignored.
Returns:
VectorStoreQueryResult: Result of the query.
Raises:
Exception: If AsyncElasticsearch query fails.
"""
query_embedding = cast(List[float], query.query_embedding)
es_query = {}
if query.filters is not None and len(query.filters.legacy_filters()) > 0:
filter = [_to_elasticsearch_filter(query.filters)]
else:
filter = es_filter or []
if query.mode in (
VectorStoreQueryMode.DEFAULT,
VectorStoreQueryMode.HYBRID,
):
es_query["knn"] = {
"filter": filter,
"field": self.vector_field,
"query_vector": query_embedding,
"k": query.similarity_top_k,
"num_candidates": query.similarity_top_k * 10,
}
if query.mode in (
VectorStoreQueryMode.TEXT_SEARCH,
VectorStoreQueryMode.HYBRID,
):
es_query["query"] = {
"bool": {
"must": {"match": {self.text_field: {"query": query.query_str}}},
"filter": filter,
}
}
if query.mode == VectorStoreQueryMode.HYBRID:
es_query["rank"] = {"rrf": {}}
if custom_query is not None:
es_query = custom_query(es_query, query)
logger.debug(f"Calling custom_query, Query body now: {es_query}")
async with self.client as client:
response = await client.search(
index=self.index_name,
**es_query,
size=query.similarity_top_k,
_source={"excludes": [self.vector_field]},
)
top_k_nodes = []
top_k_ids = []
top_k_scores = []
hits = response["hits"]["hits"]
for hit in hits:
source = hit["_source"]
metadata = source.get("metadata", None)
text = source.get(self.text_field, None)
node_id = hit["_id"]
try:
node = metadata_dict_to_node(metadata)
node.text = text
except Exception:
# Legacy support for old metadata format
logger.warning(
f"Could not parse metadata from hit {hit['_source']['metadata']}"
)
node_info = source.get("node_info")
relationships = source.get("relationships") or {}
start_char_idx = None
end_char_idx = None
if isinstance(node_info, dict):
start_char_idx = node_info.get("start", None)
end_char_idx = node_info.get("end", None)
node = TextNode(
text=text,
metadata=metadata,
id_=node_id,
start_char_idx=start_char_idx,
end_char_idx=end_char_idx,
relationships=relationships,
)
top_k_nodes.append(node)
top_k_ids.append(node_id)
top_k_scores.append(hit.get("_rank", hit["_score"]))
        if query.mode == VectorStoreQueryMode.HYBRID:
            total_rank = sum(top_k_scores)
            # Normalize RRF ranks so that lower (better) ranks map to higher
            # similarity scores in [0, 1].
            top_k_scores = [
                (total_rank - rank) / total_rank for rank in top_k_scores
            ]
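        # Worked example of the normalization above (illustrative): RRF ranks
        # [1, 2, 3] give total_rank = 6 and scores [5/6, 4/6, 3/6], i.e. a
        # better (lower) rank maps to a higher similarity in [0, 1].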
return VectorStoreQueryResult(
nodes=top_k_nodes,
ids=top_k_ids,
similarities=_to_llama_similarities(top_k_scores),
)
| [
"llama_index.legacy.vector_stores.utils.metadata_dict_to_node",
"llama_index.legacy.bridge.pydantic.PrivateAttr",
"llama_index.legacy.schema.TextNode",
"llama_index.legacy.vector_stores.utils.node_to_metadata_dict"
] | [((640, 659), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (649, 659), False, 'from logging import getLogger\n'), ((2343, 2396), 'elasticsearch.AsyncElasticsearch', 'elasticsearch.AsyncElasticsearch', ([], {}), '(**connection_params)\n', (2375, 2396), False, 'import elasticsearch\n'), ((3719, 3735), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (3727, 3735), True, 'import numpy as np\n'), ((5274, 5287), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (5285, 5287), False, 'from llama_index.legacy.bridge.pydantic import PrivateAttr\n'), ((5793, 5813), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (5811, 5813), False, 'import nest_asyncio\n'), ((17423, 17463), 'typing.cast', 'cast', (['List[float]', 'query.query_embedding'], {}), '(List[float], query.query_embedding)\n', (17427, 17463), False, 'from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast\n'), ((12909, 12984), 'elasticsearch.helpers.async_bulk', 'async_bulk', (['self.client', 'requests'], {'chunk_size': 'self.batch_size', 'refresh': '(True)'}), '(self.client, requests, chunk_size=self.batch_size, refresh=True)\n', (12919, 12984), False, 'from elasticsearch.helpers import BulkIndexError, async_bulk\n'), ((10374, 10398), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (10396, 10398), False, 'import asyncio\n'), ((12301, 12346), 'llama_index.legacy.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(True)'}), '(node, remove_text=True)\n', (12322, 12346), False, 'from llama_index.legacy.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((13056, 13120), 'elasticsearch.helpers.async_bulk', 'async_bulk', (['self.client', 'requests'], {'stats_only': '(True)', 'refresh': '(True)'}), '(self.client, requests, stats_only=True, refresh=True)\n', (13066, 13120), False, 'from elasticsearch.helpers import BulkIndexError, async_bulk\n'), ((13994, 14018), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (14016, 14018), False, 'import asyncio\n'), ((16201, 16225), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (16223, 16225), False, 'import asyncio\n'), ((19323, 19354), 'llama_index.legacy.vector_stores.utils.metadata_dict_to_node', 'metadata_dict_to_node', (['metadata'], {}), '(metadata)\n', (19344, 19354), False, 'from llama_index.legacy.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((3771, 3793), 'numpy.max', 'np.max', (['scores_to_norm'], {}), '(scores_to_norm)\n', (3777, 3793), True, 'import numpy as np\n'), ((12535, 12547), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (12545, 12547), False, 'import uuid\n'), ((20003, 20146), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': 'text', 'metadata': 'metadata', 'id_': 'node_id', 'start_char_idx': 'start_char_idx', 'end_char_idx': 'end_char_idx', 'relationships': 'relationships'}), '(text=text, metadata=metadata, id_=node_id, start_char_idx=\n start_char_idx, end_char_idx=end_char_idx, relationships=relationships)\n', (20011, 20146), False, 'from llama_index.legacy.schema import BaseNode, MetadataMode, TextNode\n')] |
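A minimal usage sketch for the Elasticsearch store above, not part of the extracted source: it assumes the class is exported as `ElasticsearchStore` from `llama_index.legacy.vector_stores.elasticsearch` and that an Elasticsearch instance is reachable at the placeholder URL.

from llama_index.legacy.schema import TextNode
from llama_index.legacy.vector_stores.elasticsearch import ElasticsearchStore
from llama_index.legacy.vector_stores.types import VectorStoreQuery

store = ElasticsearchStore(
    index_name="demo-index",
    es_url="http://localhost:9200",  # placeholder endpoint
)
# The index is created on first insert, sized from the first embedding.
store.add([TextNode(text="hello world", embedding=[0.1, 0.2, 0.3])])
result = store.query(
    VectorStoreQuery(query_embedding=[0.1, 0.2, 0.3], similarity_top_k=1)
)
print(result.ids, result.similarities)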
"""Google Generative AI Vector Store.
The GenAI Semantic Retriever API is a managed end-to-end service that allows
developers to create a corpus of documents to perform semantic search on
related passages given a user query. For more information visit:
https://developers.generativeai.google/guide
"""
import logging
import uuid
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast
from llama_index.core.bridge.pydantic import ( # type: ignore
BaseModel,
Field,
PrivateAttr,
)
from llama_index.core.schema import BaseNode, RelatedNodeInfo, TextNode
from llama_index.core.vector_stores.types import (
BasePydanticVectorStore,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryResult,
)
if TYPE_CHECKING:
from google.auth import credentials
_logger = logging.getLogger(__name__)
_import_err_msg = "`google.generativeai` package not found, please run `pip install google-generativeai`"
_default_doc_id = "default-doc"
"""Google GenerativeAI service context.
Use this to provide the correct service context for `GoogleVectorStore`.
See the docstring for `GoogleVectorStore` for usage example.
"""
def set_google_config(
*,
api_endpoint: Optional[str] = None,
user_agent: Optional[str] = None,
page_size: Optional[int] = None,
auth_credentials: Optional["credentials.Credentials"] = None,
**kwargs: Any,
) -> None:
"""
Set the configuration for Google Generative AI API.
    Parameters are optional. Normally, the defaults should work fine.
If provided, they will override the default values in the Config class.
See the docstring in `genai_extension.py` for more details.
auth_credentials: Optional["credentials.Credentials"] = None,
Use this to pass Google Auth credentials such as using a service account.
    Refer to the auth credentials documentation:
https://developers.google.com/identity/protocols/oauth2/service-account#creatinganaccount.
Example:
from google.oauth2 import service_account
credentials = service_account.Credentials.from_service_account_file(
"/path/to/service.json",
scopes=[
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/generative-language.retriever",
],
)
set_google_config(auth_credentials=credentials)
"""
try:
import llama_index.vector_stores.google.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
config_attrs = {
"api_endpoint": api_endpoint,
"user_agent": user_agent,
"page_size": page_size,
"auth_credentials": auth_credentials,
"testing": kwargs.get("testing", None),
}
attrs = {k: v for k, v in config_attrs.items() if v is not None}
config = genaix.Config(**attrs)
genaix.set_config(config)
class NoSuchCorpusException(Exception):
def __init__(self, *, corpus_id: str) -> None:
super().__init__(f"No such corpus {corpus_id} found")
class GoogleVectorStore(BasePydanticVectorStore):
"""Google GenerativeAI Vector Store.
Currently, it computes the embedding vectors on the server side.
Example:
google_vector_store = GoogleVectorStore.from_corpus(
corpus_id="my-corpus-id",
include_metadata=True,
metadata_keys=['file_name', 'creation_date']
)
index = VectorStoreIndex.from_vector_store(
vector_store=google_vector_store
)
Attributes:
corpus_id: The corpus ID that this vector store instance will read and
write to.
include_metadata (bool): Indicates whether to include custom metadata in the query
results. Defaults to False.
metadata_keys (Optional[List[str]]): Specifies which metadata keys to include in the
query results if include_metadata is set to True. If None, all metadata keys
are included. Defaults to None.
"""
# Semantic Retriever stores the document node's text as string and embeds
# the vectors on the server automatically.
stores_text: bool = True
is_embedding_query: bool = False
# This is not the Google's corpus name but an ID generated in the LlamaIndex
# world.
corpus_id: str = Field(frozen=True)
"""Corpus ID that this instance of the vector store is using."""
# Configuration options for handling metadata in query results
include_metadata: bool = False
metadata_keys: Optional[List[str]] = None
_client: Any = PrivateAttr()
def __init__(self, *, client: Any, **kwargs: Any):
"""Raw constructor.
Use the class method `from_corpus` or `create_corpus` instead.
Args:
client: The low-level retriever class from google.ai.generativelanguage.
"""
try:
import google.ai.generativelanguage as genai
except ImportError:
raise ImportError(_import_err_msg)
super().__init__(**kwargs)
assert isinstance(client, genai.RetrieverServiceClient)
self._client = client
@classmethod
def from_corpus(
cls,
*,
corpus_id: str,
include_metadata: bool = False,
metadata_keys: Optional[List[str]] = None,
) -> "GoogleVectorStore":
"""Create an instance that points to an existing corpus.
Args:
corpus_id (str): ID of an existing corpus on Google's server.
include_metadata (bool, optional): Specifies whether to include custom metadata in the
query results. Defaults to False, meaning metadata will not be included.
metadata_keys (Optional[List[str]], optional): Specifies which metadata keys to include
in the query results if include_metadata is set to True. If None, all metadata keys
are included. Defaults to None.
Returns:
An instance of the vector store that points to the specified corpus.
Raises:
NoSuchCorpusException if no such corpus is found.
"""
try:
import llama_index.vector_stores.google.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.from_corpus(corpus_id={corpus_id})")
client = genaix.build_semantic_retriever()
if genaix.get_corpus(corpus_id=corpus_id, client=client) is None:
raise NoSuchCorpusException(corpus_id=corpus_id)
return cls(
corpus_id=corpus_id,
client=client,
include_metadata=include_metadata,
metadata_keys=metadata_keys,
)
@classmethod
def create_corpus(
cls, *, corpus_id: Optional[str] = None, display_name: Optional[str] = None
) -> "GoogleVectorStore":
"""Create an instance that points to a newly created corpus.
Examples:
store = GoogleVectorStore.create_corpus()
print(f"Created corpus with ID: {store.corpus_id})
store = GoogleVectorStore.create_corpus(
display_name="My first corpus"
)
store = GoogleVectorStore.create_corpus(
corpus_id="my-corpus-1",
display_name="My first corpus"
)
Args:
corpus_id: ID of the new corpus to be created. If not provided,
Google server will provide one for you.
display_name: Title of the corpus. If not provided, Google server
will provide one for you.
Returns:
An instance of the vector store that points to the specified corpus.
Raises:
An exception if the corpus already exists or the user hits the
quota limit.
"""
try:
import llama_index.vector_stores.google.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(
f"\n\nGoogleVectorStore.create_corpus(new_corpus_id={corpus_id}, new_display_name={display_name})"
)
client = genaix.build_semantic_retriever()
new_corpus_id = corpus_id or str(uuid.uuid4())
new_corpus = genaix.create_corpus(
corpus_id=new_corpus_id, display_name=display_name, client=client
)
name = genaix.EntityName.from_str(new_corpus.name)
return cls(corpus_id=name.corpus_id, client=client)
@classmethod
def class_name(cls) -> str:
return "GoogleVectorStore"
@property
def client(self) -> Any:
return self._client
def add(self, nodes: List[BaseNode], **add_kwargs: Any) -> List[str]:
"""Add nodes with embedding to vector store.
If a node has a source node, the source node's ID will be used to create
a document. Otherwise, a default document for that corpus will be used
to house the node.
Furthermore, if the source node has a metadata field "file_name", it
will be used as the title of the document. If the source node has no
such field, Google server will assign a title to the document.
Example:
store = GoogleVectorStore.from_corpus(corpus_id="123")
store.add([
TextNode(
text="Hello, my darling",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="doc-456",
metadata={"file_name": "Title for doc-456"},
)
},
),
TextNode(
text="Goodbye, my baby",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="doc-456",
metadata={"file_name": "Title for doc-456"},
)
},
),
])
The above code will create one document with ID `doc-456` and title
`Title for doc-456`. This document will house both nodes.
"""
try:
import llama_index.vector_stores.google.genai_extension as genaix
import google.ai.generativelanguage as genai
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.add(nodes={nodes})")
client = cast(genai.RetrieverServiceClient, self.client)
created_node_ids: List[str] = []
for nodeGroup in _group_nodes_by_source(nodes):
source = nodeGroup.source_node
document_id = source.node_id
document = genaix.get_document(
corpus_id=self.corpus_id, document_id=document_id, client=client
)
if not document:
genaix.create_document(
corpus_id=self.corpus_id,
display_name=source.metadata.get("file_name", None),
document_id=document_id,
metadata=source.metadata,
client=client,
)
created_chunks = genaix.batch_create_chunk(
corpus_id=self.corpus_id,
document_id=document_id,
texts=[node.get_content() for node in nodeGroup.nodes],
metadatas=[node.metadata for node in nodeGroup.nodes],
client=client,
)
created_node_ids.extend([chunk.name for chunk in created_chunks])
return created_node_ids
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Delete nodes by ref_doc_id.
Both the underlying nodes and the document will be deleted from Google
server.
Args:
ref_doc_id: The document ID to be deleted.
"""
try:
import llama_index.vector_stores.google.genai_extension as genaix
import google.ai.generativelanguage as genai
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.delete(ref_doc_id={ref_doc_id})")
client = cast(genai.RetrieverServiceClient, self.client)
genaix.delete_document(
corpus_id=self.corpus_id, document_id=ref_doc_id, client=client
)
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
"""Query vector store.
Example:
store = GoogleVectorStore.from_corpus(corpus_id="123")
store.query(
query=VectorStoreQuery(
query_str="What is the meaning of life?",
# Only nodes with this author.
filters=MetadataFilters(
filters=[
ExactMatchFilter(
key="author",
value="Arthur Schopenhauer",
)
]
),
# Only from these docs. If not provided,
# the entire corpus is searched.
doc_ids=["doc-456"],
similarity_top_k=3,
)
)
Args:
query: See `llama_index.core.vector_stores.types.VectorStoreQuery`.
"""
try:
import llama_index.vector_stores.google.genai_extension as genaix
import google.ai.generativelanguage as genai
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.query(query={query})")
query_str = query.query_str
if query_str is None:
raise ValueError("VectorStoreQuery.query_str should not be None.")
client = cast(genai.RetrieverServiceClient, self.client)
relevant_chunks: List[genai.RelevantChunk] = []
if query.doc_ids is None:
# The chunks from query_corpus should be sorted in reverse order by
# relevant score.
relevant_chunks = genaix.query_corpus(
corpus_id=self.corpus_id,
query=query_str,
filter=_convert_filter(query.filters),
k=query.similarity_top_k,
client=client,
)
else:
for doc_id in query.doc_ids:
relevant_chunks.extend(
genaix.query_document(
corpus_id=self.corpus_id,
document_id=doc_id,
query=query_str,
filter=_convert_filter(query.filters),
k=query.similarity_top_k,
client=client,
)
)
# Make sure the chunks are reversed sorted according to relevant
# scores even across multiple documents.
relevant_chunks.sort(key=lambda c: c.chunk_relevance_score, reverse=True)
nodes = []
include_metadata = self.include_metadata
metadata_keys = self.metadata_keys
for chunk in relevant_chunks:
metadata = {}
if include_metadata:
for custom_metadata in chunk.chunk.custom_metadata:
# Use getattr to safely extract values
value = getattr(custom_metadata, "string_value", None)
if (
value is None
): # If string_value is not set, check for numeric_value
value = getattr(custom_metadata, "numeric_value", None)
# Add to the metadata dictionary only those keys that are present in metadata_keys
if value is not None and (
metadata_keys is None or custom_metadata.key in metadata_keys
):
metadata[custom_metadata.key] = value
            text_node = TextNode(
                text=chunk.chunk.data.string_value,
                # `id_` is the TextNode field name; a bare `id` kwarg would be
                # silently ignored and the node would get a random ID.
                id_=_extract_chunk_id(chunk.chunk.name),
                metadata=metadata,  # Adding metadata to the node
            )
nodes.append(text_node)
return VectorStoreQueryResult(
nodes=nodes,
ids=[_extract_chunk_id(chunk.chunk.name) for chunk in relevant_chunks],
similarities=[chunk.chunk_relevance_score for chunk in relevant_chunks],
)
def _extract_chunk_id(entity_name: str) -> str:
try:
import llama_index.vector_stores.google.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
id = genaix.EntityName.from_str(entity_name).chunk_id
assert id is not None
return id
class _NodeGroup(BaseModel):
"""Every node in nodes have the same source node."""
source_node: RelatedNodeInfo
nodes: List[BaseNode]
def _group_nodes_by_source(nodes: Sequence[BaseNode]) -> List[_NodeGroup]:
"""Returns a list of lists of nodes where each list has all the nodes
from the same document.
"""
groups: Dict[str, _NodeGroup] = {}
for node in nodes:
source_node: RelatedNodeInfo
if isinstance(node.source_node, RelatedNodeInfo):
source_node = node.source_node
else:
source_node = RelatedNodeInfo(node_id=_default_doc_id)
if source_node.node_id not in groups:
groups[source_node.node_id] = _NodeGroup(source_node=source_node, nodes=[])
groups[source_node.node_id].nodes.append(node)
return list(groups.values())
def _convert_filter(fs: Optional[MetadataFilters]) -> Dict[str, Any]:
if fs is None:
return {}
assert isinstance(fs, MetadataFilters)
return {f.key: f.value for f in fs.filters}
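# Illustrative example, not part of the extracted source, of what
# `_convert_filter` produces (`ExactMatchFilter` also lives in
# `llama_index.core.vector_stores.types`):
#
#   fs = MetadataFilters(
#       filters=[ExactMatchFilter(key="author", value="Arthur Schopenhauer")]
#   )
#   _convert_filter(fs)    # -> {"author": "Arthur Schopenhauer"}
#   _convert_filter(None)  # -> {}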
| [
"llama_index.vector_stores.google.genai_extension.delete_document",
"llama_index.vector_stores.google.genai_extension.Config",
"llama_index.vector_stores.google.genai_extension.get_corpus",
"llama_index.vector_stores.google.genai_extension.EntityName.from_str",
"llama_index.vector_stores.google.genai_extension.create_corpus",
"llama_index.vector_stores.google.genai_extension.build_semantic_retriever",
"llama_index.vector_stores.google.genai_extension.get_document",
"llama_index.core.bridge.pydantic.Field",
"llama_index.core.bridge.pydantic.PrivateAttr",
"llama_index.vector_stores.google.genai_extension.set_config",
"llama_index.core.schema.RelatedNodeInfo"
] | [((812, 839), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (829, 839), False, 'import logging\n'), ((2859, 2881), 'llama_index.vector_stores.google.genai_extension.Config', 'genaix.Config', ([], {}), '(**attrs)\n', (2872, 2881), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((2886, 2911), 'llama_index.vector_stores.google.genai_extension.set_config', 'genaix.set_config', (['config'], {}), '(config)\n', (2903, 2911), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((4343, 4361), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'frozen': '(True)'}), '(frozen=True)\n', (4348, 4361), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, PrivateAttr\n'), ((4600, 4613), 'llama_index.core.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (4611, 4613), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, PrivateAttr\n'), ((6413, 6446), 'llama_index.vector_stores.google.genai_extension.build_semantic_retriever', 'genaix.build_semantic_retriever', ([], {}), '()\n', (6444, 6446), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((8216, 8249), 'llama_index.vector_stores.google.genai_extension.build_semantic_retriever', 'genaix.build_semantic_retriever', ([], {}), '()\n', (8247, 8249), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((8326, 8417), 'llama_index.vector_stores.google.genai_extension.create_corpus', 'genaix.create_corpus', ([], {'corpus_id': 'new_corpus_id', 'display_name': 'display_name', 'client': 'client'}), '(corpus_id=new_corpus_id, display_name=display_name,\n client=client)\n', (8346, 8417), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((8451, 8494), 'llama_index.vector_stores.google.genai_extension.EntityName.from_str', 'genaix.EntityName.from_str', (['new_corpus.name'], {}), '(new_corpus.name)\n', (8477, 8494), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((10566, 10613), 'typing.cast', 'cast', (['genai.RetrieverServiceClient', 'self.client'], {}), '(genai.RetrieverServiceClient, self.client)\n', (10570, 10613), False, 'from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast\n'), ((12317, 12364), 'typing.cast', 'cast', (['genai.RetrieverServiceClient', 'self.client'], {}), '(genai.RetrieverServiceClient, self.client)\n', (12321, 12364), False, 'from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast\n'), ((12373, 12464), 'llama_index.vector_stores.google.genai_extension.delete_document', 'genaix.delete_document', ([], {'corpus_id': 'self.corpus_id', 'document_id': 'ref_doc_id', 'client': 'client'}), '(corpus_id=self.corpus_id, document_id=ref_doc_id,\n client=client)\n', (12395, 12464), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((13968, 14015), 'typing.cast', 'cast', (['genai.RetrieverServiceClient', 'self.client'], {}), '(genai.RetrieverServiceClient, self.client)\n', (13972, 14015), False, 'from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast\n'), ((16832, 16871), 'llama_index.vector_stores.google.genai_extension.EntityName.from_str', 'genaix.EntityName.from_str', (['entity_name'], {}), '(entity_name)\n', (16858, 16871), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((6458, 6511), 'llama_index.vector_stores.google.genai_extension.get_corpus', 'genaix.get_corpus', ([], 
{'corpus_id': 'corpus_id', 'client': 'client'}), '(corpus_id=corpus_id, client=client)\n', (6475, 6511), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((10819, 10908), 'llama_index.vector_stores.google.genai_extension.get_document', 'genaix.get_document', ([], {'corpus_id': 'self.corpus_id', 'document_id': 'document_id', 'client': 'client'}), '(corpus_id=self.corpus_id, document_id=document_id,\n client=client)\n', (10838, 10908), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((17496, 17536), 'llama_index.core.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': '_default_doc_id'}), '(node_id=_default_doc_id)\n', (17511, 17536), False, 'from llama_index.core.schema import BaseNode, RelatedNodeInfo, TextNode\n'), ((8291, 8303), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (8301, 8303), False, 'import uuid\n')] |
"""Google GenerativeAI Attributed Question and Answering (AQA) service.
The GenAI Semantic AQA API is a managed end-to-end service that allows
developers to create responses grounded on specified passages based on
a user query. For more information visit:
https://developers.generativeai.google/guide
"""
import logging
from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast
from llama_index.bridge.pydantic import BaseModel # type: ignore
from llama_index.callbacks.schema import CBEventType, EventPayload
from llama_index.core.response.schema import Response
from llama_index.indices.query.schema import QueryBundle
from llama_index.prompts.mixin import PromptDictType
from llama_index.response_synthesizers.base import BaseSynthesizer, QueryTextType
from llama_index.schema import MetadataMode, NodeWithScore, TextNode
from llama_index.types import RESPONSE_TEXT_TYPE
from llama_index.vector_stores.google.generativeai import google_service_context
if TYPE_CHECKING:
import google.ai.generativelanguage as genai
_logger = logging.getLogger(__name__)
_import_err_msg = "`google.generativeai` package not found, please run `pip install google-generativeai`"
_separator = "\n\n"
class SynthesizedResponse(BaseModel):
"""Response of `GoogleTextSynthesizer.get_response`."""
answer: str
"""The grounded response to the user's question."""
attributed_passages: List[str]
"""The list of passages the AQA model used for its response."""
answerable_probability: float
"""The model's estimate of the probability that its answer is correct and grounded in the input passages."""
class GoogleTextSynthesizer(BaseSynthesizer):
"""Google's Attributed Question and Answering service.
Given a user's query and a list of passages, Google's server will return
a response that is grounded to the provided list of passages. It will not
base the response on parametric memory.
"""
_client: Any
_temperature: float
_answer_style: Any
_safety_setting: List[Any]
def __init__(
self,
*,
temperature: float,
answer_style: Any,
safety_setting: List[Any],
**kwargs: Any,
):
"""Create a new Google AQA.
Prefer to use the factory `from_defaults` instead for type safety.
See `from_defaults` for more documentation.
"""
try:
import llama_index.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
super().__init__(
service_context=google_service_context,
output_cls=SynthesizedResponse,
**kwargs,
)
self._client = genaix.build_generative_service()
self._temperature = temperature
self._answer_style = answer_style
self._safety_setting = safety_setting
# Type safe factory that is only available if Google is installed.
@classmethod
def from_defaults(
cls,
temperature: float = 0.7,
answer_style: int = 1,
safety_setting: List["genai.SafetySetting"] = [],
) -> "GoogleTextSynthesizer":
"""Create a new Google AQA.
Example:
          responder = GoogleTextSynthesizer.from_defaults(
temperature=0.7,
answer_style=AnswerStyle.ABSTRACTIVE,
safety_setting=[
SafetySetting(
category=HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold=HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
),
]
)
Args:
temperature: 0.0 to 1.0.
answer_style: See `google.ai.generativelanguage.GenerateAnswerRequest.AnswerStyle`
The default is ABSTRACTIVE (1).
safety_setting: See `google.ai.generativelanguage.SafetySetting`.
Returns:
an instance of GoogleTextSynthesizer.
"""
return cls(
temperature=temperature,
answer_style=answer_style,
safety_setting=safety_setting,
)
def get_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> SynthesizedResponse:
"""Generate a grounded response on provided passages.
Args:
query_str: The user's question.
text_chunks: A list of passages that should be used to answer the
question.
Returns:
A `SynthesizedResponse` object.
"""
try:
import google.ai.generativelanguage as genai
import llama_index.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
client = cast(genai.GenerativeServiceClient, self._client)
response = genaix.generate_answer(
prompt=query_str,
passages=list(text_chunks),
answer_style=self._answer_style,
safety_settings=self._safety_setting,
temperature=self._temperature,
client=client,
)
return SynthesizedResponse(
answer=response.answer,
attributed_passages=[
passage.text for passage in response.attributed_passages
],
answerable_probability=response.answerable_probability,
)
async def aget_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
# TODO: Implement a true async version.
return self.get_response(query_str, text_chunks, **response_kwargs)
def synthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> Response:
"""Returns a grounded response based on provided passages.
Returns:
Response's `source_nodes` will begin with a list of attributed
passages. These passages are the ones that were used to construct
            the grounded response. These passages will always have no score;
            the missing score is what marks them as attributed passages. Then, the list
will follow with the originally provided passages, which will have
a score from the retrieval.
            Response's `metadata` may also have an entry with key
`answerable_probability`, which is the model's estimate of the
probability that its answer is correct and grounded in the input
passages.
"""
if len(nodes) == 0:
return Response("Empty Response")
if isinstance(query, str):
query = QueryBundle(query_str=query)
with self._callback_manager.event(
CBEventType.SYNTHESIZE, payload={EventPayload.QUERY_STR: query.query_str}
) as event:
internal_response = self.get_response(
query_str=query.query_str,
text_chunks=[
n.node.get_content(metadata_mode=MetadataMode.LLM) for n in nodes
],
**response_kwargs,
)
additional_source_nodes = list(additional_source_nodes or [])
external_response = self._prepare_external_response(
internal_response, nodes + additional_source_nodes
)
event.on_end(payload={EventPayload.RESPONSE: external_response})
return external_response
async def asynthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> Response:
# TODO: Implement a true async version.
return self.synthesize(query, nodes, additional_source_nodes, **response_kwargs)
def _prepare_external_response(
self,
response: SynthesizedResponse,
source_nodes: List[NodeWithScore],
) -> Response:
return Response(
response=response.answer,
source_nodes=[
NodeWithScore(node=TextNode(text=passage))
for passage in response.attributed_passages
]
+ source_nodes,
metadata={
"answerable_probability": response.answerable_probability,
},
)
def _get_prompts(self) -> PromptDictType:
# Not used.
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
# Not used.
...
| [
"llama_index.vector_stores.google.generativeai.genai_extension.build_generative_service",
"llama_index.core.response.schema.Response",
"llama_index.schema.TextNode",
"llama_index.indices.query.schema.QueryBundle"
] | [((1051, 1078), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1068, 1078), False, 'import logging\n'), ((2739, 2772), 'llama_index.vector_stores.google.generativeai.genai_extension.build_generative_service', 'genaix.build_generative_service', ([], {}), '()\n', (2770, 2772), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((4824, 4873), 'typing.cast', 'cast', (['genai.GenerativeServiceClient', 'self._client'], {}), '(genai.GenerativeServiceClient, self._client)\n', (4828, 4873), False, 'from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast\n'), ((6767, 6793), 'llama_index.core.response.schema.Response', 'Response', (['"""Empty Response"""'], {}), "('Empty Response')\n", (6775, 6793), False, 'from llama_index.core.response.schema import Response\n'), ((6850, 6878), 'llama_index.indices.query.schema.QueryBundle', 'QueryBundle', ([], {'query_str': 'query'}), '(query_str=query)\n', (6861, 6878), False, 'from llama_index.indices.query.schema import QueryBundle\n'), ((8289, 8311), 'llama_index.schema.TextNode', 'TextNode', ([], {'text': 'passage'}), '(text=passage)\n', (8297, 8311), False, 'from llama_index.schema import MetadataMode, NodeWithScore, TextNode\n')] |
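A minimal end-to-end sketch for the synthesizer above, not part of the extracted source; it assumes the `google-generativeai` package is installed, credentials are configured, and the class is importable from `llama_index.response_synthesizers.google.generativeai`.

import google.ai.generativelanguage as genai
from llama_index.response_synthesizers.google.generativeai import (
    GoogleTextSynthesizer,
)

synthesizer = GoogleTextSynthesizer.from_defaults(
    temperature=0.7,
    answer_style=genai.GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE,
)
response = synthesizer.get_response(
    query_str="What is the meaning of life?",
    text_chunks=["The meaning of life is 42."],
)
print(response.answer, response.answerable_probability)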
"""FastAPI app creation, logger configuration and main API routes."""
import llama_index
from private_gpt.di import global_injector
from private_gpt.launcher import create_app
# Add LlamaIndex simple observability
llama_index.set_global_handler("simple")
app = create_app(global_injector)
| [
"llama_index.set_global_handler"
] | [((217, 257), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (247, 257), False, 'import llama_index\n'), ((265, 292), 'private_gpt.launcher.create_app', 'create_app', (['global_injector'], {}), '(global_injector)\n', (275, 292), False, 'from private_gpt.launcher import create_app\n')] |
"""FastAPI app creation, logger configuration and main API routes."""
import llama_index
from private_gpt.di import global_injector
from private_gpt.launcher import create_app
# Add LlamaIndex simple observability
llama_index.set_global_handler("simple")
app = create_app(global_injector)
| [
"llama_index.set_global_handler"
] | [((217, 257), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (247, 257), False, 'import llama_index\n'), ((265, 292), 'private_gpt.launcher.create_app', 'create_app', (['global_injector'], {}), '(global_injector)\n', (275, 292), False, 'from private_gpt.launcher import create_app\n')] |
"""FastAPI app creation, logger configuration and main API routes."""
import llama_index
from private_gpt.di import global_injector
from private_gpt.launcher import create_app
# Add LlamaIndex simple observability
llama_index.set_global_handler("simple")
app = create_app(global_injector)
| [
"llama_index.set_global_handler"
] | [((217, 257), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (247, 257), False, 'import llama_index\n'), ((265, 292), 'private_gpt.launcher.create_app', 'create_app', (['global_injector'], {}), '(global_injector)\n', (275, 292), False, 'from private_gpt.launcher import create_app\n')] |
"""
Astra DB Vector store index.
An index based on a DB table with vector search capabilities,
powered by the astrapy library.
"""
import json
import logging
from typing import Any, Dict, List, Optional, cast
from warnings import warn
import llama_index.core
from llama_index.core.bridge.pydantic import PrivateAttr
from astrapy.db import AstraDB
from llama_index.core.indices.query.embedding_utils import get_top_k_mmr_embeddings
from llama_index.core.schema import BaseNode, MetadataMode
from llama_index.core.vector_stores.types import (
BasePydanticVectorStore,
ExactMatchFilter,
FilterOperator,
MetadataFilter,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryMode,
VectorStoreQueryResult,
)
from llama_index.core.vector_stores.utils import (
metadata_dict_to_node,
node_to_metadata_dict,
)
_logger = logging.getLogger(__name__)
DEFAULT_MMR_PREFETCH_FACTOR = 4.0
MAX_INSERT_BATCH_SIZE = 20
NON_INDEXED_FIELDS = ["metadata._node_content", "content"]
class AstraDBVectorStore(BasePydanticVectorStore):
"""
Astra DB Vector Store.
    An abstraction of an Astra table with
vector-similarity-search. Documents, and their embeddings, are stored
in an Astra table and a vector-capable index is used for searches.
The table does not need to exist beforehand: if necessary it will
be created behind the scenes.
All Astra operations are done through the astrapy library.
Args:
collection_name (str): collection name to use. If not existing, it will be created.
token (str): The Astra DB Application Token to use.
api_endpoint (str): The Astra DB JSON API endpoint for your database.
embedding_dimension (int): length of the embedding vectors in use.
        namespace (Optional[str]): The namespace to use. If not provided,
            'default_keyspace' is used.
ttl_seconds (Optional[int]): expiration time for inserted entries.
Default is no expiration.
"""
stores_text: bool = True
flat_metadata: bool = True
_embedding_dimension: int = PrivateAttr()
_ttl_seconds: Optional[int] = PrivateAttr()
_astra_db: Any = PrivateAttr()
_astra_db_collection: Any = PrivateAttr()
def __init__(
self,
*,
collection_name: str,
token: str,
api_endpoint: str,
embedding_dimension: int,
namespace: Optional[str] = None,
ttl_seconds: Optional[int] = None,
) -> None:
super().__init__()
# Set all the required class parameters
self._embedding_dimension = embedding_dimension
self._ttl_seconds = ttl_seconds
_logger.debug("Creating the Astra DB table")
# Build the Astra DB object
self._astra_db = AstraDB(
api_endpoint=api_endpoint,
token=token,
namespace=namespace,
caller_name=getattr(llama_index, "__name__", "llama_index"),
caller_version=getattr(llama_index.core, "__version__", None),
)
from astrapy.api import APIRequestError
try:
# Create and connect to the newly created collection
self._astra_db_collection = self._astra_db.create_collection(
collection_name=collection_name,
dimension=embedding_dimension,
options={"indexing": {"deny": NON_INDEXED_FIELDS}},
)
except APIRequestError:
# possibly the collection is preexisting and has legacy
# indexing settings: verify
get_coll_response = self._astra_db.get_collections(
options={"explain": True}
)
collections = (get_coll_response["status"] or {}).get("collections") or []
preexisting = [
collection
for collection in collections
if collection["name"] == collection_name
]
if preexisting:
pre_collection = preexisting[0]
# if it has no "indexing", it is a legacy collection;
                # otherwise it's unexpected: warn and proceed at the user's risk
pre_col_options = pre_collection.get("options") or {}
if "indexing" not in pre_col_options:
warn(
(
f"Collection '{collection_name}' is detected as "
"having indexing turned on for all fields "
"(either created manually or by older versions "
"of this plugin). This implies stricter "
"limitations on the amount of text"
" each entry can store. Consider reindexing anew on a"
" fresh collection to be able to store longer texts."
),
UserWarning,
stacklevel=2,
)
self._astra_db_collection = self._astra_db.collection(
collection_name=collection_name,
)
else:
options_json = json.dumps(pre_col_options["indexing"])
warn(
(
f"Collection '{collection_name}' has unexpected 'indexing'"
f" settings (options.indexing = {options_json})."
" This can result in odd behaviour when running "
" metadata filtering and/or unwarranted limitations"
" on storing long texts. Consider reindexing anew on a"
" fresh collection."
),
UserWarning,
stacklevel=2,
)
self._astra_db_collection = self._astra_db.collection(
collection_name=collection_name,
)
else:
# other exception
raise
def add(
self,
nodes: List[BaseNode],
**add_kwargs: Any,
) -> List[str]:
"""
Add nodes to index.
Args:
nodes: List[BaseNode]: list of node with embeddings
"""
# Initialize list of objects to track
nodes_list = []
# Process each node individually
for node in nodes:
# Get the metadata
metadata = node_to_metadata_dict(
node,
remove_text=True,
flat_metadata=self.flat_metadata,
)
# One dictionary of node data per node
nodes_list.append(
{
"_id": node.node_id,
"content": node.get_content(metadata_mode=MetadataMode.NONE),
"metadata": metadata,
"$vector": node.get_embedding(),
}
)
# Log the number of rows being added
_logger.debug(f"Adding {len(nodes_list)} rows to table")
# Initialize an empty list to hold the batches
batched_list = []
# Iterate over the node_list in steps of MAX_INSERT_BATCH_SIZE
for i in range(0, len(nodes_list), MAX_INSERT_BATCH_SIZE):
# Append a slice of node_list to the batched_list
batched_list.append(nodes_list[i : i + MAX_INSERT_BATCH_SIZE])
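        # e.g. 45 nodes with MAX_INSERT_BATCH_SIZE = 20 produce three
        # batches of sizes 20, 20 and 5.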
# Perform the bulk insert
for i, batch in enumerate(batched_list):
_logger.debug(f"Processing batch #{i + 1} of size {len(batch)}")
# Go to astrapy to perform the bulk insert
self._astra_db_collection.insert_many(batch)
# Return the list of ids
return [str(n["_id"]) for n in nodes_list]
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
        Delete nodes with the given ref_doc_id.
Args:
ref_doc_id (str): The id of the document to delete.
"""
_logger.debug("Deleting a document from the Astra table")
self._astra_db_collection.delete(id=ref_doc_id, **delete_kwargs)
@property
def client(self) -> Any:
"""Return the underlying Astra vector table object."""
return self._astra_db_collection
@staticmethod
def _query_filters_to_dict(query_filters: MetadataFilters) -> Dict[str, Any]:
# Allow only legacy ExactMatchFilter and MetadataFilter with FilterOperator.EQ
if not all(
(
isinstance(f, ExactMatchFilter)
or (isinstance(f, MetadataFilter) and f.operator == FilterOperator.EQ)
)
for f in query_filters.filters
):
raise NotImplementedError(
"Only filters with operator=FilterOperator.EQ are supported"
)
return {f"metadata.{f.key}": f.value for f in query_filters.filters}
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
"""Query index for top k most similar nodes."""
# Get the currently available query modes
_available_query_modes = [
VectorStoreQueryMode.DEFAULT,
VectorStoreQueryMode.MMR,
]
# Reject query if not available
if query.mode not in _available_query_modes:
raise NotImplementedError(f"Query mode {query.mode} not available.")
# Get the query embedding
query_embedding = cast(List[float], query.query_embedding)
# Process the metadata filters as needed
if query.filters is not None:
query_metadata = self._query_filters_to_dict(query.filters)
else:
query_metadata = {}
# Get the scores depending on the query mode
if query.mode == VectorStoreQueryMode.DEFAULT:
# Call the vector_find method of AstraPy
matches = self._astra_db_collection.vector_find(
vector=query_embedding,
limit=query.similarity_top_k,
filter=query_metadata,
)
# Get the scores associated with each
top_k_scores = [match["$similarity"] for match in matches]
elif query.mode == VectorStoreQueryMode.MMR:
# Querying a larger number of vectors and then doing MMR on them.
if (
kwargs.get("mmr_prefetch_factor") is not None
and kwargs.get("mmr_prefetch_k") is not None
):
raise ValueError(
"'mmr_prefetch_factor' and 'mmr_prefetch_k' "
"cannot coexist in a call to query()"
)
else:
if kwargs.get("mmr_prefetch_k") is not None:
prefetch_k0 = int(kwargs["mmr_prefetch_k"])
else:
prefetch_k0 = int(
query.similarity_top_k
* kwargs.get("mmr_prefetch_factor", DEFAULT_MMR_PREFETCH_FACTOR)
)
# Get the most we can possibly need to fetch
prefetch_k = max(prefetch_k0, query.similarity_top_k)
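            # e.g. similarity_top_k=3 with the default prefetch factor of 4.0
            # fetches up to 12 candidate vectors before applying MMR.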
# Call AstraPy to fetch them
prefetch_matches = self._astra_db_collection.vector_find(
vector=query_embedding,
limit=prefetch_k,
filter=query_metadata,
)
# Get the MMR threshold
mmr_threshold = query.mmr_threshold or kwargs.get("mmr_threshold")
# If we have found documents, we can proceed
if prefetch_matches:
zipped_indices, zipped_embeddings = zip(
*enumerate(match["$vector"] for match in prefetch_matches)
)
pf_match_indices, pf_match_embeddings = list(zipped_indices), list(
zipped_embeddings
)
else:
pf_match_indices, pf_match_embeddings = [], []
# Call the Llama utility function to get the top k
mmr_similarities, mmr_indices = get_top_k_mmr_embeddings(
query_embedding,
pf_match_embeddings,
similarity_top_k=query.similarity_top_k,
embedding_ids=pf_match_indices,
mmr_threshold=mmr_threshold,
)
# Finally, build the final results based on the mmr values
matches = [prefetch_matches[mmr_index] for mmr_index in mmr_indices]
top_k_scores = mmr_similarities
# We have three lists to return
top_k_nodes = []
top_k_ids = []
# Get every match
for match in matches:
# Check whether we have a llama-generated node content field
if "_node_content" not in match["metadata"]:
match["metadata"]["_node_content"] = json.dumps(match)
# Create a new node object from the node metadata
node = metadata_dict_to_node(match["metadata"], text=match["content"])
# Append to the respective lists
top_k_nodes.append(node)
top_k_ids.append(match["_id"])
# return our final result
return VectorStoreQueryResult(
nodes=top_k_nodes,
similarities=top_k_scores,
ids=top_k_ids,
)
| [
"llama_index.core.indices.query.embedding_utils.get_top_k_mmr_embeddings",
"llama_index.core.bridge.pydantic.PrivateAttr",
"llama_index.core.vector_stores.utils.node_to_metadata_dict",
"llama_index.core.vector_stores.utils.metadata_dict_to_node",
"llama_index.core.vector_stores.types.VectorStoreQueryResult"
] | [((852, 879), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (869, 879), False, 'import logging\n'), ((2070, 2083), 'llama_index.core.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2081, 2083), False, 'from llama_index.core.bridge.pydantic import PrivateAttr\n'), ((2118, 2131), 'llama_index.core.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2129, 2131), False, 'from llama_index.core.bridge.pydantic import PrivateAttr\n'), ((2153, 2166), 'llama_index.core.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2164, 2166), False, 'from llama_index.core.bridge.pydantic import PrivateAttr\n'), ((2199, 2212), 'llama_index.core.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2210, 2212), False, 'from llama_index.core.bridge.pydantic import PrivateAttr\n'), ((9525, 9565), 'typing.cast', 'cast', (['List[float]', 'query.query_embedding'], {}), '(List[float], query.query_embedding)\n', (9529, 9565), False, 'from typing import Any, Dict, List, Optional, cast\n'), ((13259, 13347), 'llama_index.core.vector_stores.types.VectorStoreQueryResult', 'VectorStoreQueryResult', ([], {'nodes': 'top_k_nodes', 'similarities': 'top_k_scores', 'ids': 'top_k_ids'}), '(nodes=top_k_nodes, similarities=top_k_scores, ids=\n top_k_ids)\n', (13281, 13347), False, 'from llama_index.core.vector_stores.types import BasePydanticVectorStore, ExactMatchFilter, FilterOperator, MetadataFilter, MetadataFilters, VectorStoreQuery, VectorStoreQueryMode, VectorStoreQueryResult\n'), ((6510, 6589), 'llama_index.core.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(True)', 'flat_metadata': 'self.flat_metadata'}), '(node, remove_text=True, flat_metadata=self.flat_metadata)\n', (6531, 6589), False, 'from llama_index.core.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((13019, 13082), 'llama_index.core.vector_stores.utils.metadata_dict_to_node', 'metadata_dict_to_node', (["match['metadata']"], {'text': "match['content']"}), "(match['metadata'], text=match['content'])\n", (13040, 13082), False, 'from llama_index.core.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((12133, 12305), 'llama_index.core.indices.query.embedding_utils.get_top_k_mmr_embeddings', 'get_top_k_mmr_embeddings', (['query_embedding', 'pf_match_embeddings'], {'similarity_top_k': 'query.similarity_top_k', 'embedding_ids': 'pf_match_indices', 'mmr_threshold': 'mmr_threshold'}), '(query_embedding, pf_match_embeddings,\n similarity_top_k=query.similarity_top_k, embedding_ids=pf_match_indices,\n mmr_threshold=mmr_threshold)\n', (12157, 12305), False, 'from llama_index.core.indices.query.embedding_utils import get_top_k_mmr_embeddings\n'), ((12919, 12936), 'json.dumps', 'json.dumps', (['match'], {}), '(match)\n', (12929, 12936), False, 'import json\n'), ((4284, 4638), 'warnings.warn', 'warn', (['f"""Collection \'{collection_name}\' is detected as having indexing turned on for all fields (either created manually or by older versions of this plugin). This implies stricter limitations on the amount of text each entry can store. Consider reindexing anew on a fresh collection to be able to store longer texts."""', 'UserWarning'], {'stacklevel': '(2)'}), '(\n f"Collection \'{collection_name}\' is detected as having indexing turned on for all fields (either created manually or by older versions of this plugin). This implies stricter limitations on the amount of text each entry can store. 
Consider reindexing anew on a fresh collection to be able to store longer texts."\n , UserWarning, stacklevel=2)\n', (4288, 4638), False, 'from warnings import warn\n'), ((5177, 5216), 'json.dumps', 'json.dumps', (["pre_col_options['indexing']"], {}), "(pre_col_options['indexing'])\n", (5187, 5216), False, 'import json\n'), ((5237, 5553), 'warnings.warn', 'warn', (['f"""Collection \'{collection_name}\' has unexpected \'indexing\' settings (options.indexing = {options_json}). This can result in odd behaviour when running metadata filtering and/or unwarranted limitations on storing long texts. Consider reindexing anew on a fresh collection."""', 'UserWarning'], {'stacklevel': '(2)'}), '(\n f"Collection \'{collection_name}\' has unexpected \'indexing\' settings (options.indexing = {options_json}). This can result in odd behaviour when running metadata filtering and/or unwarranted limitations on storing long texts. Consider reindexing anew on a fresh collection."\n , UserWarning, stacklevel=2)\n', (5241, 5553), False, 'from warnings import warn\n')] |
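A minimal usage sketch for the Astra DB store above, not part of the extracted source; the endpoint, token, and collection name are placeholders, and the import path assumes the separately packaged `llama_index.vector_stores.astra_db` module.

from llama_index.core.vector_stores.types import (
    VectorStoreQuery,
    VectorStoreQueryMode,
)
from llama_index.vector_stores.astra_db import AstraDBVectorStore

store = AstraDBVectorStore(
    collection_name="demo_collection",
    token="AstraCS:...",  # placeholder application token
    api_endpoint="https://<db-id>-<region>.apps.astra.datastax.com",
    embedding_dimension=3,
)
# MMR mode re-ranks a larger prefetched candidate set for diversity.
result = store.query(
    VectorStoreQuery(
        query_embedding=[0.1, 0.2, 0.3],
        similarity_top_k=2,
        mode=VectorStoreQueryMode.MMR,
    ),
    mmr_prefetch_factor=4.0,
)
print(result.ids, result.similarities)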
from unittest.mock import MagicMock, patch
import pytest
from llama_index.legacy.core.response.schema import Response
from llama_index.legacy.schema import Document
try:
import google.ai.generativelanguage as genai
has_google = True
except ImportError:
has_google = False
from llama_index.legacy.indices.managed.google.generativeai import (
GoogleIndex,
set_google_config,
)
SKIP_TEST_REASON = "Google GenerativeAI is not installed"
if has_google:
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
set_google_config(
api_endpoint="No-such-endpoint-to-prevent-hitting-real-backend",
testing=True,
)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.auth.credentials.Credentials")
def test_set_google_config(mock_credentials: MagicMock) -> None:
set_google_config(auth_credentials=mock_credentials)
config = genaix.get_config()
assert config.auth_credentials == mock_credentials
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.RetrieverServiceClient.get_corpus")
def test_from_corpus(mock_get_corpus: MagicMock) -> None:
# Arrange
mock_get_corpus.return_value = genai.Corpus(name="corpora/123")
# Act
store = GoogleIndex.from_corpus(corpus_id="123")
# Assert
assert store.corpus_id == "123"
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.RetrieverServiceClient.create_corpus")
def test_create_corpus(mock_create_corpus: MagicMock) -> None:
def fake_create_corpus(request: genai.CreateCorpusRequest) -> genai.Corpus:
return request.corpus
# Arrange
mock_create_corpus.side_effect = fake_create_corpus
# Act
store = GoogleIndex.create_corpus(display_name="My first corpus")
# Assert
assert len(store.corpus_id) > 0
assert mock_create_corpus.call_count == 1
request = mock_create_corpus.call_args.args[0]
assert request.corpus.name == f"corpora/{store.corpus_id}"
assert request.corpus.display_name == "My first corpus"
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.RetrieverServiceClient.create_corpus")
@patch("google.ai.generativelanguage.RetrieverServiceClient.create_document")
@patch("google.ai.generativelanguage.RetrieverServiceClient.batch_create_chunks")
@patch("google.ai.generativelanguage.RetrieverServiceClient.get_document")
def test_from_documents(
mock_get_document: MagicMock,
mock_batch_create_chunk: MagicMock,
mock_create_document: MagicMock,
mock_create_corpus: MagicMock,
) -> None:
from google.api_core import exceptions as gapi_exception
def fake_create_corpus(request: genai.CreateCorpusRequest) -> genai.Corpus:
return request.corpus
# Arrange
mock_get_document.side_effect = gapi_exception.NotFound("")
mock_create_corpus.side_effect = fake_create_corpus
mock_create_document.return_value = genai.Document(name="corpora/123/documents/456")
mock_batch_create_chunk.side_effect = [
genai.BatchCreateChunksResponse(
chunks=[
genai.Chunk(name="corpora/123/documents/456/chunks/777"),
]
),
genai.BatchCreateChunksResponse(
chunks=[
genai.Chunk(name="corpora/123/documents/456/chunks/888"),
]
),
]
# Act
index = GoogleIndex.from_documents(
[
Document(text="Hello, my darling"),
Document(text="Goodbye, my baby"),
]
)
# Assert
assert mock_create_corpus.call_count == 1
create_corpus_request = mock_create_corpus.call_args.args[0]
assert create_corpus_request.corpus.name == f"corpora/{index.corpus_id}"
create_document_request = mock_create_document.call_args.args[0]
assert create_document_request.parent == f"corpora/{index.corpus_id}"
assert mock_batch_create_chunk.call_count == 2
first_batch_request = mock_batch_create_chunk.call_args_list[0].args[0]
assert (
first_batch_request.requests[0].chunk.data.string_value == "Hello, my darling"
)
second_batch_request = mock_batch_create_chunk.call_args_list[1].args[0]
assert (
second_batch_request.requests[0].chunk.data.string_value == "Goodbye, my baby"
)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.RetrieverServiceClient.query_corpus")
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
@patch("google.ai.generativelanguage.RetrieverServiceClient.get_corpus")
def test_as_query_engine(
mock_get_corpus: MagicMock,
mock_generate_answer: MagicMock,
mock_query_corpus: MagicMock,
) -> None:
# Arrange
mock_get_corpus.return_value = genai.Corpus(name="corpora/123")
mock_query_corpus.return_value = genai.QueryCorpusResponse(
relevant_chunks=[
genai.RelevantChunk(
chunk=genai.Chunk(
name="corpora/123/documents/456/chunks/789",
data=genai.ChunkData(string_value="It's 42"),
),
chunk_relevance_score=0.9,
)
]
)
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[genai.Part(text="42")]),
grounding_attributions=[
genai.GroundingAttribution(
content=genai.Content(
parts=[genai.Part(text="Meaning of life is 42")]
),
source_id=genai.AttributionSourceId(
grounding_passage=genai.AttributionSourceId.GroundingPassageId(
passage_id="corpora/123/documents/456/chunks/777",
part_index=0,
)
),
),
genai.GroundingAttribution(
content=genai.Content(parts=[genai.Part(text="Or maybe not")]),
source_id=genai.AttributionSourceId(
grounding_passage=genai.AttributionSourceId.GroundingPassageId(
passage_id="corpora/123/documents/456/chunks/888",
part_index=0,
)
),
),
],
finish_reason=genai.Candidate.FinishReason.STOP,
),
answerable_probability=0.9,
)
# Act
index = GoogleIndex.from_corpus(corpus_id="123")
query_engine = index.as_query_engine(
answer_style=genai.GenerateAnswerRequest.AnswerStyle.EXTRACTIVE
)
response = query_engine.query("What is the meaning of life?")
# Assert
assert mock_query_corpus.call_count == 1
query_corpus_request = mock_query_corpus.call_args.args[0]
assert query_corpus_request.name == "corpora/123"
assert query_corpus_request.query == "What is the meaning of life?"
assert isinstance(response, Response)
assert response.response == "42"
assert mock_generate_answer.call_count == 1
generate_answer_request = mock_generate_answer.call_args.args[0]
assert (
generate_answer_request.contents[0].parts[0].text
== "What is the meaning of life?"
)
assert (
generate_answer_request.answer_style
== genai.GenerateAnswerRequest.AnswerStyle.EXTRACTIVE
)
passages = generate_answer_request.inline_passages.passages
assert len(passages) == 1
passage = passages[0]
assert passage.content.parts[0].text == "It's 42"
| [
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_config",
"llama_index.legacy.indices.managed.google.generativeai.set_google_config",
"llama_index.legacy.schema.Document",
"llama_index.legacy.indices.managed.google.generativeai.GoogleIndex.from_corpus",
"llama_index.legacy.indices.managed.google.generativeai.GoogleIndex.create_corpus"
] | [((693, 752), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (711, 752), False, 'import pytest\n'), ((754, 798), 'unittest.mock.patch', 'patch', (['"""google.auth.credentials.Credentials"""'], {}), "('google.auth.credentials.Credentials')\n", (759, 798), False, 'from unittest.mock import MagicMock, patch\n'), ((1012, 1071), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (1030, 1071), False, 'import pytest\n'), ((1073, 1144), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.get_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.get_corpus')\n", (1078, 1144), False, 'from unittest.mock import MagicMock, patch\n'), ((1402, 1461), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (1420, 1461), False, 'import pytest\n'), ((1463, 1537), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.create_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.create_corpus')\n", (1468, 1537), False, 'from unittest.mock import MagicMock, patch\n'), ((2137, 2196), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (2155, 2196), False, 'import pytest\n'), ((2198, 2272), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.create_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.create_corpus')\n", (2203, 2272), False, 'from unittest.mock import MagicMock, patch\n'), ((2274, 2350), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.create_document"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.create_document')\n", (2279, 2350), False, 'from unittest.mock import MagicMock, patch\n'), ((2352, 2437), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.batch_create_chunks"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.batch_create_chunks'\n )\n", (2357, 2437), False, 'from unittest.mock import MagicMock, patch\n'), ((2434, 2507), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.get_document"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.get_document')\n", (2439, 2507), False, 'from unittest.mock import MagicMock, patch\n'), ((4398, 4457), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (4416, 4457), False, 'import pytest\n'), ((4459, 4532), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.query_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.query_corpus')\n", (4464, 4532), False, 'from unittest.mock import MagicMock, patch\n'), ((4534, 4611), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.GenerativeServiceClient.generate_answer"""'], {}), "('google.ai.generativelanguage.GenerativeServiceClient.generate_answer')\n", (4539, 4611), False, 'from unittest.mock import MagicMock, patch\n'), ((4613, 4684), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.get_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.get_corpus')\n", (4618, 4684), False, 'from unittest.mock import MagicMock, patch\n'), ((570, 671), 'llama_index.legacy.indices.managed.google.generativeai.set_google_config', 'set_google_config', ([], {'api_endpoint': '"""No-such-endpoint-to-prevent-hitting-real-backend"""', 'testing': '(True)'}), "(api_endpoint=\n 'No-such-endpoint-to-prevent-hitting-real-backend', testing=True)\n", (587, 671), False, 'from llama_index.legacy.indices.managed.google.generativeai import GoogleIndex, set_google_config\n'), ((868, 920), 'llama_index.legacy.indices.managed.google.generativeai.set_google_config', 'set_google_config', ([], {'auth_credentials': 'mock_credentials'}), '(auth_credentials=mock_credentials)\n', (885, 920), False, 'from llama_index.legacy.indices.managed.google.generativeai import GoogleIndex, set_google_config\n'), ((934, 953), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_config', 'genaix.get_config', ([], {}), '()\n', (951, 953), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((1252, 1284), 'google.ai.generativelanguage.Corpus', 'genai.Corpus', ([], {'name': '"""corpora/123"""'}), "(name='corpora/123')\n", (1264, 1284), True, 'import google.ai.generativelanguage as genai\n'), ((1308, 1348), 'llama_index.legacy.indices.managed.google.generativeai.GoogleIndex.from_corpus', 'GoogleIndex.from_corpus', ([], {'corpus_id': '"""123"""'}), "(corpus_id='123')\n", (1331, 1348), False, 'from llama_index.legacy.indices.managed.google.generativeai import GoogleIndex, set_google_config\n'), ((1805, 1862), 'llama_index.legacy.indices.managed.google.generativeai.GoogleIndex.create_corpus', 'GoogleIndex.create_corpus', ([], {'display_name': '"""My first corpus"""'}), "(display_name='My first corpus')\n", (1830, 1862), False, 'from llama_index.legacy.indices.managed.google.generativeai import GoogleIndex, set_google_config\n'), ((2913, 2940), 'google.api_core.exceptions.NotFound', 'gapi_exception.NotFound', (['""""""'], {}), "('')\n", (2936, 2940), True, 'from google.api_core import exceptions as gapi_exception\n'), ((3037, 3085), 'google.ai.generativelanguage.Document', 'genai.Document', ([], {'name': '"""corpora/123/documents/456"""'}), "(name='corpora/123/documents/456')\n", (3051, 3085), True, 'import google.ai.generativelanguage as genai\n'), ((4874, 4906), 'google.ai.generativelanguage.Corpus', 'genai.Corpus', ([], {'name': '"""corpora/123"""'}), "(name='corpora/123')\n", (4886, 4906), True, 'import google.ai.generativelanguage as genai\n'), ((6624, 6664), 'llama_index.legacy.indices.managed.google.generativeai.GoogleIndex.from_corpus', 'GoogleIndex.from_corpus', ([], {'corpus_id': '"""123"""'}), "(corpus_id='123')\n", (6647, 6664), False, 'from llama_index.legacy.indices.managed.google.generativeai import GoogleIndex, set_google_config\n'), ((3531, 3565), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': '"""Hello, my darling"""'}), "(text='Hello, my darling')\n", (3539, 3565), False, 'from llama_index.legacy.schema import Document\n'), ((3579, 3612), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': '"""Goodbye, my baby"""'}), "(text='Goodbye, my baby')\n", (3587, 3612), False, 'from llama_index.legacy.schema import Document\n'), ((3208, 3264), 'google.ai.generativelanguage.Chunk', 'genai.Chunk', ([], {'name': '"""corpora/123/documents/456/chunks/777"""'}), "(name='corpora/123/documents/456/chunks/777')\n", (3219, 3264), True, 'import google.ai.generativelanguage as genai\n'), ((3369, 3425), 'google.ai.generativelanguage.Chunk', 'genai.Chunk', ([], {'name': '"""corpora/123/documents/456/chunks/888"""'}), "(name='corpora/123/documents/456/chunks/888')\n", (3380, 3425), True, 'import google.ai.generativelanguage as genai\n'), ((5155, 5194), 'google.ai.generativelanguage.ChunkData', 'genai.ChunkData', ([], {'string_value': '"""It\'s 42"""'}), '(string_value="It\'s 42")\n', (5170, 5194), True, 'import google.ai.generativelanguage as genai\n'), ((5431, 5452), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""42"""'}), "(text='42')\n", (5441, 5452), True, 'import google.ai.generativelanguage as genai\n'), ((5775, 5889), 'google.ai.generativelanguage.AttributionSourceId.GroundingPassageId', 'genai.AttributionSourceId.GroundingPassageId', ([], {'passage_id': '"""corpora/123/documents/456/chunks/777"""', 'part_index': '(0)'}), "(passage_id=\n 'corpora/123/documents/456/chunks/777', part_index=0)\n", (5819, 5889), True, 'import google.ai.generativelanguage as genai\n'), ((6237, 6351), 'google.ai.generativelanguage.AttributionSourceId.GroundingPassageId', 'genai.AttributionSourceId.GroundingPassageId', ([], {'passage_id': '"""corpora/123/documents/456/chunks/888"""', 'part_index': '(0)'}), "(passage_id=\n 'corpora/123/documents/456/chunks/888', part_index=0)\n", (6281, 6351), True, 'import google.ai.generativelanguage as genai\n'), ((5611, 5651), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""Meaning of life is 42"""'}), "(text='Meaning of life is 42')\n", (5621, 5651), True, 'import google.ai.generativelanguage as genai\n'), ((6103, 6134), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""Or maybe not"""'}), "(text='Or maybe not')\n", (6113, 6134), True, 'import google.ai.generativelanguage as genai\n')]
from unittest.mock import MagicMock, patch
import pytest
try:
import google.ai.generativelanguage as genai
has_google = True
except ImportError:
has_google = False
from llama_index.legacy.response_synthesizers.google.generativeai import (
GoogleTextSynthesizer,
set_google_config,
)
from llama_index.legacy.schema import NodeWithScore, TextNode
SKIP_TEST_REASON = "Google GenerativeAI is not installed"
if has_google:
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
set_google_config(
api_endpoint="No-such-endpoint-to-prevent-hitting-real-backend",
testing=True,
)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.auth.credentials.Credentials")
def test_set_google_config(mock_credentials: MagicMock) -> None:
set_google_config(auth_credentials=mock_credentials)
config = genaix.get_config()
assert config.auth_credentials == mock_credentials
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
def test_get_response(mock_generate_answer: MagicMock) -> None:
# Arrange
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[genai.Part(text="42")]),
grounding_attributions=[
genai.GroundingAttribution(
content=genai.Content(
parts=[genai.Part(text="Meaning of life is 42.")]
),
source_id=genai.AttributionSourceId(
grounding_passage=genai.AttributionSourceId.GroundingPassageId(
passage_id="corpora/123/documents/456/chunks/789",
part_index=0,
)
),
),
],
finish_reason=genai.Candidate.FinishReason.STOP,
),
answerable_probability=0.7,
)
# Act
synthesizer = GoogleTextSynthesizer.from_defaults(
temperature=0.5,
answer_style=genai.GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE,
safety_setting=[
genai.SafetySetting(
category=genai.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold=genai.SafetySetting.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
)
],
)
response = synthesizer.get_response(
query_str="What is the meaning of life?",
text_chunks=[
"It's 42",
],
)
# Assert
assert response.answer == "42"
assert response.attributed_passages == ["Meaning of life is 42."]
assert response.answerable_probability == pytest.approx(0.7)
assert mock_generate_answer.call_count == 1
request = mock_generate_answer.call_args.args[0]
assert request.contents[0].parts[0].text == "What is the meaning of life?"
assert request.answer_style == genai.GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE
assert len(request.safety_settings) == 1
assert (
request.safety_settings[0].category
== genai.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT
)
assert (
request.safety_settings[0].threshold
== genai.SafetySetting.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
)
assert request.temperature == 0.5
passages = request.inline_passages.passages
assert len(passages) == 1
passage = passages[0]
assert passage.content.parts[0].text == "It's 42"
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
def test_synthesize(mock_generate_answer: MagicMock) -> None:
# Arrange
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[genai.Part(text="42")]),
grounding_attributions=[
genai.GroundingAttribution(
content=genai.Content(
parts=[genai.Part(text="Meaning of life is 42")]
),
source_id=genai.AttributionSourceId(
grounding_passage=genai.AttributionSourceId.GroundingPassageId(
passage_id="corpora/123/documents/456/chunks/777",
part_index=0,
)
),
),
genai.GroundingAttribution(
content=genai.Content(parts=[genai.Part(text="Or maybe not")]),
source_id=genai.AttributionSourceId(
grounding_passage=genai.AttributionSourceId.GroundingPassageId(
passage_id="corpora/123/documents/456/chunks/888",
part_index=0,
)
),
),
],
finish_reason=genai.Candidate.FinishReason.STOP,
),
answerable_probability=0.9,
)
# Act
synthesizer = GoogleTextSynthesizer.from_defaults()
response = synthesizer.synthesize(
query="What is the meaning of life?",
nodes=[
NodeWithScore(
node=TextNode(text="It's 42"),
score=0.5,
),
],
additional_source_nodes=[
NodeWithScore(
node=TextNode(text="Additional node"),
score=0.4,
),
],
)
# Assert
assert response.response == "42"
assert len(response.source_nodes) == 4
first_attributed_source = response.source_nodes[0]
assert first_attributed_source.node.text == "Meaning of life is 42"
assert first_attributed_source.score is None
second_attributed_source = response.source_nodes[1]
assert second_attributed_source.node.text == "Or maybe not"
assert second_attributed_source.score is None
first_input_source = response.source_nodes[2]
assert first_input_source.node.text == "It's 42"
assert first_input_source.score == pytest.approx(0.5)
first_additional_source = response.source_nodes[3]
assert first_additional_source.node.text == "Additional node"
assert first_additional_source.score == pytest.approx(0.4)
assert response.metadata is not None
assert response.metadata.get("answerable_probability", None) == pytest.approx(0.9)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
def test_synthesize_with_max_token_blocking(mock_generate_answer: MagicMock) -> None:
# Arrange
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[]),
grounding_attributions=[],
finish_reason=genai.Candidate.FinishReason.MAX_TOKENS,
),
)
# Act
synthesizer = GoogleTextSynthesizer.from_defaults()
with pytest.raises(Exception) as e:
synthesizer.synthesize(
query="What is the meaning of life?",
nodes=[
NodeWithScore(
node=TextNode(text="It's 42"),
score=0.5,
),
],
)
# Assert
assert "Maximum token" in str(e.value)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
def test_synthesize_with_safety_blocking(mock_generate_answer: MagicMock) -> None:
# Arrange
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[]),
grounding_attributions=[],
finish_reason=genai.Candidate.FinishReason.SAFETY,
),
)
# Act
synthesizer = GoogleTextSynthesizer.from_defaults()
with pytest.raises(Exception) as e:
synthesizer.synthesize(
query="What is the meaning of life?",
nodes=[
NodeWithScore(
node=TextNode(text="It's 42"),
score=0.5,
),
],
)
# Assert
assert "safety" in str(e.value)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
def test_synthesize_with_recitation_blocking(mock_generate_answer: MagicMock) -> None:
# Arrange
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[]),
grounding_attributions=[],
finish_reason=genai.Candidate.FinishReason.RECITATION,
),
)
# Act
synthesizer = GoogleTextSynthesizer.from_defaults()
with pytest.raises(Exception) as e:
synthesizer.synthesize(
query="What is the meaning of life?",
nodes=[
NodeWithScore(
node=TextNode(text="It's 42"),
score=0.5,
),
],
)
# Assert
assert "recitation" in str(e.value)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
def test_synthesize_with_unknown_blocking(mock_generate_answer: MagicMock) -> None:
# Arrange
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[]),
grounding_attributions=[],
finish_reason=genai.Candidate.FinishReason.OTHER,
),
)
# Act
synthesizer = GoogleTextSynthesizer.from_defaults()
with pytest.raises(Exception) as e:
synthesizer.synthesize(
query="What is the meaning of life?",
nodes=[
NodeWithScore(
node=TextNode(text="It's 42"),
score=0.5,
),
],
)
# Assert
assert "Unexpected" in str(e.value)
| [
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_config",
"llama_index.legacy.response_synthesizers.google.generativeai.GoogleTextSynthesizer.from_defaults",
"llama_index.legacy.schema.TextNode",
"llama_index.legacy.response_synthesizers.google.generativeai.set_google_config"
] | [((663, 722), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (681, 722), False, 'import pytest\n'), ((724, 768), 'unittest.mock.patch', 'patch', (['"""google.auth.credentials.Credentials"""'], {}), "('google.auth.credentials.Credentials')\n", (729, 768), False, 'from unittest.mock import MagicMock, patch\n'), ((982, 1041), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (1000, 1041), False, 'import pytest\n'), ((1043, 1120), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.GenerativeServiceClient.generate_answer"""'], {}), "('google.ai.generativelanguage.GenerativeServiceClient.generate_answer')\n", (1048, 1120), False, 'from unittest.mock import MagicMock, patch\n'), ((3580, 3639), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (3598, 3639), False, 'import pytest\n'), ((3641, 3718), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.GenerativeServiceClient.generate_answer"""'], {}), "('google.ai.generativelanguage.GenerativeServiceClient.generate_answer')\n", (3646, 3718), False, 'from unittest.mock import MagicMock, patch\n'), ((6499, 6558), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (6517, 6558), False, 'import pytest\n'), ((6560, 6637), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.GenerativeServiceClient.generate_answer"""'], {}), "('google.ai.generativelanguage.GenerativeServiceClient.generate_answer')\n", (6565, 6637), False, 'from unittest.mock import MagicMock, patch\n'), ((7434, 7493), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (7452, 7493), False, 'import pytest\n'), ((7495, 7572), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.GenerativeServiceClient.generate_answer"""'], {}), "('google.ai.generativelanguage.GenerativeServiceClient.generate_answer')\n", (7500, 7572), False, 'from unittest.mock import MagicMock, patch\n'), ((8355, 8414), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (8373, 8414), False, 'import pytest\n'), ((8416, 8493), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.GenerativeServiceClient.generate_answer"""'], {}), "('google.ai.generativelanguage.GenerativeServiceClient.generate_answer')\n", (8421, 8493), False, 'from unittest.mock import MagicMock, patch\n'), ((9288, 9347), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (9306, 9347), False, 'import pytest\n'), ((9349, 9426), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.GenerativeServiceClient.generate_answer"""'], {}), "('google.ai.generativelanguage.GenerativeServiceClient.generate_answer')\n", (9354, 9426), False, 'from unittest.mock import MagicMock, patch\n'), ((540, 641), 'llama_index.legacy.response_synthesizers.google.generativeai.set_google_config', 'set_google_config', ([], {'api_endpoint': '"""No-such-endpoint-to-prevent-hitting-real-backend"""', 'testing': '(True)'}), "(api_endpoint=\n 'No-such-endpoint-to-prevent-hitting-real-backend', testing=True)\n", (557, 641), False, 'from llama_index.legacy.response_synthesizers.google.generativeai import GoogleTextSynthesizer, set_google_config\n'), ((838, 890), 'llama_index.legacy.response_synthesizers.google.generativeai.set_google_config', 'set_google_config', ([], {'auth_credentials': 'mock_credentials'}), '(auth_credentials=mock_credentials)\n', (855, 890), False, 'from llama_index.legacy.response_synthesizers.google.generativeai import GoogleTextSynthesizer, set_google_config\n'), ((904, 923), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_config', 'genaix.get_config', ([], {}), '()\n', (921, 923), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((5137, 5174), 'llama_index.legacy.response_synthesizers.google.generativeai.GoogleTextSynthesizer.from_defaults', 'GoogleTextSynthesizer.from_defaults', ([], {}), '()\n', (5172, 5174), False, 'from llama_index.legacy.response_synthesizers.google.generativeai import GoogleTextSynthesizer, set_google_config\n'), ((7037, 7074), 'llama_index.legacy.response_synthesizers.google.generativeai.GoogleTextSynthesizer.from_defaults', 'GoogleTextSynthesizer.from_defaults', ([], {}), '()\n', (7072, 7074), False, 'from llama_index.legacy.response_synthesizers.google.generativeai import GoogleTextSynthesizer, set_google_config\n'), ((7965, 8002), 'llama_index.legacy.response_synthesizers.google.generativeai.GoogleTextSynthesizer.from_defaults', 'GoogleTextSynthesizer.from_defaults', ([], {}), '()\n', (8000, 8002), False, 'from llama_index.legacy.response_synthesizers.google.generativeai import GoogleTextSynthesizer, set_google_config\n'), ((8894, 8931), 'llama_index.legacy.response_synthesizers.google.generativeai.GoogleTextSynthesizer.from_defaults', 'GoogleTextSynthesizer.from_defaults', ([], {}), '()\n', (8929, 8931), False, 'from llama_index.legacy.response_synthesizers.google.generativeai import GoogleTextSynthesizer, set_google_config\n'), ((9819, 9856), 'llama_index.legacy.response_synthesizers.google.generativeai.GoogleTextSynthesizer.from_defaults', 'GoogleTextSynthesizer.from_defaults', ([], {}), '()\n', (9854, 9856), False, 'from llama_index.legacy.response_synthesizers.google.generativeai import GoogleTextSynthesizer, set_google_config\n'), ((2786, 2804), 'pytest.approx', 'pytest.approx', (['(0.7)'], {}), '(0.7)\n', (2799, 2804), False, 'import pytest\n'), ((6163, 6181), 'pytest.approx', 'pytest.approx', (['(0.5)'], {}), '(0.5)\n', (6176, 6181), False, 'import pytest\n'), ((6348, 6366), 'pytest.approx', 'pytest.approx', (['(0.4)'], {}), '(0.4)\n', (6361, 6366), False, 'import pytest\n'), ((6477, 6495), 'pytest.approx', 'pytest.approx', (['(0.9)'], {}), '(0.9)\n', (6490, 6495), False, 'import pytest\n'), ((7084, 7108), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (7097, 7108), False, 'import pytest\n'), ((8012, 8036), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (8025, 8036), False, 'import pytest\n'), ((8941, 8965), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (8954, 8965), False, 'import pytest\n'), ((9866, 9890), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (9879, 9890), False, 'import pytest\n'), ((2253, 2413), 'google.ai.generativelanguage.SafetySetting', 'genai.SafetySetting', ([], {'category': 'genai.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT', 'threshold': 'genai.SafetySetting.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE'}), '(category=genai.HarmCategory.\n HARM_CATEGORY_SEXUALLY_EXPLICIT, threshold=genai.SafetySetting.\n HarmBlockThreshold.BLOCK_LOW_AND_ABOVE)\n', (2272, 2413), True, 'import google.ai.generativelanguage as genai\n'), ((6860, 6883), 'google.ai.generativelanguage.Content', 'genai.Content', ([], {'parts': '[]'}), '(parts=[])\n', (6873, 6883), True, 'import google.ai.generativelanguage as genai\n'), ((7792, 7815), 'google.ai.generativelanguage.Content', 'genai.Content', ([], {'parts': '[]'}), '(parts=[])\n', (7805, 7815), True, 'import google.ai.generativelanguage as genai\n'), ((8717, 8740), 'google.ai.generativelanguage.Content', 'genai.Content', ([], {'parts': '[]'}), '(parts=[])\n', (8730, 8740), True, 'import google.ai.generativelanguage as genai\n'), ((9647, 9670), 'google.ai.generativelanguage.Content', 'genai.Content', ([], {'parts': '[]'}), '(parts=[])\n', (9660, 9670), True, 'import google.ai.generativelanguage as genai\n'), ((5324, 5348), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': '"""It\'s 42"""'}), '(text="It\'s 42")\n', (5332, 5348), False, 'from llama_index.legacy.schema import NodeWithScore, TextNode\n'), ((5485, 5517), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': '"""Additional node"""'}), "(text='Additional node')\n", (5493, 5517), False, 'from llama_index.legacy.schema import NodeWithScore, TextNode\n'), ((7273, 7297), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': '"""It\'s 42"""'}), '(text="It\'s 42")\n', (7281, 7297), False, 'from llama_index.legacy.schema import NodeWithScore, TextNode\n'), ((8201, 8225), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': '"""It\'s 42"""'}), '(text="It\'s 42")\n', (8209, 8225), False, 'from llama_index.legacy.schema import NodeWithScore, TextNode\n'), ((9130, 9154), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': '"""It\'s 42"""'}), '(text="It\'s 42")\n', (9138, 9154), False, 'from llama_index.legacy.schema import NodeWithScore, TextNode\n'), ((10055, 10079), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': '"""It\'s 42"""'}), '(text="It\'s 42")\n', (10063, 10079), False, 'from llama_index.legacy.schema import NodeWithScore, TextNode\n'), ((1342, 1363), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""42"""'}), "(text='42')\n", (1352, 1363), True, 'import google.ai.generativelanguage as genai\n'), ((3938, 3959), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""42"""'}), "(text='42')\n", (3948, 3959), True, 'import google.ai.generativelanguage as genai\n'), ((1687, 1801), 'google.ai.generativelanguage.AttributionSourceId.GroundingPassageId', 'genai.AttributionSourceId.GroundingPassageId', ([], {'passage_id': '"""corpora/123/documents/456/chunks/789"""', 'part_index': '(0)'}), "(passage_id=\n 'corpora/123/documents/456/chunks/789', part_index=0)\n", (1731, 1801), True, 'import google.ai.generativelanguage as genai\n'), ((4282, 4396), 'google.ai.generativelanguage.AttributionSourceId.GroundingPassageId', 'genai.AttributionSourceId.GroundingPassageId', ([], {'passage_id': '"""corpora/123/documents/456/chunks/777"""', 'part_index': '(0)'}), "(passage_id=\n 'corpora/123/documents/456/chunks/777', part_index=0)\n", (4326, 4396), True, 'import google.ai.generativelanguage as genai\n'), ((4744, 4858), 'google.ai.generativelanguage.AttributionSourceId.GroundingPassageId', 'genai.AttributionSourceId.GroundingPassageId', ([], {'passage_id': '"""corpora/123/documents/456/chunks/888"""', 'part_index': '(0)'}), "(passage_id=\n 'corpora/123/documents/456/chunks/888', part_index=0)\n", (4788, 4858), True, 'import google.ai.generativelanguage as genai\n'), ((1522, 1563), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""Meaning of life is 42."""'}), "(text='Meaning of life is 42.')\n", (1532, 1563), True, 'import google.ai.generativelanguage as genai\n'), ((4118, 4158), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""Meaning of life is 42"""'}), "(text='Meaning of life is 42')\n", (4128, 4158), True, 'import google.ai.generativelanguage as genai\n'), ((4610, 4641), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""Or maybe not"""'}), "(text='Or maybe not')\n", (4620, 4641), True, 'import google.ai.generativelanguage as genai\n')]
from typing import Any, Dict, List, Optional, Tuple
from llama_index.core.base.base_query_engine import BaseQueryEngine
from llama_index.core.base.response.schema import RESPONSE_TYPE
from llama_index.core.callbacks.schema import CBEventType, EventPayload
from llama_index.core.indices.composability.graph import ComposableGraph
from llama_index.core.schema import IndexNode, NodeWithScore, QueryBundle, TextNode
from llama_index.core.settings import (
Settings,
callback_manager_from_settings_or_context,
)
import llama_index.core.instrumentation as instrument
dispatcher = instrument.get_dispatcher(__name__)
class ComposableGraphQueryEngine(BaseQueryEngine):
"""Composable graph query engine.
This query engine can operate over a ComposableGraph.
It can take in custom query engines for its sub-indices.
Args:
graph (ComposableGraph): A ComposableGraph object.
custom_query_engines (Optional[Dict[str, BaseQueryEngine]]): A dictionary of
custom query engines.
recursive (bool): Whether to recursively query the graph.
**kwargs: additional arguments to be passed to the underlying index query
engine.
"""
def __init__(
self,
graph: ComposableGraph,
custom_query_engines: Optional[Dict[str, BaseQueryEngine]] = None,
recursive: bool = True,
**kwargs: Any
) -> None:
"""Init params."""
self._graph = graph
self._custom_query_engines = custom_query_engines or {}
self._kwargs = kwargs
# additional configs
self._recursive = recursive
callback_manager = callback_manager_from_settings_or_context(
Settings, self._graph.service_context
)
super().__init__(callback_manager=callback_manager)
def _get_prompt_modules(self) -> Dict[str, Any]:
"""Get prompt modules."""
return {}
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
return self._query_index(query_bundle, index_id=None, level=0)
@dispatcher.span
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
return self._query_index(query_bundle, index_id=None, level=0)
def _query_index(
self,
query_bundle: QueryBundle,
index_id: Optional[str] = None,
level: int = 0,
) -> RESPONSE_TYPE:
"""Query a single index."""
index_id = index_id or self._graph.root_id
with self.callback_manager.event(
CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str}
) as query_event:
# get query engine
if index_id in self._custom_query_engines:
query_engine = self._custom_query_engines[index_id]
else:
query_engine = self._graph.get_index(index_id).as_query_engine(
**self._kwargs
)
with self.callback_manager.event(
CBEventType.RETRIEVE,
payload={EventPayload.QUERY_STR: query_bundle.query_str},
) as retrieve_event:
nodes = query_engine.retrieve(query_bundle)
retrieve_event.on_end(payload={EventPayload.NODES: nodes})
if self._recursive:
# do recursion here
nodes_for_synthesis = []
additional_source_nodes = []
for node_with_score in nodes:
node_with_score, source_nodes = self._fetch_recursive_nodes(
node_with_score, query_bundle, level
)
nodes_for_synthesis.append(node_with_score)
additional_source_nodes.extend(source_nodes)
response = query_engine.synthesize(
query_bundle, nodes_for_synthesis, additional_source_nodes
)
else:
response = query_engine.synthesize(query_bundle, nodes)
query_event.on_end(payload={EventPayload.RESPONSE: response})
return response
def _fetch_recursive_nodes(
self,
node_with_score: NodeWithScore,
query_bundle: QueryBundle,
level: int,
) -> Tuple[NodeWithScore, List[NodeWithScore]]:
"""Fetch nodes.
Uses existing node if it's not an index node.
Otherwise fetch response from corresponding index.
"""
if isinstance(node_with_score.node, IndexNode):
index_node = node_with_score.node
# recursive call
response = self._query_index(query_bundle, index_node.index_id, level + 1)
new_node = TextNode(text=str(response))
new_node_with_score = NodeWithScore(
node=new_node, score=node_with_score.score
)
return new_node_with_score, response.source_nodes
else:
return node_with_score, []
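# --- Illustrative usage (hedged; `index1`/`index2` below are hypothetical, not from this module) ---
# A ComposableGraphQueryEngine is normally obtained from a composed graph, e.g.:
#
#   from llama_index.core import SummaryIndex
#   from llama_index.core.indices.composability.graph import ComposableGraph
#
#   graph = ComposableGraph.from_indices(
#       SummaryIndex, [index1, index2], index_summaries=["about A", "about B"]
#   )
#   engine = ComposableGraphQueryEngine(
#       graph,
#       custom_query_engines={index1.index_id: index1.as_query_engine()},
#       recursive=True,
#   )
#   print(engine.query("What does the corpus say about A?"))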
| [
"llama_index.core.settings.callback_manager_from_settings_or_context",
"llama_index.core.instrumentation.get_dispatcher",
"llama_index.core.schema.NodeWithScore"
] | [((585, 620), 'llama_index.core.instrumentation.get_dispatcher', 'instrument.get_dispatcher', (['__name__'], {}), '(__name__)\n', (610, 620), True, 'import llama_index.core.instrumentation as instrument\n'), ((1649, 1734), 'llama_index.core.settings.callback_manager_from_settings_or_context', 'callback_manager_from_settings_or_context', (['Settings', 'self._graph.service_context'], {}), '(Settings, self._graph.service_context\n )\n', (1690, 1734), False, 'from llama_index.core.settings import Settings, callback_manager_from_settings_or_context\n'), ((4741, 4798), 'llama_index.core.schema.NodeWithScore', 'NodeWithScore', ([], {'node': 'new_node', 'score': 'node_with_score.score'}), '(node=new_node, score=node_with_score.score)\n', (4754, 4798), False, 'from llama_index.core.schema import IndexNode, NodeWithScore, QueryBundle, TextNode\n')] |
from llama_index import (
SimpleDirectoryReader,
VectorStoreIndex,
ServiceContext,
)
from llama_index.llms import LlamaCPP
from llama_index.llms.llama_utils import messages_to_prompt, completion_to_prompt
import llama_index.llms.llama_cpp
from langchain.embeddings import HuggingFaceEmbeddings
import config
llm = llama_index.llms.llama_cpp.LlamaCPP(
model_kwargs={"n_gpu_layers": 1},
)
embed_model = HuggingFaceEmbeddings(model_name=config.EMBEDDING_MODEL_URL)
# create a service context
service_context = ServiceContext.from_defaults(
llm=llm,
embed_model=embed_model,
)
# load documents
documents = SimpleDirectoryReader(
config.KNOWLEDGE_BASE_PATH
).load_data()
# create vector store index
index = VectorStoreIndex.from_documents(documents, service_context=service_context)
# ================== Querying ================== #
# set up query engine
query_engine = index.as_query_engine()
response = query_engine.query("Who are the authors of this paper?")
print(response) | [
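# Note: `messages_to_prompt` and `completion_to_prompt` are imported above but
# never wired in. A hedged sketch of passing them so prompts get formatted for
# Llama-style chat models (illustrative, not the original author's setup):
#
#   llm = LlamaCPP(
#       model_kwargs={"n_gpu_layers": 1},
#       messages_to_prompt=messages_to_prompt,
#       completion_to_prompt=completion_to_prompt,
#   )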
"llama_index.VectorStoreIndex.from_documents",
"llama_index.ServiceContext.from_defaults",
"llama_index.SimpleDirectoryReader"
] | [((431, 491), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': 'config.EMBEDDING_MODEL_URL'}), '(model_name=config.EMBEDDING_MODEL_URL)\n', (452, 491), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((538, 600), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model'}), '(llm=llm, embed_model=embed_model)\n', (566, 600), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext\n'), ((747, 822), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (778, 822), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext\n'), ((642, 691), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['config.KNOWLEDGE_BASE_PATH'], {}), '(config.KNOWLEDGE_BASE_PATH)\n', (663, 691), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext\n')] |
import time
import llama_index
from atlassian import Bitbucket
import os
import sys
sys.path.append('../')
import local_secrets as secrets
start_time = time.time()
stash = Bitbucket('https://git.techstyle.net', token=secrets.stash_token)
os.environ['OPENAI_API_KEY'] = secrets.techstyle_openai_key
project = 'DATASICENCE'
repo = stash.get_repo(project, 'brand-analytics')
length_cutoff = 100000
for repo in stash.repo_list(project):
count = 0
repo_slug = repo['slug']
files = stash.get_file_list(project, repo_slug)
index = llama_index.GPTSimpleVectorIndex([])
index_file = f'./stash_index/{project}_{repo_slug}.json'
if os.path.isfile(index_file):
continue
for file in files:
if file[-3:] not in ['.py']:
continue
try:
count = count + 1
url = f"https://git.techstyle.net/projects/{project}/repos/{repo_slug}/browse/{file}"
code = str(stash.get_content_of_file(project, repo_slug, file))
code = code[2:len(code)-1].replace("\\n", '\n')
print(file, len(code))
if len(code) > length_cutoff:
print(f'{repo_slug} {file} size {len(code)}, truncating')
code = code[0:length_cutoff]
content = f"Stash Project: {project}\nStash Repository: {repo_slug}\nStash URL: {url}\nStash Code:\n {code}"
index.insert(llama_index.Document(content))
except Exception as e:
print(f'Error {e} on {repo_slug} {file}')
index.save_to_disk(index_file)
print(f'Done, {count} files in repo {repo_slug} saved to index in {round(time.time() - start_time, 0)} seconds.')
# projects = stash.project_list()
# for project in projects:
# print(project['key'])
# repos = stash.repo_list('DataScience')
# for repo in repos:
# print(repo['slug'])
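# Illustrative follow-up (hedged): reading one of the saved indexes back with the
# same legacy llama_index API this script targets and querying it.
#
#   index = llama_index.GPTSimpleVectorIndex.load_from_disk(
#       f'./stash_index/{project}_{repo_slug}.json'
#   )
#   print(index.query('What does this repository do?'))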
| [
"llama_index.GPTSimpleVectorIndex",
"llama_index.Document"
] | [((84, 106), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (99, 106), False, 'import sys\n'), ((153, 164), 'time.time', 'time.time', ([], {}), '()\n', (162, 164), False, 'import time\n'), ((173, 238), 'atlassian.Bitbucket', 'Bitbucket', (['"""https://git.techstyle.net"""'], {'token': 'secrets.stash_token'}), "('https://git.techstyle.net', token=secrets.stash_token)\n", (182, 238), False, 'from atlassian import Bitbucket\n'), ((540, 576), 'llama_index.GPTSimpleVectorIndex', 'llama_index.GPTSimpleVectorIndex', (['[]'], {}), '([])\n', (572, 576), False, 'import llama_index\n'), ((645, 671), 'os.path.isfile', 'os.path.isfile', (['index_file'], {}), '(index_file)\n', (659, 671), False, 'import os\n'), ((1390, 1419), 'llama_index.Document', 'llama_index.Document', (['content'], {}), '(content)\n', (1410, 1419), False, 'import llama_index\n'), ((1618, 1629), 'time.time', 'time.time', ([], {}), '()\n', (1627, 1629), False, 'import time\n')] |
import qdrant_client
from llama_index import (
VectorStoreIndex,
ServiceContext,
)
from llama_index.llms import Ollama
from llama_index.vector_stores.qdrant import QdrantVectorStore
import llama_index
llama_index.set_global_handler("simple")
# re-initialize the vector store
client = qdrant_client.QdrantClient(
path="./qdrant_data"
)
vector_store = QdrantVectorStore(client=client, collection_name="tweets")
# get the LLM again
llm = Ollama(model="mistral")
service_context = ServiceContext.from_defaults(llm=llm,embed_model="local")
# load the index from the vector store
index = VectorStoreIndex.from_vector_store(vector_store=vector_store,service_context=service_context)
query_engine = index.as_query_engine(similarity_top_k=20)
response = query_engine.query("Does the author like web frameworks? Give details.")
print(response)
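# For context, a hedged sketch of how an index like the one loaded above is
# typically built in the first place ("./tweets" is a hypothetical source path):
#
#   from llama_index import SimpleDirectoryReader, StorageContext
#
#   documents = SimpleDirectoryReader("./tweets").load_data()
#   storage_context = StorageContext.from_defaults(vector_store=vector_store)
#   VectorStoreIndex.from_documents(
#       documents, storage_context=storage_context, service_context=service_context
#   )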
| [
"llama_index.vector_stores.qdrant.QdrantVectorStore",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.Ollama",
"llama_index.set_global_handler",
"llama_index.VectorStoreIndex.from_vector_store"
] | [((210, 250), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (240, 250), False, 'import llama_index\n'), ((294, 342), 'qdrant_client.QdrantClient', 'qdrant_client.QdrantClient', ([], {'path': '"""./qdrant_data"""'}), "(path='./qdrant_data')\n", (320, 342), False, 'import qdrant_client\n'), ((364, 422), 'llama_index.vector_stores.qdrant.QdrantVectorStore', 'QdrantVectorStore', ([], {'client': 'client', 'collection_name': '"""tweets"""'}), "(client=client, collection_name='tweets')\n", (381, 422), False, 'from llama_index.vector_stores.qdrant import QdrantVectorStore\n'), ((450, 473), 'llama_index.llms.Ollama', 'Ollama', ([], {'model': '"""mistral"""'}), "(model='mistral')\n", (456, 473), False, 'from llama_index.llms import Ollama\n'), ((492, 550), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': '"""local"""'}), "(llm=llm, embed_model='local')\n", (520, 550), False, 'from llama_index import VectorStoreIndex, ServiceContext\n'), ((598, 696), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'vector_store', 'service_context': 'service_context'}), '(vector_store=vector_store,\n service_context=service_context)\n', (632, 696), False, 'from llama_index import VectorStoreIndex, ServiceContext\n')] |
## main function of AWS Lambda function
import llama_index
from llama_index import download_loader
import boto3
import json
import urllib.parse
from llama_index import SimpleDirectoryReader
def main(event, context):
# extracting s3 bucket and key information from SQS message
print(event)
s3_info = json.loads(event['Records'][0]['body'])
bucket_name = s3_info['Records'][0]['s3']['bucket']['name']
object_key = urllib.parse.unquote_plus(s3_info['Records'][0]['s3']['object']['key'], encoding='utf-8')
try:
        # the first approach to read the content of the uploaded file
S3Reader = download_loader("S3Reader", custom_path='/tmp/llamahub_modules')
loader = S3Reader(bucket=bucket_name, key=object_key)
documents = loader.load_data()
        # the second approach to read the content of the uploaded file
# Creating an S3 client
# s3_client = boto3.client('s3')
# response = s3_client.get_object(Bucket=bucket_name, Key=object_key)
# file_content = response['Body'].read().decode('utf-8')
# save the file content to /tmp folder
# tmp_file_path = f"/tmp/{object_key.split('/')[-1]}"
# with open(tmp_file_path, "w") as f:
        #     f.write(file_content)
        # reader = SimpleDirectoryReader(input_files=[tmp_file_path])
# doc = reader.load_data()
# print(f"Loaded {len(doc)} doc")
## TODO
# ReIndex or Create New Index from document
# Update or Insert into VectoDatabase
# (Optional) Update or Insert into DocStorage DB
# Update or Insert index to MongoDB
# Can have Ingestion Pipeline with Redis Cache
return {
'statusCode': 200
}
# # creating an index
except Exception as e:
print(f"Error reading the file {object_key}: {str(e)}")
return {
'statusCode': 500,
'body': json.dumps('Error reading the file')
} | [
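# A hedged sketch of the ingestion TODO above, using llama_index's
# IngestionPipeline with a Redis-backed cache; the host, port, collection name,
# and transformation list are assumptions, not part of this Lambda:
#
#   from llama_index.ingestion import IngestionPipeline, IngestionCache
#   from llama_index.ingestion.cache import RedisCache
#   from llama_index.node_parser import SentenceSplitter
#
#   pipeline = IngestionPipeline(
#       transformations=[SentenceSplitter()],
#       cache=IngestionCache(
#           cache=RedisCache.from_host_and_port("localhost", 6379),
#           collection="s3_ingest_cache",
#       ),
#   )
#   nodes = pipeline.run(documents=documents)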
"llama_index.download_loader"
] | [((313, 352), 'json.loads', 'json.loads', (["event['Records'][0]['body']"], {}), "(event['Records'][0]['body'])\n", (323, 352), False, 'import json\n'), ((628, 692), 'llama_index.download_loader', 'download_loader', (['"""S3Reader"""'], {'custom_path': '"""/tmp/llamahub_modules"""'}), "('S3Reader', custom_path='/tmp/llamahub_modules')\n", (643, 692), False, 'from llama_index import download_loader\n'), ((1963, 1999), 'json.dumps', 'json.dumps', (['"""Error reading the file"""'], {}), "('Error reading the file')\n", (1973, 1999), False, 'import json\n')] |
"""Download."""
import json
import logging
import os
import subprocess
import sys
from enum import Enum
from importlib import util
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import pkg_resources
import requests
from pkg_resources import DistributionNotFound
from llama_index.download.utils import (
get_exports,
get_file_content,
initialize_directory,
rewrite_exports,
)
LLAMA_HUB_CONTENTS_URL = f"https://raw.githubusercontent.com/run-llama/llama-hub/main"
LLAMA_HUB_PATH = "/llama_hub"
LLAMA_HUB_URL = LLAMA_HUB_CONTENTS_URL + LLAMA_HUB_PATH
PATH_TYPE = Union[str, Path]
logger = logging.getLogger(__name__)
LLAMAHUB_ANALYTICS_PROXY_SERVER = "https://llamahub.ai/api/analytics/downloads"
class MODULE_TYPE(str, Enum):
LOADER = "loader"
TOOL = "tool"
LLAMAPACK = "llamapack"
DATASETS = "datasets"
def get_module_info(
local_dir_path: PATH_TYPE,
remote_dir_path: PATH_TYPE,
module_class: str,
refresh_cache: bool = False,
library_path: str = "library.json",
disable_library_cache: bool = False,
) -> Dict:
"""Get module info."""
if isinstance(local_dir_path, str):
local_dir_path = Path(local_dir_path)
local_library_path = f"{local_dir_path}/{library_path}"
module_id = None # e.g. `web/simple_web`
extra_files = [] # e.g. `web/simple_web/utils.py`
# Check cache first
if not refresh_cache and os.path.exists(local_library_path):
with open(local_library_path) as f:
library = json.load(f)
if module_class in library:
module_id = library[module_class]["id"]
extra_files = library[module_class].get("extra_files", [])
# Fetch up-to-date library from remote repo if module_id not found
if module_id is None:
library_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{library_path}"
)
library = json.loads(library_raw_content)
if module_class not in library:
raise ValueError("Loader class name not found in library")
module_id = library[module_class]["id"]
extra_files = library[module_class].get("extra_files", [])
# create cache dir if needed
local_library_dir = os.path.dirname(local_library_path)
if not disable_library_cache:
if not os.path.exists(local_library_dir):
os.makedirs(local_library_dir)
# Update cache
with open(local_library_path, "w") as f:
f.write(library_raw_content)
if module_id is None:
raise ValueError("Loader class name not found in library")
return {
"module_id": module_id,
"extra_files": extra_files,
}
def download_module_and_reqs(
local_dir_path: PATH_TYPE,
remote_dir_path: PATH_TYPE,
module_id: str,
extra_files: List[str],
refresh_cache: bool = False,
use_gpt_index_import: bool = False,
base_file_name: str = "base.py",
override_path: bool = False,
) -> None:
"""Load module."""
if isinstance(local_dir_path, str):
local_dir_path = Path(local_dir_path)
if override_path:
module_path = str(local_dir_path)
else:
module_path = f"{local_dir_path}/{module_id}"
if refresh_cache or not os.path.exists(module_path):
os.makedirs(module_path, exist_ok=True)
basepy_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{module_id}/{base_file_name}"
)
        if use_gpt_index_import:
            basepy_raw_content = basepy_raw_content.replace(
                "import llama_index", "import gpt_index"
            )
            basepy_raw_content = basepy_raw_content.replace(
                "from llama_index", "from gpt_index"
            )
with open(f"{module_path}/{base_file_name}", "w") as f:
f.write(basepy_raw_content)
# Get content of extra files if there are any
# and write them under the loader directory
for extra_file in extra_files:
extra_file_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{module_id}/{extra_file}"
)
# If the extra file is an __init__.py file, we need to
# add the exports to the __init__.py file in the modules directory
if extra_file == "__init__.py":
loader_exports = get_exports(extra_file_raw_content)
existing_exports = []
init_file_path = local_dir_path / "__init__.py"
            # if the __init__.py file does not exist, we need to create it
mode = "a+" if not os.path.exists(init_file_path) else "r+"
with open(init_file_path, mode) as f:
f.write(f"from .{module_id} import {', '.join(loader_exports)}")
existing_exports = get_exports(f.read())
rewrite_exports(existing_exports + loader_exports, str(local_dir_path))
with open(f"{module_path}/{extra_file}", "w") as f:
f.write(extra_file_raw_content)
# install requirements
requirements_path = f"{local_dir_path}/requirements.txt"
if not os.path.exists(requirements_path):
# NOTE: need to check the status code
response_txt, status_code = get_file_content(
str(remote_dir_path), f"/{module_id}/requirements.txt"
)
if status_code == 200:
with open(requirements_path, "w") as f:
f.write(response_txt)
# Install dependencies if there are any and not already installed
if os.path.exists(requirements_path):
try:
requirements = pkg_resources.parse_requirements(
Path(requirements_path).open()
)
pkg_resources.require([str(r) for r in requirements])
except DistributionNotFound:
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "-r", requirements_path]
)
def download_llama_module(
module_class: str,
llama_hub_url: str = LLAMA_HUB_URL,
refresh_cache: bool = False,
custom_dir: Optional[str] = None,
custom_path: Optional[str] = None,
library_path: str = "library.json",
base_file_name: str = "base.py",
use_gpt_index_import: bool = False,
disable_library_cache: bool = False,
override_path: bool = False,
) -> Any:
"""Download a module from LlamaHub.
Can be a loader, tool, pack, or more.
Args:
        module_class: The name of the llama module class you want to download,
such as `GmailOpenAIAgentPack`.
refresh_cache: If true, the local cache will be skipped and the
loader will be fetched directly from the remote repo.
custom_dir: Custom dir name to download loader into (under parent folder).
custom_path: Custom dirpath to download loader into.
library_path: File name of the library file.
        use_gpt_index_import: If true, the loader files will use
            gpt_index as the base dependency. By default (False),
            the loader files use llama_index as the base dependency.
NOTE: this is a temporary workaround while we fully migrate all usages
to llama_index.
is_dataset: whether or not downloading a LlamaDataset
Returns:
A Loader, A Pack, An Agent, or A Dataset
"""
# create directory / get path
dirpath = initialize_directory(custom_path=custom_path, custom_dir=custom_dir)
# fetch info from library.json file
module_info = get_module_info(
local_dir_path=dirpath,
remote_dir_path=llama_hub_url,
module_class=module_class,
refresh_cache=refresh_cache,
library_path=library_path,
disable_library_cache=disable_library_cache,
)
module_id = module_info["module_id"]
extra_files = module_info["extra_files"]
# download the module, install requirements
download_module_and_reqs(
local_dir_path=dirpath,
remote_dir_path=llama_hub_url,
module_id=module_id,
extra_files=extra_files,
refresh_cache=refresh_cache,
use_gpt_index_import=use_gpt_index_import,
base_file_name=base_file_name,
override_path=override_path,
)
# loads the module into memory
if override_path:
spec = util.spec_from_file_location(
"custom_module", location=f"{dirpath}/{base_file_name}"
)
if spec is None:
raise ValueError(f"Could not find file: {dirpath}/{base_file_name}.")
else:
spec = util.spec_from_file_location(
"custom_module", location=f"{dirpath}/{module_id}/{base_file_name}"
)
if spec is None:
raise ValueError(
f"Could not find file: {dirpath}/{module_id}/{base_file_name}."
)
module = util.module_from_spec(spec)
spec.loader.exec_module(module) # type: ignore
return getattr(module, module_class)
def track_download(module_class: str, module_type: str) -> None:
"""Tracks number of downloads via Llamahub proxy.
Args:
module_class: The name of the llama module being downloaded, e.g.,`GmailOpenAIAgentPack`.
module_type: Can be "loader", "tool", "llamapack", or "datasets"
"""
try:
requests.post(
LLAMAHUB_ANALYTICS_PROXY_SERVER,
json={"type": module_type, "plugin": module_class},
)
except Exception as e:
logger.info(f"Error tracking downloads for {module_class} : {e}")
| [
"llama_index.download.utils.get_exports",
"llama_index.download.utils.initialize_directory"
] | [((637, 664), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (654, 664), False, 'import logging\n'), ((5550, 5583), 'os.path.exists', 'os.path.exists', (['requirements_path'], {}), '(requirements_path)\n', (5564, 5583), False, 'import os\n'), ((7403, 7471), 'llama_index.download.utils.initialize_directory', 'initialize_directory', ([], {'custom_path': 'custom_path', 'custom_dir': 'custom_dir'}), '(custom_path=custom_path, custom_dir=custom_dir)\n', (7423, 7471), False, 'from llama_index.download.utils import get_exports, get_file_content, initialize_directory, rewrite_exports\n'), ((8849, 8876), 'importlib.util.module_from_spec', 'util.module_from_spec', (['spec'], {}), '(spec)\n', (8870, 8876), False, 'from importlib import util\n'), ((1197, 1217), 'pathlib.Path', 'Path', (['local_dir_path'], {}), '(local_dir_path)\n', (1201, 1217), False, 'from pathlib import Path\n'), ((1434, 1468), 'os.path.exists', 'os.path.exists', (['local_library_path'], {}), '(local_library_path)\n', (1448, 1468), False, 'import os\n'), ((1938, 1969), 'json.loads', 'json.loads', (['library_raw_content'], {}), '(library_raw_content)\n', (1948, 1969), False, 'import json\n'), ((2263, 2298), 'os.path.dirname', 'os.path.dirname', (['local_library_path'], {}), '(local_library_path)\n', (2278, 2298), False, 'import os\n'), ((3131, 3151), 'pathlib.Path', 'Path', (['local_dir_path'], {}), '(local_dir_path)\n', (3135, 3151), False, 'from pathlib import Path\n'), ((3347, 3386), 'os.makedirs', 'os.makedirs', (['module_path'], {'exist_ok': '(True)'}), '(module_path, exist_ok=True)\n', (3358, 3386), False, 'import os\n'), ((5139, 5172), 'os.path.exists', 'os.path.exists', (['requirements_path'], {}), '(requirements_path)\n', (5153, 5172), False, 'import os\n'), ((8326, 8416), 'importlib.util.spec_from_file_location', 'util.spec_from_file_location', (['"""custom_module"""'], {'location': 'f"""{dirpath}/{base_file_name}"""'}), "('custom_module', location=\n f'{dirpath}/{base_file_name}')\n", (8354, 8416), False, 'from importlib import util\n'), ((8566, 8668), 'importlib.util.spec_from_file_location', 'util.spec_from_file_location', (['"""custom_module"""'], {'location': 'f"""{dirpath}/{module_id}/{base_file_name}"""'}), "('custom_module', location=\n f'{dirpath}/{module_id}/{base_file_name}')\n", (8594, 8668), False, 'from importlib import util\n'), ((9299, 9401), 'requests.post', 'requests.post', (['LLAMAHUB_ANALYTICS_PROXY_SERVER'], {'json': "{'type': module_type, 'plugin': module_class}"}), "(LLAMAHUB_ANALYTICS_PROXY_SERVER, json={'type': module_type,\n 'plugin': module_class})\n", (9312, 9401), False, 'import requests\n'), ((1536, 1548), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1545, 1548), False, 'import json\n'), ((3310, 3337), 'os.path.exists', 'os.path.exists', (['module_path'], {}), '(module_path)\n', (3324, 3337), False, 'import os\n'), ((4385, 4420), 'llama_index.download.utils.get_exports', 'get_exports', (['extra_file_raw_content'], {}), '(extra_file_raw_content)\n', (4396, 4420), False, 'from llama_index.download.utils import get_exports, get_file_content, initialize_directory, rewrite_exports\n'), ((2356, 2389), 'os.path.exists', 'os.path.exists', (['local_library_dir'], {}), '(local_library_dir)\n', (2370, 2389), False, 'import os\n'), ((2407, 2437), 'os.makedirs', 'os.makedirs', (['local_library_dir'], {}), '(local_library_dir)\n', (2418, 2437), False, 'import os\n'), ((5835, 5927), 'subprocess.check_call', 'subprocess.check_call', (["[sys.executable, '-m', 'pip', 'install', '-r', requirements_path]"], {}), "([sys.executable, '-m', 'pip', 'install', '-r',\n requirements_path])\n", (5856, 5927), False, 'import subprocess\n'), ((4620, 4650), 'os.path.exists', 'os.path.exists', (['init_file_path'], {}), '(init_file_path)\n', (4634, 4650), False, 'import os\n'), ((5675, 5698), 'pathlib.Path', 'Path', (['requirements_path'], {}), '(requirements_path)\n', (5679, 5698), False, 'from pathlib import Path\n')]
import json
from typing import Dict, List, Optional
import llama_index.query_engine
from llama_index import ServiceContext, QueryBundle
from llama_index.callbacks import CBEventType, LlamaDebugHandler, CallbackManager
from llama_index.indices.base import BaseIndex
from llama_index.indices.query.base import BaseQueryEngine
from llama_index.llms.base import LLM
from llama_index.prompts.mixin import PromptMixinType
from llama_index.response.schema import RESPONSE_TYPE, Response
from llama_index.selectors import LLMSingleSelector
from llama_index.tools import QueryEngineTool
from common.config import DEBUG, LLM_CACHE_ENABLED
from common.llm import llm_predict, create_llm
from common.prompt import CH_SINGLE_SELECT_PROMPT_TMPL
from common.utils import ObjectEncoder
from query_todo.query_engine import load_indices
from query_todo.compose import create_compose_query_engine
class EchoNameEngine(BaseQueryEngine):
    def __init__(self, name: str, callback_manager: Optional[CallbackManager] = None):
self.name = name
super().__init__(callback_manager)
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
pass
def _get_prompt_modules(self) -> PromptMixinType:
return {}
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
return Response(f"我是{self.name}")
class LlmQueryEngine(BaseQueryEngine):
def __init__(self, llm: LLM, callback_manager: CallbackManager):
self.llm = llm
super().__init__(callback_manager=callback_manager)
def _get_prompt_modules(self) -> PromptMixinType:
return {}
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
return Response(llm_predict(self.llm, query_bundle.query_str))
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
pass
def create_route_query_engine(query_engines: List[BaseQueryEngine], descriptions: List[str],
                              service_context: Optional[ServiceContext] = None):
assert len(query_engines) == len(descriptions)
# TODO
    # Create a RouterQueryEngine from the given query_engines and descriptions so that each query is routed to the matching engine (see the sketch after this function)
# https://docs.llamaindex.ai/en/stable/module_guides/querying/router/root.html#using-as-a-query-engine
raise NotImplementedError
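# A minimal sketch (not the intended solution to the TODO above) of how the
# router can be assembled, assuming the llama_index RouterQueryEngine API; the
# helper name _example_route_query_engine is illustrative only.
def _example_route_query_engine(query_engines: List[BaseQueryEngine], descriptions: List[str],
                               service_context: Optional[ServiceContext] = None):
    from llama_index.query_engine import RouterQueryEngine
    tools = [
        QueryEngineTool.from_defaults(query_engine=engine, description=description)
        for engine, description in zip(query_engines, descriptions)
    ]
    # LLMSingleSelector asks the LLM to pick exactly one tool per query
    return RouterQueryEngine(
        selector=LLMSingleSelector.from_defaults(service_context=service_context),
        query_engine_tools=tools,
        service_context=service_context,
    )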
class Chatter:
def __init__(self):
if DEBUG:
debug_handler = LlamaDebugHandler()
cb_manager = CallbackManager([debug_handler])
else:
debug_handler = None
cb_manager = CallbackManager()
llm = create_llm(cb_manager, LLM_CACHE_ENABLED)
service_context = ServiceContext.from_defaults(
llm=llm,
callback_manager=cb_manager
)
self.cb_manager = cb_manager
self.city_indices: Dict[str, List[BaseIndex]] = load_indices(service_context)
self.service_context = service_context
self.llm = llm
self.debug_handler = debug_handler
self.query_engine = self.create_query_engine()
def create_query_engine(self):
index_query_engine = create_compose_query_engine(self.city_indices, self.service_context)
index_summary = f"提供 {', '.join(self.city_indices.keys())} 这几个城市的相关信息"
llm_query_engine = LlmQueryEngine(llm=self.llm, callback_manager=self.cb_manager)
llm_summary = f"提供其他所有信息"
# 实现意图识别,把不同的query路由到不同的query_engine上,实现聊天和城市信息查询两个功能的分流
# https://docs.llamaindex.ai/en/stable/module_guides/querying/router/root.html#using-as-a-query-engine
raise NotImplementedError
def _print_and_flush_debug_info(self):
if self.debug_handler:
for event in self.debug_handler.get_events():
if event.event_type in (CBEventType.LLM, CBEventType.RETRIEVE):
print(
f"[DebugInfo] event_type={event.event_type}, content={json.dumps(event.payload, ensure_ascii=False, cls=ObjectEncoder)}")
self.debug_handler.flush_event_logs()
def chat(self, query):
response = self.query_engine.query(query)
self._print_and_flush_debug_info()
return response
| [
"llama_index.callbacks.LlamaDebugHandler",
"llama_index.response.schema.Response",
"llama_index.ServiceContext.from_defaults",
"llama_index.callbacks.CallbackManager"
] | [((1298, 1324), 'llama_index.response.schema.Response', 'Response', (['f"""我是{self.name}"""'], {}), "(f'我是{self.name}')\n", (1306, 1324), False, 'from llama_index.response.schema import RESPONSE_TYPE, Response\n'), ((2530, 2571), 'common.llm.create_llm', 'create_llm', (['cb_manager', 'LLM_CACHE_ENABLED'], {}), '(cb_manager, LLM_CACHE_ENABLED)\n', (2540, 2571), False, 'from common.llm import llm_predict, create_llm\n'), ((2598, 2664), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'callback_manager': 'cb_manager'}), '(llm=llm, callback_manager=cb_manager)\n', (2626, 2664), False, 'from llama_index import ServiceContext, QueryBundle\n'), ((2792, 2821), 'query_todo.query_engine.load_indices', 'load_indices', (['service_context'], {}), '(service_context)\n', (2804, 2821), False, 'from query_todo.query_engine import load_indices\n'), ((3055, 3123), 'query_todo.compose.create_compose_query_engine', 'create_compose_query_engine', (['self.city_indices', 'self.service_context'], {}), '(self.city_indices, self.service_context)\n', (3082, 3123), False, 'from query_todo.compose import create_compose_query_engine\n'), ((1683, 1728), 'common.llm.llm_predict', 'llm_predict', (['self.llm', 'query_bundle.query_str'], {}), '(self.llm, query_bundle.query_str)\n', (1694, 1728), False, 'from common.llm import llm_predict, create_llm\n'), ((2348, 2367), 'llama_index.callbacks.LlamaDebugHandler', 'LlamaDebugHandler', ([], {}), '()\n', (2365, 2367), False, 'from llama_index.callbacks import CBEventType, LlamaDebugHandler, CallbackManager\n'), ((2393, 2425), 'llama_index.callbacks.CallbackManager', 'CallbackManager', (['[debug_handler]'], {}), '([debug_handler])\n', (2408, 2425), False, 'from llama_index.callbacks import CBEventType, LlamaDebugHandler, CallbackManager\n'), ((2498, 2515), 'llama_index.callbacks.CallbackManager', 'CallbackManager', ([], {}), '()\n', (2513, 2515), False, 'from llama_index.callbacks import CBEventType, LlamaDebugHandler, CallbackManager\n'), ((3855, 3919), 'json.dumps', 'json.dumps', (['event.payload'], {'ensure_ascii': '(False)', 'cls': 'ObjectEncoder'}), '(event.payload, ensure_ascii=False, cls=ObjectEncoder)\n', (3865, 3919), False, 'import json\n')] |
"""Download."""
import json
import logging
import os
import subprocess
import sys
from enum import Enum
from importlib import util
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import pkg_resources
import requests
from pkg_resources import DistributionNotFound
from llama_index.download.utils import (
get_exports,
get_file_content,
initialize_directory,
rewrite_exports,
)
LLAMA_HUB_CONTENTS_URL = f"https://raw.githubusercontent.com/run-llama/llama-hub/main"
LLAMA_HUB_PATH = "/llama_hub"
LLAMA_HUB_URL = LLAMA_HUB_CONTENTS_URL + LLAMA_HUB_PATH
PATH_TYPE = Union[str, Path]
logger = logging.getLogger(__name__)
LLAMAHUB_ANALYTICS_PROXY_SERVER = "https://llamahub.ai/api/analytics/downloads"
class MODULE_TYPE(str, Enum):
LOADER = "loader"
TOOL = "tool"
LLAMAPACK = "llamapack"
DATASETS = "datasets"
def get_module_info(
local_dir_path: PATH_TYPE,
remote_dir_path: PATH_TYPE,
module_class: str,
refresh_cache: bool = False,
library_path: str = "library.json",
disable_library_cache: bool = False,
) -> Dict:
"""Get module info."""
if isinstance(local_dir_path, str):
local_dir_path = Path(local_dir_path)
local_library_path = f"{local_dir_path}/{library_path}"
module_id = None # e.g. `web/simple_web`
extra_files = [] # e.g. `web/simple_web/utils.py`
# Check cache first
if not refresh_cache and os.path.exists(local_library_path):
with open(local_library_path) as f:
library = json.load(f)
if module_class in library:
module_id = library[module_class]["id"]
extra_files = library[module_class].get("extra_files", [])
# Fetch up-to-date library from remote repo if module_id not found
if module_id is None:
library_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{library_path}"
)
library = json.loads(library_raw_content)
if module_class not in library:
raise ValueError("Loader class name not found in library")
module_id = library[module_class]["id"]
extra_files = library[module_class].get("extra_files", [])
# create cache dir if needed
local_library_dir = os.path.dirname(local_library_path)
if not disable_library_cache:
if not os.path.exists(local_library_dir):
os.makedirs(local_library_dir)
# Update cache
with open(local_library_path, "w") as f:
f.write(library_raw_content)
if module_id is None:
raise ValueError("Loader class name not found in library")
return {
"module_id": module_id,
"extra_files": extra_files,
}
def download_module_and_reqs(
local_dir_path: PATH_TYPE,
remote_dir_path: PATH_TYPE,
module_id: str,
extra_files: List[str],
refresh_cache: bool = False,
use_gpt_index_import: bool = False,
base_file_name: str = "base.py",
override_path: bool = False,
) -> None:
"""Load module."""
if isinstance(local_dir_path, str):
local_dir_path = Path(local_dir_path)
if override_path:
module_path = str(local_dir_path)
else:
module_path = f"{local_dir_path}/{module_id}"
if refresh_cache or not os.path.exists(module_path):
os.makedirs(module_path, exist_ok=True)
basepy_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{module_id}/{base_file_name}"
)
if use_gpt_index_import:
        basepy_raw_content = basepy_raw_content.replace(
            "import llama_index", "import gpt_index"
        )
        basepy_raw_content = basepy_raw_content.replace(
            "from llama_index", "from gpt_index"
        )
with open(f"{module_path}/{base_file_name}", "w") as f:
f.write(basepy_raw_content)
# Get content of extra files if there are any
# and write them under the loader directory
for extra_file in extra_files:
extra_file_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{module_id}/{extra_file}"
)
# If the extra file is an __init__.py file, we need to
# add the exports to the __init__.py file in the modules directory
if extra_file == "__init__.py":
loader_exports = get_exports(extra_file_raw_content)
existing_exports = []
init_file_path = local_dir_path / "__init__.py"
# if the __init__.py file do not exists, we need to create it
mode = "a+" if not os.path.exists(init_file_path) else "r+"
with open(init_file_path, mode) as f:
f.write(f"from .{module_id} import {', '.join(loader_exports)}")
existing_exports = get_exports(f.read())
rewrite_exports(existing_exports + loader_exports, str(local_dir_path))
with open(f"{module_path}/{extra_file}", "w") as f:
f.write(extra_file_raw_content)
# install requirements
requirements_path = f"{local_dir_path}/requirements.txt"
if not os.path.exists(requirements_path):
# NOTE: need to check the status code
response_txt, status_code = get_file_content(
str(remote_dir_path), f"/{module_id}/requirements.txt"
)
if status_code == 200:
with open(requirements_path, "w") as f:
f.write(response_txt)
# Install dependencies if there are any and not already installed
if os.path.exists(requirements_path):
try:
requirements = pkg_resources.parse_requirements(
Path(requirements_path).open()
)
pkg_resources.require([str(r) for r in requirements])
except DistributionNotFound:
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "-r", requirements_path]
)
def download_llama_module(
module_class: str,
llama_hub_url: str = LLAMA_HUB_URL,
refresh_cache: bool = False,
custom_dir: Optional[str] = None,
custom_path: Optional[str] = None,
library_path: str = "library.json",
base_file_name: str = "base.py",
use_gpt_index_import: bool = False,
disable_library_cache: bool = False,
override_path: bool = False,
) -> Any:
"""Download a module from LlamaHub.
Can be a loader, tool, pack, or more.
Args:
        module_class: The name of the llama module class you want to download,
such as `GmailOpenAIAgentPack`.
refresh_cache: If true, the local cache will be skipped and the
loader will be fetched directly from the remote repo.
custom_dir: Custom dir name to download loader into (under parent folder).
custom_path: Custom dirpath to download loader into.
library_path: File name of the library file.
        use_gpt_index_import: If true, the loader files will use
            gpt_index as the base dependency. By default (False),
            the loader files use llama_index as the base dependency.
            NOTE: this is a temporary workaround while we fully migrate all usages
            to llama_index.
Returns:
A Loader, A Pack, An Agent, or A Dataset
"""
# create directory / get path
dirpath = initialize_directory(custom_path=custom_path, custom_dir=custom_dir)
# fetch info from library.json file
module_info = get_module_info(
local_dir_path=dirpath,
remote_dir_path=llama_hub_url,
module_class=module_class,
refresh_cache=refresh_cache,
library_path=library_path,
disable_library_cache=disable_library_cache,
)
module_id = module_info["module_id"]
extra_files = module_info["extra_files"]
# download the module, install requirements
download_module_and_reqs(
local_dir_path=dirpath,
remote_dir_path=llama_hub_url,
module_id=module_id,
extra_files=extra_files,
refresh_cache=refresh_cache,
use_gpt_index_import=use_gpt_index_import,
base_file_name=base_file_name,
override_path=override_path,
)
# loads the module into memory
if override_path:
spec = util.spec_from_file_location(
"custom_module", location=f"{dirpath}/{base_file_name}"
)
if spec is None:
raise ValueError(f"Could not find file: {dirpath}/{base_file_name}.")
else:
spec = util.spec_from_file_location(
"custom_module", location=f"{dirpath}/{module_id}/{base_file_name}"
)
if spec is None:
raise ValueError(
f"Could not find file: {dirpath}/{module_id}/{base_file_name}."
)
module = util.module_from_spec(spec)
spec.loader.exec_module(module) # type: ignore
return getattr(module, module_class)
def track_download(module_class: str, module_type: str) -> None:
"""Tracks number of downloads via Llamahub proxy.
Args:
module_class: The name of the llama module being downloaded, e.g.,`GmailOpenAIAgentPack`.
module_type: Can be "loader", "tool", "llamapack", or "datasets"
"""
try:
requests.post(
LLAMAHUB_ANALYTICS_PROXY_SERVER,
json={"type": module_type, "plugin": module_class},
)
except Exception as e:
logger.info(f"Error tracking downloads for {module_class} : {e}")
| [
"llama_index.download.utils.get_exports",
"llama_index.download.utils.initialize_directory"
] | [((637, 664), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (654, 664), False, 'import logging\n'), ((5550, 5583), 'os.path.exists', 'os.path.exists', (['requirements_path'], {}), '(requirements_path)\n', (5564, 5583), False, 'import os\n'), ((7403, 7471), 'llama_index.download.utils.initialize_directory', 'initialize_directory', ([], {'custom_path': 'custom_path', 'custom_dir': 'custom_dir'}), '(custom_path=custom_path, custom_dir=custom_dir)\n', (7423, 7471), False, 'from llama_index.download.utils import get_exports, get_file_content, initialize_directory, rewrite_exports\n'), ((8849, 8876), 'importlib.util.module_from_spec', 'util.module_from_spec', (['spec'], {}), '(spec)\n', (8870, 8876), False, 'from importlib import util\n'), ((1197, 1217), 'pathlib.Path', 'Path', (['local_dir_path'], {}), '(local_dir_path)\n', (1201, 1217), False, 'from pathlib import Path\n'), ((1434, 1468), 'os.path.exists', 'os.path.exists', (['local_library_path'], {}), '(local_library_path)\n', (1448, 1468), False, 'import os\n'), ((1938, 1969), 'json.loads', 'json.loads', (['library_raw_content'], {}), '(library_raw_content)\n', (1948, 1969), False, 'import json\n'), ((2263, 2298), 'os.path.dirname', 'os.path.dirname', (['local_library_path'], {}), '(local_library_path)\n', (2278, 2298), False, 'import os\n'), ((3131, 3151), 'pathlib.Path', 'Path', (['local_dir_path'], {}), '(local_dir_path)\n', (3135, 3151), False, 'from pathlib import Path\n'), ((3347, 3386), 'os.makedirs', 'os.makedirs', (['module_path'], {'exist_ok': '(True)'}), '(module_path, exist_ok=True)\n', (3358, 3386), False, 'import os\n'), ((5139, 5172), 'os.path.exists', 'os.path.exists', (['requirements_path'], {}), '(requirements_path)\n', (5153, 5172), False, 'import os\n'), ((8326, 8416), 'importlib.util.spec_from_file_location', 'util.spec_from_file_location', (['"""custom_module"""'], {'location': 'f"""{dirpath}/{base_file_name}"""'}), "('custom_module', location=\n f'{dirpath}/{base_file_name}')\n", (8354, 8416), False, 'from importlib import util\n'), ((8566, 8668), 'importlib.util.spec_from_file_location', 'util.spec_from_file_location', (['"""custom_module"""'], {'location': 'f"""{dirpath}/{module_id}/{base_file_name}"""'}), "('custom_module', location=\n f'{dirpath}/{module_id}/{base_file_name}')\n", (8594, 8668), False, 'from importlib import util\n'), ((9299, 9401), 'requests.post', 'requests.post', (['LLAMAHUB_ANALYTICS_PROXY_SERVER'], {'json': "{'type': module_type, 'plugin': module_class}"}), "(LLAMAHUB_ANALYTICS_PROXY_SERVER, json={'type': module_type,\n 'plugin': module_class})\n", (9312, 9401), False, 'import requests\n'), ((1536, 1548), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1545, 1548), False, 'import json\n'), ((3310, 3337), 'os.path.exists', 'os.path.exists', (['module_path'], {}), '(module_path)\n', (3324, 3337), False, 'import os\n'), ((4385, 4420), 'llama_index.download.utils.get_exports', 'get_exports', (['extra_file_raw_content'], {}), '(extra_file_raw_content)\n', (4396, 4420), False, 'from llama_index.download.utils import get_exports, get_file_content, initialize_directory, rewrite_exports\n'), ((2356, 2389), 'os.path.exists', 'os.path.exists', (['local_library_dir'], {}), '(local_library_dir)\n', (2370, 2389), False, 'import os\n'), ((2407, 2437), 'os.makedirs', 'os.makedirs', (['local_library_dir'], {}), '(local_library_dir)\n', (2418, 2437), False, 'import os\n'), ((5835, 5927), 'subprocess.check_call', 'subprocess.check_call', (["[sys.executable, '-m', 'pip', 'install', '-r', requirements_path]"], {}), "([sys.executable, '-m', 'pip', 'install', '-r',\n requirements_path])\n", (5856, 5927), False, 'import subprocess\n'), ((4620, 4650), 'os.path.exists', 'os.path.exists', (['init_file_path'], {}), '(init_file_path)\n', (4634, 4650), False, 'import os\n'), ((5675, 5698), 'pathlib.Path', 'Path', (['requirements_path'], {}), '(requirements_path)\n', (5679, 5698), False, 'from pathlib import Path\n')]
# Ultralytics YOLO 🚀, AGPL-3.0 license
import getpass
from typing import List
import cv2
import numpy as np
import pandas as pd
from ultralytics.data.augment import LetterBox
from ultralytics.utils import LOGGER as logger
from ultralytics.utils import SETTINGS
from ultralytics.utils.checks import check_requirements
from ultralytics.utils.ops import xyxy2xywh
from ultralytics.utils.plotting import plot_images
def get_table_schema(vector_size):
"""Extracts and returns the schema of a database table."""
from lancedb.pydantic import LanceModel, Vector
class Schema(LanceModel):
im_file: str
labels: List[str]
cls: List[int]
bboxes: List[List[float]]
masks: List[List[List[int]]]
keypoints: List[List[List[float]]]
vector: Vector(vector_size)
return Schema
def get_sim_index_schema():
"""Returns a LanceModel schema for a database table with specified vector size."""
from lancedb.pydantic import LanceModel
class Schema(LanceModel):
idx: int
im_file: str
count: int
sim_im_files: List[str]
return Schema
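# Minimal sketch of backing these schemas with a LanceDB table; the database
# path, table name, and vector size below are illustrative, not part of the
# Explorer API.
def _example_create_table(vector_size: int = 256):
    import lancedb
    db = lancedb.connect("lancedb_example")
    return db.create_table("schema_demo", schema=get_table_schema(vector_size), mode="overwrite")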
def sanitize_batch(batch, dataset_info):
"""Sanitizes input batch for inference, ensuring correct format and dimensions."""
batch["cls"] = batch["cls"].flatten().int().tolist()
box_cls_pair = sorted(zip(batch["bboxes"].tolist(), batch["cls"]), key=lambda x: x[1])
batch["bboxes"] = [box for box, _ in box_cls_pair]
batch["cls"] = [cls for _, cls in box_cls_pair]
batch["labels"] = [dataset_info["names"][i] for i in batch["cls"]]
batch["masks"] = batch["masks"].tolist() if "masks" in batch else [[[]]]
batch["keypoints"] = batch["keypoints"].tolist() if "keypoints" in batch else [[[]]]
return batch
def plot_query_result(similar_set, plot_labels=True):
"""
Plot images from the similar set.
Args:
similar_set (list): Pyarrow or pandas object containing the similar data points
plot_labels (bool): Whether to plot labels or not
"""
similar_set = (
similar_set.to_dict(orient="list") if isinstance(similar_set, pd.DataFrame) else similar_set.to_pydict()
)
empty_masks = [[[]]]
empty_boxes = [[]]
images = similar_set.get("im_file", [])
    bboxes = similar_set.get("bboxes", []) if similar_set.get("bboxes") != empty_boxes else []
masks = similar_set.get("masks") if similar_set.get("masks")[0] != empty_masks else []
kpts = similar_set.get("keypoints") if similar_set.get("keypoints")[0] != empty_masks else []
cls = similar_set.get("cls", [])
plot_size = 640
imgs, batch_idx, plot_boxes, plot_masks, plot_kpts = [], [], [], [], []
for i, imf in enumerate(images):
im = cv2.imread(imf)
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
h, w = im.shape[:2]
r = min(plot_size / h, plot_size / w)
imgs.append(LetterBox(plot_size, center=False)(image=im).transpose(2, 0, 1))
if plot_labels:
if len(bboxes) > i and len(bboxes[i]) > 0:
box = np.array(bboxes[i], dtype=np.float32)
box[:, [0, 2]] *= r
box[:, [1, 3]] *= r
plot_boxes.append(box)
if len(masks) > i and len(masks[i]) > 0:
mask = np.array(masks[i], dtype=np.uint8)[0]
plot_masks.append(LetterBox(plot_size, center=False)(image=mask))
if len(kpts) > i and kpts[i] is not None:
kpt = np.array(kpts[i], dtype=np.float32)
kpt[:, :, :2] *= r
plot_kpts.append(kpt)
batch_idx.append(np.ones(len(np.array(bboxes[i], dtype=np.float32))) * i)
imgs = np.stack(imgs, axis=0)
masks = np.stack(plot_masks, axis=0) if plot_masks else np.zeros(0, dtype=np.uint8)
kpts = np.concatenate(plot_kpts, axis=0) if plot_kpts else np.zeros((0, 51), dtype=np.float32)
boxes = xyxy2xywh(np.concatenate(plot_boxes, axis=0)) if plot_boxes else np.zeros(0, dtype=np.float32)
batch_idx = np.concatenate(batch_idx, axis=0)
cls = np.concatenate([np.array(c, dtype=np.int32) for c in cls], axis=0)
return plot_images(
imgs, batch_idx, cls, bboxes=boxes, masks=masks, kpts=kpts, max_subplots=len(images), save=False, threaded=False
)
def prompt_sql_query(query):
"""Plots images with optional labels from a similar data set."""
check_requirements("openai>=1.6.1")
from openai import OpenAI
if not SETTINGS["openai_api_key"]:
logger.warning("OpenAI API key not found in settings. Please enter your API key below.")
openai_api_key = getpass.getpass("OpenAI API key: ")
SETTINGS.update({"openai_api_key": openai_api_key})
openai = OpenAI(api_key=SETTINGS["openai_api_key"])
messages = [
{
"role": "system",
"content": """
You are a helpful data scientist proficient in SQL. You need to output exactly one SQL query based on
the following schema and a user request. You only need to output the format with fixed selection
statement that selects everything from "'table'", like `SELECT * from 'table'`
Schema:
im_file: string not null
labels: list<item: string> not null
child 0, item: string
cls: list<item: int64> not null
child 0, item: int64
bboxes: list<item: list<item: double>> not null
child 0, item: list<item: double>
child 0, item: double
masks: list<item: list<item: list<item: int64>>> not null
child 0, item: list<item: list<item: int64>>
child 0, item: list<item: int64>
child 0, item: int64
keypoints: list<item: list<item: list<item: double>>> not null
child 0, item: list<item: list<item: double>>
child 0, item: list<item: double>
child 0, item: double
vector: fixed_size_list<item: float>[256] not null
child 0, item: float
Some details about the schema:
- the "labels" column contains the string values like 'person' and 'dog' for the respective objects
in each image
- the "cls" column contains the integer values on these classes that map them the labels
Example of a correct query:
request - Get all data points that contain 2 or more people and at least one dog
correct query-
SELECT * FROM 'table' WHERE ARRAY_LENGTH(cls) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'person')) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'dog')) >= 1;
""",
},
{"role": "user", "content": f"{query}"},
]
response = openai.chat.completions.create(model="gpt-3.5-turbo", messages=messages)
return response.choices[0].message.content
| [
"lancedb.pydantic.Vector"
] | [((3694, 3716), 'numpy.stack', 'np.stack', (['imgs'], {'axis': '(0)'}), '(imgs, axis=0)\n', (3702, 3716), True, 'import numpy as np\n'), ((4027, 4060), 'numpy.concatenate', 'np.concatenate', (['batch_idx'], {'axis': '(0)'}), '(batch_idx, axis=0)\n', (4041, 4060), True, 'import numpy as np\n'), ((4394, 4429), 'ultralytics.utils.checks.check_requirements', 'check_requirements', (['"""openai>=1.6.1"""'], {}), "('openai>=1.6.1')\n", (4412, 4429), False, 'from ultralytics.utils.checks import check_requirements\n'), ((4731, 4773), 'openai.OpenAI', 'OpenAI', ([], {'api_key': "SETTINGS['openai_api_key']"}), "(api_key=SETTINGS['openai_api_key'])\n", (4737, 4773), False, 'from openai import OpenAI\n'), ((798, 817), 'lancedb.pydantic.Vector', 'Vector', (['vector_size'], {}), '(vector_size)\n', (804, 817), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((2746, 2761), 'cv2.imread', 'cv2.imread', (['imf'], {}), '(imf)\n', (2756, 2761), False, 'import cv2\n'), ((2775, 2810), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (2787, 2810), False, 'import cv2\n'), ((3729, 3757), 'numpy.stack', 'np.stack', (['plot_masks'], {'axis': '(0)'}), '(plot_masks, axis=0)\n', (3737, 3757), True, 'import numpy as np\n'), ((3777, 3804), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.uint8'}), '(0, dtype=np.uint8)\n', (3785, 3804), True, 'import numpy as np\n'), ((3816, 3849), 'numpy.concatenate', 'np.concatenate', (['plot_kpts'], {'axis': '(0)'}), '(plot_kpts, axis=0)\n', (3830, 3849), True, 'import numpy as np\n'), ((3868, 3903), 'numpy.zeros', 'np.zeros', (['(0, 51)'], {'dtype': 'np.float32'}), '((0, 51), dtype=np.float32)\n', (3876, 3903), True, 'import numpy as np\n'), ((3981, 4010), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.float32'}), '(0, dtype=np.float32)\n', (3989, 4010), True, 'import numpy as np\n'), ((4508, 4601), 'ultralytics.utils.LOGGER.warning', 'logger.warning', (['"""OpenAI API key not found in settings. Please enter your API key below."""'], {}), "(\n 'OpenAI API key not found in settings. Please enter your API key below.')\n", (4522, 4601), True, 'from ultralytics.utils import LOGGER as logger\n'), ((4622, 4657), 'getpass.getpass', 'getpass.getpass', (['"""OpenAI API key: """'], {}), "('OpenAI API key: ')\n", (4637, 4657), False, 'import getpass\n'), ((4666, 4717), 'ultralytics.utils.SETTINGS.update', 'SETTINGS.update', (["{'openai_api_key': openai_api_key}"], {}), "({'openai_api_key': openai_api_key})\n", (4681, 4717), False, 'from ultralytics.utils import SETTINGS\n'), ((3926, 3960), 'numpy.concatenate', 'np.concatenate', (['plot_boxes'], {'axis': '(0)'}), '(plot_boxes, axis=0)\n', (3940, 3960), True, 'import numpy as np\n'), ((4087, 4114), 'numpy.array', 'np.array', (['c'], {'dtype': 'np.int32'}), '(c, dtype=np.int32)\n', (4095, 4114), True, 'import numpy as np\n'), ((3071, 3108), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3079, 3108), True, 'import numpy as np\n'), ((3492, 3527), 'numpy.array', 'np.array', (['kpts[i]'], {'dtype': 'np.float32'}), '(kpts[i], dtype=np.float32)\n', (3500, 3527), True, 'import numpy as np\n'), ((3296, 3330), 'numpy.array', 'np.array', (['masks[i]'], {'dtype': 'np.uint8'}), '(masks[i], dtype=np.uint8)\n', (3304, 3330), True, 'import numpy as np\n'), ((2905, 2939), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (2914, 2939), False, 'from ultralytics.data.augment import LetterBox\n'), ((3368, 3402), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (3377, 3402), False, 'from ultralytics.data.augment import LetterBox\n'), ((3638, 3675), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3646, 3675), True, 'import numpy as np\n')]
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
from lancedb.utils import CONFIG
@click.group()
@click.version_option(help="LanceDB command line interface entry point")
def cli():
"LanceDB command line interface"
diagnostics_help = """
Enable or disable LanceDB diagnostics. When enabled, LanceDB will send anonymous events
to help us improve LanceDB. These diagnostics are used only for error reporting and no
data is collected. You can find more about diagnostics in our docs:
https://lancedb.github.io/lancedb/cli_config/
"""
@cli.command(help=diagnostics_help)
@click.option("--enabled/--disabled", default=True)
def diagnostics(enabled):
CONFIG.update({"diagnostics": True if enabled else False})
click.echo("LanceDB diagnostics is %s" % ("enabled" if enabled else "disabled"))
@cli.command(help="Show current LanceDB configuration")
def config():
# TODO: pretty print as table with colors and formatting
click.echo("Current LanceDB configuration:")
cfg = CONFIG.copy()
cfg.pop("uuid") # Don't show uuid as it is not configurable
for item, amount in cfg.items():
click.echo("{} ({})".format(item, amount))
| [
"lancedb.utils.CONFIG.copy",
"lancedb.utils.CONFIG.update"
] | [((641, 654), 'click.group', 'click.group', ([], {}), '()\n', (652, 654), False, 'import click\n'), ((656, 727), 'click.version_option', 'click.version_option', ([], {'help': '"""LanceDB command line interface entry point"""'}), "(help='LanceDB command line interface entry point')\n", (676, 727), False, 'import click\n'), ((1131, 1181), 'click.option', 'click.option', (['"""--enabled/--disabled"""'], {'default': '(True)'}), "('--enabled/--disabled', default=True)\n", (1143, 1181), False, 'import click\n'), ((1212, 1270), 'lancedb.utils.CONFIG.update', 'CONFIG.update', (["{'diagnostics': True if enabled else False}"], {}), "({'diagnostics': True if enabled else False})\n", (1225, 1270), False, 'from lancedb.utils import CONFIG\n'), ((1275, 1360), 'click.echo', 'click.echo', (["('LanceDB diagnostics is %s' % ('enabled' if enabled else 'disabled'))"], {}), "('LanceDB diagnostics is %s' % ('enabled' if enabled else 'disabled')\n )\n", (1285, 1360), False, 'import click\n'), ((1493, 1537), 'click.echo', 'click.echo', (['"""Current LanceDB configuration:"""'], {}), "('Current LanceDB configuration:')\n", (1503, 1537), False, 'import click\n'), ((1548, 1561), 'lancedb.utils.CONFIG.copy', 'CONFIG.copy', ([], {}), '()\n', (1559, 1561), False, 'from lancedb.utils import CONFIG\n')] |
# Copyright (c) Hegel AI, Inc.
# All rights reserved.
#
# This source code's license can be found in the
# LICENSE file in the root directory of this source tree.
import itertools
import warnings
import pandas as pd
from typing import Callable, Optional
try:
import lancedb
from lancedb.embeddings import with_embeddings
except ImportError:
lancedb = None
import logging
from time import perf_counter
from .experiment import Experiment
from ._utils import _get_dynamic_columns
VALID_TASKS = [""]
def query_builder(
table: "lancedb.Table",
embed_fn: Callable,
text: str,
metric: str = "cosine",
limit: int = 3,
filter: str = None,
nprobes: int = None,
refine_factor: int = None,
):
if nprobes is not None or refine_factor is not None:
warnings.warn(
"`nprobes` and `refine_factor` are not used by the default `query_builder`. "
"Feel free to open an issue to request adding support for them."
)
query = table.search(embed_fn(text)[0]).metric(metric)
if filter:
query = query.where(filter)
return query.limit(limit).to_df()
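# Direct usage sketch (assumptions: `tbl` is an open lancedb table and `embed`
# maps a string to a list containing one query vector):
#   df = query_builder(tbl, embed, "greeting", metric="cosine", limit=5)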
class LanceDBExperiment(Experiment):
r"""
Perform an experiment with ``LanceDB`` to test different embedding functions or retrieval arguments.
You can query from an existing table, or create a new one (and insert documents into it) during
the experiment.
Args:
uri (str): LanceDB uri to interact with your database. Default is "lancedb"
table_name (str): the table that you will get or create. Default is "table"
        use_existing_table (bool): determines whether to create a new table or use
            an existing one
embedding_fns (list[Callable]): embedding functions to test in the experiment
by default only uses the default one in LanceDB
query_args (dict[str, list]): parameters used to query the table
Each value is expected to be a list to create all possible combinations
data (Optional[list[dict]]): documents or embeddings that will be added to
the newly created table
text_col_name (str): name of the text column in the table. Default is "text"
clean_up (bool): determines whether to drop the table after the experiment ends
"""
def __init__(
self,
embedding_fns: dict[str, Callable],
query_args: dict[str, list],
uri: str = "lancedb",
table_name: str = "table",
use_existing_table: bool = False,
data: Optional[list[dict]] = None,
text_col_name: str = "text",
clean_up: bool = False,
):
if lancedb is None:
raise ModuleNotFoundError(
"Package `lancedb` is required to be installed to use this experiment."
"Please use `pip install lancedb` to install the package"
)
self.table_name = table_name
self.use_existing_table = use_existing_table
self.embedding_fns = embedding_fns
if use_existing_table and data:
raise RuntimeError("You can either use an existing collection or create a new one during the experiment.")
if not use_existing_table and data is None:
raise RuntimeError("If you choose to create a new collection, you must also add to it.")
self.data = data if data is not None else []
self.argument_combos: list[dict] = []
self.text_col_name = text_col_name
self.db = lancedb.connect(uri)
self.completion_fn = self.lancedb_completion_fn
self.query_args = query_args
self.clean_up = clean_up
super().__init__()
def prepare(self):
for combo in itertools.product(*self.query_args.values()):
self.argument_combos.append(dict(zip(self.query_args.keys(), combo)))
def run(self, runs: int = 1):
input_args = [] # This will be used to construct DataFrame table
results = []
latencies = []
if not self.argument_combos:
logging.info("Preparing first...")
self.prepare()
for emb_fn_name, emb_fn in self.embedding_fns.items():
if self.use_existing_table: # Use existing table
table = self.db.open_table(self.table_name)
if not table:
raise RuntimeError(f"Table {self.table_name} does not exist.")
else: # Create table and insert data
data = with_embeddings(emb_fn, self.data, self.text_col_name)
table = self.db.create_table(self.table_name, data, mode="overwrite")
# Query from table
for query_arg_dict in self.argument_combos:
query_args = query_arg_dict.copy()
for _ in range(runs):
start = perf_counter()
results.append(self.lancedb_completion_fn(table=table, embedding_fn=emb_fn, **query_args))
latencies.append(perf_counter() - start)
query_args["emb_fn"] = emb_fn_name # Saving for visualization
input_args.append(query_args)
# Clean up
if self.clean_up:
self.db.drop_table(self.table_name)
self._construct_result_dfs(input_args, results, latencies)
def lancedb_completion_fn(self, table, embedding_fn, **kwargs):
return query_builder(table, embedding_fn, **kwargs)
def _construct_result_dfs(
self,
input_args: list[dict[str, object]],
results: list[dict[str, object]],
latencies: list[float],
):
r"""
Construct a few DataFrames that contain all relevant data (i.e. input arguments, results, evaluation metrics).
        This version only extracts the most relevant objects returned by LanceDB.
Args:
            input_args (list[dict[str, object]]): list of dictionaries, each of which is the set of
                input arguments that was passed into the model
results (list[dict[str, object]]): list of responses from the model
latencies (list[float]): list of latency measurements
"""
# `input_arg_df` contains all all input args
input_arg_df = pd.DataFrame(input_args)
# `dynamic_input_arg_df` contains input args that has more than one unique values
dynamic_input_arg_df = _get_dynamic_columns(input_arg_df)
# `response_df` contains the extracted response (often being the text response)
response_dict = dict()
response_dict["top doc ids"] = [self._extract_top_doc_ids(result) for result in results]
response_dict["distances"] = [self._extract_lancedb_dists(result) for result in results]
response_dict["documents"] = [self._extract_lancedb_docs(result) for result in results]
response_df = pd.DataFrame(response_dict)
# `result_df` contains everything returned by the completion function
result_df = response_df # pd.concat([self.response_df, pd.DataFrame(results)], axis=1)
# `score_df` contains computed metrics (e.g. latency, evaluation metrics)
self.score_df = pd.DataFrame({"latency": latencies})
# `partial_df` contains some input arguments, extracted responses, and score
self.partial_df = pd.concat([dynamic_input_arg_df, response_df, self.score_df], axis=1)
# `full_df` contains all input arguments, responses, and score
self.full_df = pd.concat([input_arg_df, result_df, self.score_df], axis=1)
@staticmethod
def _extract_top_doc_ids(output: pd.DataFrame) -> list[tuple[str, float]]:
r"""Helper function to get distances between documents from LanceDB."""
return output.to_dict(orient="list")["ids"]
@staticmethod
def _extract_lancedb_dists(output: pd.DataFrame) -> list[tuple[str, float]]:
r"""Helper function to get distances between documents from LanceDB."""
return output.to_dict(orient="list")["_distance"]
@staticmethod
def _extract_lancedb_docs(output: pd.DataFrame) -> list[tuple[str, float]]:
r"""Helper function to get distances between documents from LanceDB."""
return output.to_dict(orient="list")["text"]
| [
"lancedb.embeddings.with_embeddings",
"lancedb.connect"
] | [((797, 961), 'warnings.warn', 'warnings.warn', (['"""`nprobes` and `refine_factor` are not used by the default `query_builder`. Feel free to open an issue to request adding support for them."""'], {}), "(\n '`nprobes` and `refine_factor` are not used by the default `query_builder`. Feel free to open an issue to request adding support for them.'\n )\n", (810, 961), False, 'import warnings\n'), ((3496, 3516), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (3511, 3516), False, 'import lancedb\n'), ((6251, 6275), 'pandas.DataFrame', 'pd.DataFrame', (['input_args'], {}), '(input_args)\n', (6263, 6275), True, 'import pandas as pd\n'), ((6864, 6891), 'pandas.DataFrame', 'pd.DataFrame', (['response_dict'], {}), '(response_dict)\n', (6876, 6891), True, 'import pandas as pd\n'), ((7173, 7209), 'pandas.DataFrame', 'pd.DataFrame', (["{'latency': latencies}"], {}), "({'latency': latencies})\n", (7185, 7209), True, 'import pandas as pd\n'), ((7322, 7391), 'pandas.concat', 'pd.concat', (['[dynamic_input_arg_df, response_df, self.score_df]'], {'axis': '(1)'}), '([dynamic_input_arg_df, response_df, self.score_df], axis=1)\n', (7331, 7391), True, 'import pandas as pd\n'), ((7486, 7545), 'pandas.concat', 'pd.concat', (['[input_arg_df, result_df, self.score_df]'], {'axis': '(1)'}), '([input_arg_df, result_df, self.score_df], axis=1)\n', (7495, 7545), True, 'import pandas as pd\n'), ((4045, 4079), 'logging.info', 'logging.info', (['"""Preparing first..."""'], {}), "('Preparing first...')\n", (4057, 4079), False, 'import logging\n'), ((4479, 4533), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['emb_fn', 'self.data', 'self.text_col_name'], {}), '(emb_fn, self.data, self.text_col_name)\n', (4494, 4533), False, 'from lancedb.embeddings import with_embeddings\n'), ((4825, 4839), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (4837, 4839), False, 'from time import perf_counter\n'), ((4988, 5002), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (5000, 5002), False, 'from time import perf_counter\n')] |
import os
from pathlib import Path
from tqdm import tqdm
from lancedb import connect
from pydantic import BaseModel
from lancedb.pydantic import LanceModel, Vector
from lancedb.embeddings import get_registry
from typing import Iterable
DB_PATH = Path(os.getcwd(), "db")
DATA_PATH = Path(os.getcwd(), "data")
DB_TABLE = "paul_graham"
class Document(BaseModel):
id: int
text: str
filename: str
openai = get_registry().get("openai").create(name="text-embedding-3-large", dim=256)
class TextChunk(LanceModel):
id: int
doc_id: int
chunk_num: int
start_pos: int
end_pos: int
text: str = openai.SourceField()
# For some reason if we call openai.ndim(), it returns 1536 instead of 256 like we want
vector: Vector(openai.ndims()) = openai.VectorField(default=None)
def chunk_text(
documents: Iterable[Document], window_size: int = 1024, overlap: int = 0
):
id = 0
for doc in documents:
for chunk_num, start_pos in enumerate(
range(0, len(doc.text), window_size - overlap)
):
# TODO: Fix up this and use a Lance Model instead - have reached out to the team to ask for some help
yield {
"id": id,
"doc_id": doc.id,
"chunk_num": chunk_num,
"start_pos": start_pos,
"end_pos": start_pos + window_size,
"text": doc.text[start_pos : start_pos + window_size],
}
id += 1
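# Illustrative check of the sliding window (an assumption about the intended
# behavior, not a test from this repo): with window_size=5 and overlap=2 the
# chunk start positions advance by 3 characters.
def _chunking_example() -> None:
    doc = Document(id=0, text="abcdefgh", filename="demo.md")
    starts = [c["start_pos"] for c in chunk_text([doc], window_size=5, overlap=2)]
    assert starts == [0, 3, 6]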
def read_file_content(path: Path, file_suffix: str) -> Iterable[Document]:
for i, file in enumerate(path.iterdir()):
if file.suffix != file_suffix:
continue
yield Document(id=i, text=file.read_text(), filename=file.name)
def batch_chunks(chunks, batch_size=10):
batch = []
for item in chunks:
batch.append(item)
if len(batch) == batch_size:
yield batch
batch = []
if batch:
yield batch
def main():
assert "OPENAI_API_KEY" in os.environ, "OPENAI_API_KEY is not set"
db = connect(DB_PATH)
table = db.create_table(DB_TABLE, schema=TextChunk, mode="overwrite")
documents = read_file_content(DATA_PATH, file_suffix=".md")
chunks = chunk_text(documents)
batched_chunks = batch_chunks(chunks, 20)
for chunk_batch in tqdm(batched_chunks):
table.add(chunk_batch)
if __name__ == "__main__":
main()
| [
"lancedb.connect",
"lancedb.embeddings.get_registry"
] | [((253, 264), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (262, 264), False, 'import os\n'), ((289, 300), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (298, 300), False, 'import os\n'), ((2068, 2084), 'lancedb.connect', 'connect', (['DB_PATH'], {}), '(DB_PATH)\n', (2075, 2084), False, 'from lancedb import connect\n'), ((2329, 2349), 'tqdm.tqdm', 'tqdm', (['batched_chunks'], {}), '(batched_chunks)\n', (2333, 2349), False, 'from tqdm import tqdm\n'), ((419, 433), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (431, 433), False, 'from lancedb.embeddings import get_registry\n')] |
"""LanceDB vector store with cloud storage support."""
import os
from typing import Any, Optional
from dotenv import load_dotenv
from llama_index.schema import NodeRelationship, RelatedNodeInfo, TextNode
from llama_index.vector_stores import LanceDBVectorStore as LanceDBVectorStoreBase
from llama_index.vector_stores.lancedb import _to_lance_filter, _to_llama_similarities
from llama_index.vector_stores.types import VectorStoreQuery, VectorStoreQueryResult
from pandas import DataFrame
load_dotenv()
class LanceDBVectorStore(LanceDBVectorStoreBase):
"""Advanced LanceDB Vector Store supporting cloud storage and prefiltering."""
from lancedb.query import LanceQueryBuilder
from lancedb.table import Table
def __init__(
self,
uri: str,
table_name: str = "vectors",
nprobes: int = 20,
refine_factor: Optional[int] = None,
api_key: Optional[str] = None,
region: Optional[str] = None,
**kwargs: Any,
) -> None:
"""Init params."""
self._setup_connection(uri, api_key, region)
self.uri = uri
self.table_name = table_name
self.nprobes = nprobes
self.refine_factor = refine_factor
self.api_key = api_key
self.region = region
def _setup_connection(self, uri: str, api_key: Optional[str] = None, region: Optional[str] = None):
"""Establishes a robust connection to LanceDB."""
api_key = api_key or os.getenv('LANCEDB_API_KEY')
region = region or os.getenv('LANCEDB_REGION')
import_err_msg = "`lancedb` package not found, please run `pip install lancedb`"
try:
import lancedb
except ImportError:
raise ImportError(import_err_msg)
if api_key and region:
self.connection = lancedb.connect(uri, api_key=api_key, region=region)
else:
self.connection = lancedb.connect(uri)
def query(
self,
query: VectorStoreQuery,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Enhanced query method to support prefiltering in LanceDB queries."""
table = self.connection.open_table(self.table_name)
lance_query = self._prepare_lance_query(query, table, **kwargs)
results = lance_query.to_df()
return self._construct_query_result(results)
def _prepare_lance_query(self, query: VectorStoreQuery, table: Table, **kwargs) -> LanceQueryBuilder:
"""Prepares the LanceDB query considering prefiltering and additional parameters."""
if query.filters is not None:
if "where" in kwargs:
raise ValueError(
"Cannot specify filter via both query and kwargs. "
"Use kwargs only for lancedb specific items that are "
"not supported via the generic query interface.")
where = _to_lance_filter(query.filters)
else:
where = kwargs.pop("where", None)
prefilter = kwargs.pop("prefilter", False)
lance_query = (
table.search(query.query_embedding).limit(query.similarity_top_k).where(
where, prefilter=prefilter).nprobes(self.nprobes))
if self.refine_factor is not None:
lance_query.refine_factor(self.refine_factor)
return lance_query
def _construct_query_result(self, results: DataFrame) -> VectorStoreQueryResult:
"""Constructs a VectorStoreQueryResult from a LanceDB query result."""
nodes = []
for _, row in results.iterrows():
node = TextNode(
text=row.get('text', ''), # ensure text is a string
id_=row['id'],
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(node_id=row['doc_id']),
})
nodes.append(node)
return VectorStoreQueryResult(
nodes=nodes,
similarities=_to_llama_similarities(results),
ids=results["id"].tolist(),
)
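# Usage sketch for a prefiltered search; the uri, filter column, and embedding
# below are illustrative, not part of this module.
def _example_prefilter_query(embedding: list) -> VectorStoreQueryResult:
    store = LanceDBVectorStore(uri="./lancedb_demo", table_name="vectors")
    query = VectorStoreQuery(query_embedding=embedding, similarity_top_k=3)
    return store.query(query, where="doc_id = 'doc-1'", prefilter=True)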
| [
"lancedb.connect"
] | [((490, 503), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (501, 503), False, 'from dotenv import load_dotenv\n'), ((1464, 1492), 'os.getenv', 'os.getenv', (['"""LANCEDB_API_KEY"""'], {}), "('LANCEDB_API_KEY')\n", (1473, 1492), False, 'import os\n'), ((1520, 1547), 'os.getenv', 'os.getenv', (['"""LANCEDB_REGION"""'], {}), "('LANCEDB_REGION')\n", (1529, 1547), False, 'import os\n'), ((1814, 1866), 'lancedb.connect', 'lancedb.connect', (['uri'], {'api_key': 'api_key', 'region': 'region'}), '(uri, api_key=api_key, region=region)\n', (1829, 1866), False, 'import lancedb\n'), ((1911, 1931), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (1926, 1931), False, 'import lancedb\n'), ((2898, 2929), 'llama_index.vector_stores.lancedb._to_lance_filter', '_to_lance_filter', (['query.filters'], {}), '(query.filters)\n', (2914, 2929), False, 'from llama_index.vector_stores.lancedb import _to_lance_filter, _to_llama_similarities\n'), ((4021, 4052), 'llama_index.vector_stores.lancedb._to_llama_similarities', '_to_llama_similarities', (['results'], {}), '(results)\n', (4043, 4052), False, 'from llama_index.vector_stores.lancedb import _to_lance_filter, _to_llama_similarities\n'), ((3841, 3879), 'llama_index.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': "row['doc_id']"}), "(node_id=row['doc_id'])\n", (3856, 3879), False, 'from llama_index.schema import NodeRelationship, RelatedNodeInfo, TextNode\n')] |
from pathlib import Path
from typing import Any, Callable
from lancedb import DBConnection as LanceDBConnection
from lancedb import connect as lancedb_connect
from lancedb.table import Table as LanceDBTable
from openai import Client as OpenAIClient
from pydantic import Field, PrivateAttr
from crewai_tools.tools.rag.rag_tool import Adapter
def _default_embedding_function():
client = OpenAIClient()
def _embedding_function(input):
rs = client.embeddings.create(input=input, model="text-embedding-ada-002")
return [record.embedding for record in rs.data]
return _embedding_function
class LanceDBAdapter(Adapter):
uri: str | Path
table_name: str
embedding_function: Callable = Field(default_factory=_default_embedding_function)
top_k: int = 3
vector_column_name: str = "vector"
text_column_name: str = "text"
_db: LanceDBConnection = PrivateAttr()
_table: LanceDBTable = PrivateAttr()
def model_post_init(self, __context: Any) -> None:
self._db = lancedb_connect(self.uri)
self._table = self._db.open_table(self.table_name)
return super().model_post_init(__context)
def query(self, question: str) -> str:
query = self.embedding_function([question])[0]
results = (
self._table.search(query, vector_column_name=self.vector_column_name)
.limit(self.top_k)
.select([self.text_column_name])
.to_list()
)
values = [result[self.text_column_name] for result in results]
return "\n".join(values)
| [
"lancedb.connect"
] | [((393, 407), 'openai.Client', 'OpenAIClient', ([], {}), '()\n', (405, 407), True, 'from openai import Client as OpenAIClient\n'), ((724, 774), 'pydantic.Field', 'Field', ([], {'default_factory': '_default_embedding_function'}), '(default_factory=_default_embedding_function)\n', (729, 774), False, 'from pydantic import Field, PrivateAttr\n'), ((898, 911), 'pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (909, 911), False, 'from pydantic import Field, PrivateAttr\n'), ((939, 952), 'pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (950, 952), False, 'from pydantic import Field, PrivateAttr\n'), ((1028, 1053), 'lancedb.connect', 'lancedb_connect', (['self.uri'], {}), '(self.uri)\n', (1043, 1053), True, 'from lancedb import connect as lancedb_connect\n')] |
import logging
from typing import Any, Dict, Generator, List, Optional, Sequence, Tuple, Type
import lancedb
import pandas as pd
from dotenv import load_dotenv
from lancedb.pydantic import LanceModel, Vector
from lancedb.query import LanceVectorQueryBuilder
from pydantic import BaseModel, ValidationError, create_model
from langroid.embedding_models.base import (
EmbeddingModel,
EmbeddingModelsConfig,
)
from langroid.embedding_models.models import OpenAIEmbeddingsConfig
from langroid.mytypes import Document, EmbeddingFunction
from langroid.utils.configuration import settings
from langroid.utils.pydantic_utils import (
dataframe_to_document_model,
dataframe_to_documents,
extend_document_class,
extra_metadata,
flatten_pydantic_instance,
flatten_pydantic_model,
nested_dict_from_flat,
)
from langroid.vector_store.base import VectorStore, VectorStoreConfig
logger = logging.getLogger(__name__)
class LanceDBConfig(VectorStoreConfig):
cloud: bool = False
collection_name: str | None = "temp"
storage_path: str = ".lancedb/data"
embedding: EmbeddingModelsConfig = OpenAIEmbeddingsConfig()
distance: str = "cosine"
# document_class is used to store in lancedb with right schema,
# and also to retrieve the right type of Documents when searching.
document_class: Type[Document] = Document
flatten: bool = False # flatten Document class into LanceSchema ?
class LanceDB(VectorStore):
def __init__(self, config: LanceDBConfig = LanceDBConfig()):
super().__init__(config)
self.config: LanceDBConfig = config
emb_model = EmbeddingModel.create(config.embedding)
self.embedding_fn: EmbeddingFunction = emb_model.embedding_fn()
self.embedding_dim = emb_model.embedding_dims
self.host = config.host
self.port = config.port
self.is_from_dataframe = False # were docs ingested from a dataframe?
self.df_metadata_columns: List[str] = [] # metadata columns from dataframe
self._setup_schemas(config.document_class)
load_dotenv()
if self.config.cloud:
logger.warning(
"LanceDB Cloud is not available yet. Switching to local storage."
)
config.cloud = False
else:
try:
self.client = lancedb.connect(
uri=config.storage_path,
)
except Exception as e:
new_storage_path = config.storage_path + ".new"
logger.warning(
f"""
Error connecting to local LanceDB at {config.storage_path}:
{e}
Switching to {new_storage_path}
"""
)
self.client = lancedb.connect(
uri=new_storage_path,
)
# Note: Only create collection if a non-null collection name is provided.
# This is useful to delay creation of vecdb until we have a suitable
# collection name (e.g. we could get it from the url or folder path).
if config.collection_name is not None:
self.create_collection(
config.collection_name, replace=config.replace_collection
)
def _setup_schemas(self, doc_cls: Type[Document] | None) -> None:
doc_cls = doc_cls or self.config.document_class
self.unflattened_schema = self._create_lance_schema(doc_cls)
self.schema = (
self._create_flat_lance_schema(doc_cls)
if self.config.flatten
else self.unflattened_schema
)
def clear_empty_collections(self) -> int:
coll_names = self.list_collections()
n_deletes = 0
for name in coll_names:
nr = self.client.open_table(name).head(1).shape[0]
if nr == 0:
n_deletes += 1
self.client.drop_table(name)
return n_deletes
def clear_all_collections(self, really: bool = False, prefix: str = "") -> int:
"""Clear all collections with the given prefix."""
if not really:
logger.warning("Not deleting all collections, set really=True to confirm")
return 0
coll_names = [
c for c in self.list_collections(empty=True) if c.startswith(prefix)
]
if len(coll_names) == 0:
logger.warning(f"No collections found with prefix {prefix}")
return 0
n_empty_deletes = 0
n_non_empty_deletes = 0
for name in coll_names:
nr = self.client.open_table(name).head(1).shape[0]
n_empty_deletes += nr == 0
n_non_empty_deletes += nr > 0
self.client.drop_table(name)
logger.warning(
f"""
Deleted {n_empty_deletes} empty collections and
{n_non_empty_deletes} non-empty collections.
"""
)
return n_empty_deletes + n_non_empty_deletes
def list_collections(self, empty: bool = False) -> List[str]:
"""
Returns:
List of collection names that have at least one vector.
Args:
empty (bool, optional): Whether to include empty collections.
"""
colls = self.client.table_names(limit=None)
if len(colls) == 0:
return []
if empty: # include empty tbls
return colls # type: ignore
counts = [self.client.open_table(coll).head(1).shape[0] for coll in colls]
return [coll for coll, count in zip(colls, counts) if count > 0]
def _create_lance_schema(self, doc_cls: Type[Document]) -> Type[BaseModel]:
"""
Create a subclass of LanceModel with fields:
- id (str)
- Vector field that has dims equal to
the embedding dimension of the embedding model, and a data field of type
DocClass.
- other fields from doc_cls
Args:
doc_cls (Type[Document]): A Pydantic model which should be a subclass of
Document, to be used as the type for the data field.
Returns:
Type[BaseModel]: A new Pydantic model subclassing from LanceModel.
Raises:
            ValueError: If `doc_cls` is not a subclass of Document.
"""
if not issubclass(doc_cls, Document):
raise ValueError("DocClass must be a subclass of Document")
n = self.embedding_dim
# Prepare fields for the new model
fields = {"id": (str, ...), "vector": (Vector(n), ...)}
sorted_fields = dict(
sorted(doc_cls.__fields__.items(), key=lambda item: item[0])
)
# Add both statically and dynamically defined fields from doc_cls
for field_name, field in sorted_fields.items():
fields[field_name] = (field.outer_type_, field.default)
# Create the new model with dynamic fields
NewModel = create_model(
"NewModel", __base__=LanceModel, **fields
) # type: ignore
return NewModel # type: ignore
def _create_flat_lance_schema(self, doc_cls: Type[Document]) -> Type[BaseModel]:
"""
Flat version of the lance_schema, as nested Pydantic schemas are not yet
supported by LanceDB.
"""
lance_model = self._create_lance_schema(doc_cls)
FlatModel = flatten_pydantic_model(lance_model, base_model=LanceModel)
return FlatModel
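    # Rough shape of what the two factories produce for the default Document
    # class (an illustration; field names follow langroid's Document/DocMetaData
    # and `n` is the embedding dimension):
    #   nested schema: id: str, vector: Vector(n), content: str, metadata: DocMetaData
    #   flat schema:   id: str, vector: Vector(n), content: str, plus flattened metadata fields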
def create_collection(self, collection_name: str, replace: bool = False) -> None:
"""
Create a collection with the given name, optionally replacing an existing
collection if `replace` is True.
Args:
collection_name (str): Name of the collection to create.
replace (bool): Whether to replace an existing collection
with the same name. Defaults to False.
"""
self.config.collection_name = collection_name
collections = self.list_collections()
if collection_name in collections:
coll = self.client.open_table(collection_name)
if coll.head().shape[0] > 0:
logger.warning(f"Non-empty Collection {collection_name} already exists")
if not replace:
logger.warning("Not replacing collection")
return
else:
logger.warning("Recreating fresh collection")
self.client.create_table(collection_name, schema=self.schema, mode="overwrite")
        if settings.debug:
            # temporarily raise the log level so the creation is visible
            level = logger.getEffectiveLevel()
            logger.setLevel(logging.INFO)
            logger.info(f"Created collection {collection_name}")
            logger.setLevel(level)
def _maybe_set_doc_class_schema(self, doc: Document) -> None:
"""
Set the config.document_class and self.schema based on doc if needed
Args:
doc: an instance of Document, to be added to a collection
"""
extra_metadata_fields = extra_metadata(doc, self.config.document_class)
if len(extra_metadata_fields) > 0:
logger.warning(
f"""
Added documents contain extra metadata fields:
{extra_metadata_fields}
which were not present in the original config.document_class.
Trying to change document_class and corresponding schemas.
Overriding LanceDBConfig.document_class with an auto-generated
Pydantic class that includes these extra fields.
If this fails, or you see odd results, it is recommended that you
define a subclass of Document, with metadata of class derived from
DocMetaData, with extra fields defined via
`Field(..., description="...")` declarations,
and set this document class as the value of the
LanceDBConfig.document_class attribute.
"""
)
doc_cls = extend_document_class(doc)
self.config.document_class = doc_cls
self._setup_schemas(doc_cls)
def add_documents(self, documents: Sequence[Document]) -> None:
super().maybe_add_ids(documents)
colls = self.list_collections(empty=True)
if len(documents) == 0:
return
embedding_vecs = self.embedding_fn([doc.content for doc in documents])
coll_name = self.config.collection_name
if coll_name is None:
raise ValueError("No collection name set, cannot ingest docs")
self._maybe_set_doc_class_schema(documents[0])
if (
coll_name not in colls
or self.client.open_table(coll_name).head(1).shape[0] == 0
):
            # collection either doesn't exist or is empty, so replace it
self.create_collection(coll_name, replace=True)
ids = [str(d.id()) for d in documents]
# don't insert all at once, batch in chunks of b,
# else we get an API error
b = self.config.batch_size
def make_batches() -> Generator[List[BaseModel], None, None]:
for i in range(0, len(ids), b):
batch = [
self.unflattened_schema(
id=ids[i + j],
vector=embedding_vecs[i + j],
**doc.dict(),
)
for j, doc in enumerate(documents[i : i + b])
]
if self.config.flatten:
batch = [
flatten_pydantic_instance(instance) # type: ignore
for instance in batch
]
yield batch
tbl = self.client.open_table(self.config.collection_name)
try:
tbl.add(make_batches())
except Exception as e:
logger.error(
f"""
Error adding documents to LanceDB: {e}
            POSSIBLE REMEDY: Delete the LanceDB storage directory
{self.config.storage_path} and try again.
"""
)
def add_dataframe(
self,
df: pd.DataFrame,
content: str = "content",
metadata: List[str] = [],
) -> None:
"""
Add a dataframe to the collection.
Args:
df (pd.DataFrame): A dataframe
content (str): The name of the column in the dataframe that contains the
text content to be embedded using the embedding model.
metadata (List[str]): A list of column names in the dataframe that contain
metadata to be stored in the database. Defaults to [].
"""
self.is_from_dataframe = True
actual_metadata = metadata.copy()
self.df_metadata_columns = actual_metadata # could be updated below
# get content column
content_values = df[content].values.tolist()
embedding_vecs = self.embedding_fn(content_values)
# add vector column
df["vector"] = embedding_vecs
if content != "content":
# rename content column to "content", leave existing column intact
df = df.rename(columns={content: "content"}, inplace=False)
if "id" not in df.columns:
docs = dataframe_to_documents(df, content="content", metadata=metadata)
ids = [str(d.id()) for d in docs]
df["id"] = ids
if "id" not in actual_metadata:
actual_metadata += ["id"]
colls = self.list_collections(empty=True)
coll_name = self.config.collection_name
if (
coll_name not in colls
or self.client.open_table(coll_name).head(1).shape[0] == 0
):
# collection either doesn't exist or is empty, so replace it
# and set new schema from df
self.client.create_table(
self.config.collection_name,
data=df,
mode="overwrite",
)
doc_cls = dataframe_to_document_model(
df,
content=content,
metadata=actual_metadata,
exclude=["vector"],
)
self.config.document_class = doc_cls # type: ignore
self._setup_schemas(doc_cls) # type: ignore
else:
# collection exists and is not empty, so append to it
tbl = self.client.open_table(self.config.collection_name)
tbl.add(df)
def delete_collection(self, collection_name: str) -> None:
self.client.drop_table(collection_name)
def _lance_result_to_docs(self, result: LanceVectorQueryBuilder) -> List[Document]:
if self.is_from_dataframe:
df = result.to_pandas()
return dataframe_to_documents(
df,
content="content",
metadata=self.df_metadata_columns,
doc_cls=self.config.document_class,
)
else:
records = result.to_arrow().to_pylist()
return self._records_to_docs(records)
def _records_to_docs(self, records: List[Dict[str, Any]]) -> List[Document]:
if self.config.flatten:
docs = [
self.unflattened_schema(**nested_dict_from_flat(rec)) for rec in records
]
else:
try:
docs = [self.schema(**rec) for rec in records]
except ValidationError as e:
raise ValueError(
f"""
Error validating LanceDB result: {e}
HINT: This could happen when you're re-using an
existing LanceDB store with a different schema.
            Try deleting your local LanceDB storage at `{self.config.storage_path}`,
            then re-ingest your documents and/or replace the collections.
"""
)
doc_cls = self.config.document_class
doc_cls_field_names = doc_cls.__fields__.keys()
return [
doc_cls(
**{
field_name: getattr(doc, field_name)
for field_name in doc_cls_field_names
}
)
for doc in docs
]
def get_all_documents(self, where: str = "") -> List[Document]:
if self.config.collection_name is None:
raise ValueError("No collection name set, cannot retrieve docs")
tbl = self.client.open_table(self.config.collection_name)
pre_result = tbl.search(None).where(where or None).limit(None)
return self._lance_result_to_docs(pre_result)
def get_documents_by_ids(self, ids: List[str]) -> List[Document]:
if self.config.collection_name is None:
raise ValueError("No collection name set, cannot retrieve docs")
_ids = [str(id) for id in ids]
tbl = self.client.open_table(self.config.collection_name)
docs = []
for _id in _ids:
results = self._lance_result_to_docs(tbl.search().where(f"id == '{_id}'"))
if len(results) > 0:
docs.append(results[0])
return docs
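        # Sketch of a batched alternative (assumed, not in the original):
        # LanceDB filters accept SQL-like predicates, so the per-id searches
        # above could be collapsed into a single `IN` query (at the cost of
        # the input ordering):
        #   id_list = ", ".join(f"'{i}'" for i in _ids)
        #   docs = self._lance_result_to_docs(tbl.search().where(f"id IN ({id_list})"))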
def similar_texts_with_scores(
self,
text: str,
k: int = 1,
where: Optional[str] = None,
) -> List[Tuple[Document, float]]:
embedding = self.embedding_fn([text])[0]
tbl = self.client.open_table(self.config.collection_name)
result = (
tbl.search(embedding).metric(self.config.distance).where(where).limit(k)
)
docs = self._lance_result_to_docs(result)
        # note: with the cosine metric, _distance = 1 - cosine similarity, so score = 1 - _distance
if self.is_from_dataframe:
scores = [
1 - rec["_distance"] for rec in result.to_pandas().to_dict("records")
]
else:
scores = [1 - rec["_distance"] for rec in result.to_arrow().to_pylist()]
if len(docs) == 0:
logger.warning(f"No matches found for {text}")
return []
if settings.debug:
logger.info(f"Found {len(docs)} matches, max score: {max(scores)}")
doc_score_pairs = list(zip(docs, scores))
self.show_if_debug(doc_score_pairs)
return doc_score_pairs
| [
"lancedb.pydantic.Vector",
"lancedb.connect"
] | [((911, 938), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (928, 938), False, 'import logging\n'), ((1125, 1149), 'langroid.embedding_models.models.OpenAIEmbeddingsConfig', 'OpenAIEmbeddingsConfig', ([], {}), '()\n', (1147, 1149), False, 'from langroid.embedding_models.models import OpenAIEmbeddingsConfig\n'), ((1627, 1666), 'langroid.embedding_models.base.EmbeddingModel.create', 'EmbeddingModel.create', (['config.embedding'], {}), '(config.embedding)\n', (1648, 1666), False, 'from langroid.embedding_models.base import EmbeddingModel, EmbeddingModelsConfig\n'), ((2080, 2093), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (2091, 2093), False, 'from dotenv import load_dotenv\n'), ((7037, 7092), 'pydantic.create_model', 'create_model', (['"""NewModel"""'], {'__base__': 'LanceModel'}), "('NewModel', __base__=LanceModel, **fields)\n", (7049, 7092), False, 'from pydantic import BaseModel, ValidationError, create_model\n'), ((7469, 7527), 'langroid.utils.pydantic_utils.flatten_pydantic_model', 'flatten_pydantic_model', (['lance_model'], {'base_model': 'LanceModel'}), '(lance_model, base_model=LanceModel)\n', (7491, 7527), False, 'from langroid.utils.pydantic_utils import dataframe_to_document_model, dataframe_to_documents, extend_document_class, extra_metadata, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((9064, 9111), 'langroid.utils.pydantic_utils.extra_metadata', 'extra_metadata', (['doc', 'self.config.document_class'], {}), '(doc, self.config.document_class)\n', (9078, 9111), False, 'from langroid.utils.pydantic_utils import dataframe_to_document_model, dataframe_to_documents, extend_document_class, extra_metadata, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((10124, 10150), 'langroid.utils.pydantic_utils.extend_document_class', 'extend_document_class', (['doc'], {}), '(doc)\n', (10145, 10150), False, 'from langroid.utils.pydantic_utils import dataframe_to_document_model, dataframe_to_documents, extend_document_class, extra_metadata, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((13444, 13508), 'langroid.utils.pydantic_utils.dataframe_to_documents', 'dataframe_to_documents', (['df'], {'content': '"""content"""', 'metadata': 'metadata'}), "(df, content='content', metadata=metadata)\n", (13466, 13508), False, 'from langroid.utils.pydantic_utils import dataframe_to_document_model, dataframe_to_documents, extend_document_class, extra_metadata, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((14182, 14280), 'langroid.utils.pydantic_utils.dataframe_to_document_model', 'dataframe_to_document_model', (['df'], {'content': 'content', 'metadata': 'actual_metadata', 'exclude': "['vector']"}), "(df, content=content, metadata=actual_metadata,\n exclude=['vector'])\n", (14209, 14280), False, 'from langroid.utils.pydantic_utils import dataframe_to_document_model, dataframe_to_documents, extend_document_class, extra_metadata, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((14943, 15064), 'langroid.utils.pydantic_utils.dataframe_to_documents', 'dataframe_to_documents', (['df'], {'content': '"""content"""', 'metadata': 'self.df_metadata_columns', 'doc_cls': 'self.config.document_class'}), "(df, content='content', metadata=self.\n df_metadata_columns, doc_cls=self.config.document_class)\n", (14965, 15064), False, 'from langroid.utils.pydantic_utils import dataframe_to_document_model, dataframe_to_documents, extend_document_class, extra_metadata, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((2342, 2382), 'lancedb.connect', 'lancedb.connect', ([], {'uri': 'config.storage_path'}), '(uri=config.storage_path)\n', (2357, 2382), False, 'import lancedb\n'), ((6637, 6646), 'lancedb.pydantic.Vector', 'Vector', (['n'], {}), '(n)\n', (6643, 6646), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((2806, 2843), 'lancedb.connect', 'lancedb.connect', ([], {'uri': 'new_storage_path'}), '(uri=new_storage_path)\n', (2821, 2843), False, 'import lancedb\n'), ((11696, 11731), 'langroid.utils.pydantic_utils.flatten_pydantic_instance', 'flatten_pydantic_instance', (['instance'], {}), '(instance)\n', (11721, 11731), False, 'from langroid.utils.pydantic_utils import dataframe_to_document_model, dataframe_to_documents, extend_document_class, extra_metadata, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((15432, 15458), 'langroid.utils.pydantic_utils.nested_dict_from_flat', 'nested_dict_from_flat', (['rec'], {}), '(rec)\n', (15453, 15458), False, 'from langroid.utils.pydantic_utils import dataframe_to_document_model, dataframe_to_documents, extend_document_class, extra_metadata, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n')]
import json
import lancedb
from lancedb.pydantic import Vector, LanceModel
from datetime import datetime
# import pyarrow as pa
TABLE_NAME = "documents"
uri = "data/sample-lancedb"
db = lancedb.connect(uri)
# vector: list of vectors
# file_name: name of file
# file_path: path of file
# id
# updated_at
# created_at
class Document(LanceModel):
id: str
file_name: str
file_path: str
created_at: datetime
updated_at: datetime
vector: Vector(768) # Palm Embeddings size
try:
table = db.create_table(TABLE_NAME, schema=Document)
except OSError:
print("table exists")
table = db.open_table(TABLE_NAME)
except Exception as inst:
# Print out the type of exceptions.
print(type(inst))
print(inst.args)
print(inst)
if True:
now = datetime.now()
# Idempotent upsert. Alternatively we can delete first, then insert.
table.add(
[
Document(
id="1",
file_name="test_name",
file_path="test_path",
created_at=now,
updated_at=now,
vector=[i for i in range(768)],
)
]
)
table.delete(f'id="1" AND created_at != timestamp "{now}"')
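    # The add-then-delete pair above emulates an upsert: the fresh row for
    # id="1" is written first, then stale rows with the same id (a different
    # created_at) are removed, so a failure between the two steps leaves a
    # duplicate rather than a missing row.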
if False:
table.update(
where='id="1"',
values=Document(
id="1",
file_name="test_name",
file_path="test_path",
created_at=datetime.now(),
updated_at=datetime.now(),
vector=[i for i in range(768)],
),
)
vector = [i for i in range(768)]
result = table.search(vector).limit(2).to_list()
for item in result:
print(item)
# print(json.dumps(item, indent=2))
print(db[TABLE_NAME].head())
| [
"lancedb.pydantic.Vector",
"lancedb.connect"
] | [((189, 209), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (204, 209), False, 'import lancedb\n'), ((461, 472), 'lancedb.pydantic.Vector', 'Vector', (['(768)'], {}), '(768)\n', (467, 472), False, 'from lancedb.pydantic import Vector, LanceModel\n'), ((786, 800), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (798, 800), False, 'from datetime import datetime\n'), ((1421, 1435), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1433, 1435), False, 'from datetime import datetime\n'), ((1460, 1474), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1472, 1474), False, 'from datetime import datetime\n')] |
import json
from sentence_transformers import SentenceTransformer
from pydantic.main import ModelMetaclass
from pathlib import Path
import pandas as pd
import sqlite3
from uuid import uuid4
import lancedb
encoder = SentenceTransformer('all-MiniLM-L6-v2')
data_folder = Path('data/collections')
config_file = Path('data/config/indexes.yaml')
index_folder = Path('indexes')
lance_folder = Path('indexes')
lance_folder.mkdir(parents=True, exist_ok=True)
sqlite_folder = Path('data/indexes/')
class LanceDBDocument():
def __init__(self, document:dict, title:str, text:str, fields, tags=None, date=None, file_path=None):
self.document = self.fill_missing_fields(document, text, title, tags, date)
# self.text = document[text]
# self.tags = document[tags] if tags is not None else list()
# self.date = document[date] if date is not None else None
self.file_path = file_path
self.metadata = {k:document[k] for k in fields if k not in [title, text, tags, date]}
self.uuid = str(uuid4()) if 'uuid' not in document else document['uuid']
self.save_uuids = list()
self.sqlite_fields = list()
self.lance_exclude = list()
def fill_missing_fields(self, document, text, title, tags, date):
if title not in document:
self.title = ''
else:
self.title = document[title]
if text not in document:
self.text = ''
else:
self.text = document[text]
if date not in document:
self.date = ''
else:
self.date = document[date]
if tags not in document:
self.tags = list()
else:
self.tags = document[tags]
def create_json_document(self, text, uuids=None):
"""Creates a custom dictionary object that can be used for both sqlite and lancedb
The full document is always stored in sqlite where fixed fields are:
title
text
date
filepath
document_uuid - used for retrieval from lancedb results
Json field contains the whole document for retrieval and display
Lancedb only gets searching text, vectorization of that, and filter fields
"""
_document = {'title':self.title,
'text':text,
'tags':self.tags,
'date':self.date,
'file_path':str(self.file_path),
'uuid':self.uuid,
'metadata': self.metadata}
self._enforce_tags_schema()
for field in ['title','date','file_path']:
self.enforce_string_schema(field, _document)
return _document
def enforce_string_schema(self, field, test_document):
if not isinstance(test_document[field], str):
self.lance_exclude.append(field)
def _enforce_tags_schema(self):
# This enforces a simple List[str] format for the tags to match what lancedb can use for filtering
# If they are of type List[Dict] as a nested field, they are stored in sqlite for retrieval
if isinstance(self.tags, list):
tags_are_list = True
for _tag in self.tags:
if not isinstance(_tag, str):
tags_are_list = False
break
if not tags_are_list:
self.lance_exclude.append('tags')
def return_document(self):
document = self.create_json_document(self.text)
return document
class SqlLiteIngest():
def __init__(self, documents, source_file, db_location, index_name, overwrite):
self.documents = documents
self.source_file = source_file
self.db_location = db_location
self.index_name = index_name
self.overwrite = overwrite
def initialize(self):
self.connection = sqlite3.connect(self.db_location)
if self.overwrite:
self.connection.execute(f"""DROP TABLE IF EXISTS {self.index_name};""")
table_exists = self.connection.execute(f"SELECT name FROM sqlite_master WHERE type='table' AND name='{self.index_name}';").fetchall()
if len(table_exists) == 0:
self.connection.execute(f"""
CREATE TABLE {self.index_name}(
id INTEGER PRIMARY KEY NOT NULL,
uuid STRING NOT NULL,
text STRING NOT NULL,
title STRING,
date STRING,
source_file STRING,
metadata JSONB);""")
def insert(self, document):
self.connection.execute(f"""INSERT INTO
{self.index_name} (uuid, text, title, date, source_file, metadata)
VALUES ('{document.uuid.replace("'","''")}', '{document.text.replace("'","''")}',
'{document.title.replace("'","''")}', '{document.date.replace("'","''")}',
'{self.index_name.replace("'","''")}', '{json.dumps(document.metadata).replace("'","''")}');""")
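        # Safer variant (a sketch, not in the original): use parameter
        # binding instead of string interpolation to avoid quoting and
        # injection issues:
        #   self.connection.execute(
        #       f"INSERT INTO {self.index_name} "
        #       "(uuid, text, title, date, source_file, metadata) "
        #       "VALUES (?, ?, ?, ?, ?, ?)",
        #       (document.uuid, document.text, document.title, document.date,
        #        self.index_name, json.dumps(document.metadata)),
        #   )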
def bulk_insert(self):
for document in self.documents:
self.insert(document)
self.connection.commit()
self.connection.close()
from typing import List
from lancedb.pydantic import LanceModel, Vector
class LanceDBSchema384(LanceModel):
uuid: str
text: str
title: str
tags: List[str]
vector: Vector(384)
class LanceDBSchema512(LanceModel):
uuid: str
text: str
title: str
tags: List[str]
vector: Vector(512)
class LanceDBIngest():
def __init__(self, documents, lance_location, index_name, overwrite, encoder, schema):
self.documents = documents
self.lance_location = lance_location
self.index_name = index_name
self.overwrite = overwrite
self.encoder = encoder
self.schema = schema
def initialize(self):
self.db = lancedb.connect(self.lance_location)
existing_tables = self.db.table_names()
self.documents = [self.prep_documents(document) for document in self.documents]
if self.overwrite:
self.table = self.db.create_table(self.index_name, data=self.documents, mode='overwrite', schema=self.schema.to_arrow_schema())
else:
if self.index_name in existing_tables:
self.table = self.db.open_table(self.index_name)
self.table.add(self.documents)
else:
self.table = self.db.create_table(self.index_name, data=self.documents, schema=self.schema.to_arrow_schema())
def prep_documents(self, document):
lance_document = dict()
lance_document['text'] = document.text
lance_document['vector'] = self.encoder.encode(document.text)
lance_document['uuid'] = document.uuid
lance_document['title'] = document.title
lance_document['tags'] = document.tags
return lance_document
def insert(self, document):
document['vector'] = self.encoder.encode(document.text)
self.table.add(document)
def bulk_insert(self, create_vectors=False):
if create_vectors:
self.table.create_index(vector_column_name='vector', metric='cosine')
self.table.create_fts_index(field_names=['title','text'], replace=True)
return self.table
class IndexDocuments():
def __init__(self,field_mapping, source_file, index_name, overwrite):
self.field_mapping = field_mapping
self.source_file = source_file
self.index_name = index_name
self.overwrite = overwrite
def open_json(self):
with open(self.source_file, 'r') as f:
self.data = json.load(f)
print(self.data)
def open_csv(self):
self.data = pd.read_csv(self.source_file)
def create_document(self, document):
document = LanceDBDocument(document,
text=self.field_mapping['text'],
title=self.field_mapping['title'],
tags=self.field_mapping['tags'],
date=self.field_mapping['date'],
fields=list(document.keys()),
file_path=self.source_file
)
return document
def create_documents(self):
self.documents = [self.create_document(document) for document in self.data]
def ingest(self, overwrite=False):
# lance_path = Path(f'../indexes/lance')
lance_folder.mkdir(parents=True, exist_ok=True)
lance_ingest = LanceDBIngest(documents=self.documents,
lance_location=lance_folder,
# field_mapping=self.field_mapping,
index_name=self.index_name,
overwrite=self.overwrite,
encoder=encoder,
schema=LanceDBSchema384)
lance_ingest.initialize()
if len(self.documents) <= 256:
_table = lance_ingest.bulk_insert(create_vectors=False)
else:
_table = lance_ingest.bulk_insert(create_vectors=True)
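        # 256 is a rough cutoff: IVF-PQ index training needs a minimum
        # number of vectors (product quantization trains 256 centroids per
        # sub-quantizer), so tiny tables skip ANN index creation and rely on
        # brute-force search, which is fast enough at that scale.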
sql_path = sqlite_folder.joinpath('documents.sqlite')
sqlite_ingest = SqlLiteIngest(documents=self.documents,
source_file=self.source_file,
db_location=sql_path,
index_name=self.index_name,
overwrite=self.overwrite)
sqlite_ingest.initialize()
sqlite_ingest.bulk_insert()
| [
"lancedb.pydantic.Vector",
"lancedb.connect"
] | [((216, 255), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['"""all-MiniLM-L6-v2"""'], {}), "('all-MiniLM-L6-v2')\n", (235, 255), False, 'from sentence_transformers import SentenceTransformer\n'), ((271, 295), 'pathlib.Path', 'Path', (['"""data/collections"""'], {}), "('data/collections')\n", (275, 295), False, 'from pathlib import Path\n'), ((310, 342), 'pathlib.Path', 'Path', (['"""data/config/indexes.yaml"""'], {}), "('data/config/indexes.yaml')\n", (314, 342), False, 'from pathlib import Path\n'), ((358, 373), 'pathlib.Path', 'Path', (['"""indexes"""'], {}), "('indexes')\n", (362, 373), False, 'from pathlib import Path\n'), ((390, 405), 'pathlib.Path', 'Path', (['"""indexes"""'], {}), "('indexes')\n", (394, 405), False, 'from pathlib import Path\n'), ((471, 492), 'pathlib.Path', 'Path', (['"""data/indexes/"""'], {}), "('data/indexes/')\n", (475, 492), False, 'from pathlib import Path\n'), ((5306, 5317), 'lancedb.pydantic.Vector', 'Vector', (['(384)'], {}), '(384)\n', (5312, 5317), False, 'from lancedb.pydantic import LanceModel, Vector, List\n'), ((5430, 5441), 'lancedb.pydantic.Vector', 'Vector', (['(512)'], {}), '(512)\n', (5436, 5441), False, 'from lancedb.pydantic import LanceModel, Vector, List\n'), ((3896, 3929), 'sqlite3.connect', 'sqlite3.connect', (['self.db_location'], {}), '(self.db_location)\n', (3911, 3929), False, 'import sqlite3\n'), ((5814, 5850), 'lancedb.connect', 'lancedb.connect', (['self.lance_location'], {}), '(self.lance_location)\n', (5829, 5850), False, 'import lancedb\n'), ((7670, 7699), 'pandas.read_csv', 'pd.read_csv', (['self.source_file'], {}), '(self.source_file)\n', (7681, 7699), True, 'import pandas as pd\n'), ((7583, 7595), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7592, 7595), False, 'import json\n'), ((1035, 1042), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (1040, 1042), False, 'from uuid import uuid4\n'), ((4948, 4977), 'json.dumps', 'json.dumps', (['document.metadata'], {}), '(document.metadata)\n', (4958, 4977), False, 'import json\n')] |
import os
import urllib.request
import html2text
import predictionguard as pg
from langchain import PromptTemplate, FewShotPromptTemplate
from langchain.text_splitter import CharacterTextSplitter
from sentence_transformers import SentenceTransformer
import numpy as np
import lancedb
from lancedb.embeddings import with_embeddings
import pandas as pd
os.environ['PREDICTIONGUARD_TOKEN'] = "q1VuOjnffJ3NO2oFN8Q9m8vghYc84ld13jaqdF7E"
# Let's get the html off of a website.
fp = urllib.request.urlopen("file:////home/shaunak_joshi/gt/insuranceagent.html")
mybytes = fp.read()
html = mybytes.decode("utf8")
fp.close()
# And convert it to text.
h = html2text.HTML2Text()
h.ignore_links = True
text = h.handle(html)
# Clean things up just a bit.
text = text.split("Introduction")[1]
#print(text)
#text = text.split("Location, Location, Location")[0]
#print(text)
#print(type(text))
# Chunk the text into smaller pieces for injection into LLM prompts.
text_splitter = CharacterTextSplitter(chunk_size=700, chunk_overlap=50)
docs = text_splitter.split_text(text)
# Let's checkout some of the chunks!
#for i in range(0, 10):
# print("Chunk", str(i+1))
# print("----------------------------")
# print(docs[i])
# print("")
# Let's take care of some of the formatting so it doesn't conflict with our
# typical prompt template structure
docs = [x.replace('#', '-') for x in docs]
# Now we need to embed these documents and put them into a "vector store" or
# "vector db" that we will use for semantic search and retrieval.
# Embeddings setup
name="all-MiniLM-L12-v2"
model = SentenceTransformer(name)
def embed_batch(batch):
return [model.encode(sentence) for sentence in batch]
def embed(sentence):
return model.encode(sentence)
# LanceDB setup
os.makedirs(".lancedb", exist_ok=True)  # os.mkdir fails if the dir already exists
uri = ".lancedb"
db = lancedb.connect(uri)
# Create a dataframe with the chunk ids and chunks
metadata = []
for i in range(len(docs)):
metadata.append([i,docs[i]])
doc_df = pd.DataFrame(metadata, columns=["chunk", "text"])
# Embed the documents
data = with_embeddings(embed_batch, doc_df)
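# with_embeddings applies embed_batch to the "text" column (its default
# source column) and returns an Arrow-backed table with an appended
# "vector" column, ready for ingestion and indexing.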
# Create the DB table and open it. Note: create_table already ingests
# `data`, so calling table.add(data) afterwards would duplicate every row;
# mode="overwrite" also keeps reruns idempotent.
db.create_table("linux", data=data, mode="overwrite")
table = db.open_table("linux")
# Let's try to match a query to one of our documents.
#message = "What plays a crucial role in deciding insurance policies?"
#results = table.search(embed(message)).limit(5).to_pandas()
#print(results.head())
# Now let's augment our Q&A prompt with this external knowledge on-the-fly!!!
template = """### Instruction:
Read the below input context and respond with a short answer to the given question. Use only the information in the below input context.
### Input:
Context: {context}
Question: {question}
### Response:
"""
qa_prompt = PromptTemplate(
input_variables=["context", "question"],
template=template,
)
def rag_answer(message):
# Search the for relevant context
results = table.search(embed(message)).limit(5).to_pandas()
results.sort_values(by=['_distance'], inplace=True, ascending=True)
doc_use = results['text'].values[0]
# Augment the prompt with the context
prompt = qa_prompt.format(context=doc_use, question=message)
# Get a response
result = pg.Completion.create(
model="Nous-Hermes-Llama2-13B",
prompt=prompt
)
return result['choices'][0]['text']
response = rag_answer("A house has been destroyed by a tornado and also has been set on fire. The water doesn't work but the gas lines are fine. The area the house is in is notorious for crime. It is built in an earthquake prone zone. There are cracks in the walls and it is quite old. Based on this information, generate three insights about the type of insurance policy the house will require and any other thing you find important. Keep the insights under 20 words each.")
print('')
print("RESPONSE:", response)
| [
"lancedb.embeddings.with_embeddings",
"lancedb.connect"
] | [((670, 691), 'html2text.HTML2Text', 'html2text.HTML2Text', ([], {}), '()\n', (689, 691), False, 'import html2text\n'), ((1001, 1056), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(700)', 'chunk_overlap': '(50)'}), '(chunk_size=700, chunk_overlap=50)\n', (1022, 1056), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((1627, 1652), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['name'], {}), '(name)\n', (1646, 1652), False, 'from sentence_transformers import SentenceTransformer\n'), ((1818, 1838), 'os.mkdir', 'os.mkdir', (['""".lancedb"""'], {}), "('.lancedb')\n", (1826, 1838), False, 'import os\n'), ((1863, 1883), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (1878, 1883), False, 'import lancedb\n'), ((2025, 2074), 'pandas.DataFrame', 'pd.DataFrame', (['metadata'], {'columns': "['chunk', 'text']"}), "(metadata, columns=['chunk', 'text'])\n", (2037, 2074), True, 'import pandas as pd\n'), ((2108, 2144), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['embed_batch', 'doc_df'], {}), '(embed_batch, doc_df)\n', (2123, 2144), False, 'from lancedb.embeddings import with_embeddings\n'), ((2827, 2901), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['context', 'question']", 'template': 'template'}), "(input_variables=['context', 'question'], template=template)\n", (2841, 2901), False, 'from langchain import PromptTemplate, FewShotPromptTemplate\n'), ((3294, 3361), 'predictionguard.Completion.create', 'pg.Completion.create', ([], {'model': '"""Nous-Hermes-Llama2-13B"""', 'prompt': 'prompt'}), "(model='Nous-Hermes-Llama2-13B', prompt=prompt)\n", (3314, 3361), True, 'import predictionguard as pg\n')] |
from lancedb.pydantic import LanceModel, Vector
from lancedb.embeddings import EmbeddingFunctionRegistry
registry = EmbeddingFunctionRegistry.get_instance()
func = registry.get("openai").create()
class Questions(LanceModel):
question: str = func.SourceField()
vector: Vector(func.ndims()) = func.VectorField()
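# Minimal usage sketch (the table name "questions" and the query string are
# assumed, not from the original snippet). Because `question` is a
# SourceField and `vector` a VectorField, LanceDB computes embeddings
# automatically on add() and search():
import lancedb
db = lancedb.connect("/tmp/lancedb")
tbl = db.create_table("questions", schema=Questions)
tbl.add([{"question": "What is LanceDB?"}])  # vector filled in by `func`
hits = tbl.search("vector database").limit(1).to_pydantic(Questions)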
| [
"lancedb.embeddings.EmbeddingFunctionRegistry.get_instance"
] | [((117, 157), 'lancedb.embeddings.EmbeddingFunctionRegistry.get_instance', 'EmbeddingFunctionRegistry.get_instance', ([], {}), '()\n', (155, 157), False, 'from lancedb.embeddings import EmbeddingFunctionRegistry\n')] |
import logging
import os
import time
from functools import wraps
from pathlib import Path
from random import random, seed
import lancedb
import pyarrow as pa
import pyarrow.parquet as pq
import typer
from lancedb.db import LanceTable
log_level = os.environ.get("LOG_LEVEL", "info")
logging.basicConfig(
level=getattr(logging, log_level.upper()),
format="%(asctime)s %(levelname)s | %(processName)s %(name)s | %(message)s",
)
logger = logging.getLogger(__name__)
app = typer.Typer()
V_SIZE = 256
DB_PATH = "benchmark"
DB_TABLE = "vectors"
DB_TABLE_SIZE = os.environ.get("DB_TABLE_SIZE", 100000)
Q_PATH = "query"
Q_SIZE = os.environ.get("Q_SIZE", 100)
Q_V = "v.parquet"
Q_KNN = "knn.parquet"
Q_ANN = "ann.parquet"
def timeit(func):
@wraps(func)
def f(*args, **kwargs):
start_time = time.perf_counter()
result = func(*args, **kwargs)
end_time = time.perf_counter()
total_time = end_time - start_time
logger.info(f"{func.__name__} {args} done in {total_time:.2f} secs")
return result
return f
def get_db():
if int(os.environ["AZURE"]) == 0:
f = Path(os.environ["DATA"])
f.mkdir(parents=True, exist_ok=True)
return lancedb.connect(f / DB_PATH)
else:
return lancedb.connect(
f"az://{os.environ['AZURE_STORAGE_CONTAINER']}/{DB_PATH}"
)
def open_table(table: str):
return LanceTable(get_db(), table)
def get_q(what="v"):
tables = {
"v": Q_V,
"knn": Q_KNN,
"ann": Q_ANN,
}
f = Path(os.environ["DATA"]) / Q_PATH
f.mkdir(parents=True, exist_ok=True)
return f / tables[what]
def gen_data(n: int, start=1):
seed()
for i in range(start, start + n):
yield ({"id": i, "vector": list(random() for _ in range(V_SIZE))})
@app.command()
def db_init(n: int = DB_TABLE_SIZE):
get_db().create_table(DB_TABLE, data=list(gen_data(n)))
@app.command()
def db_info():
table = open_table(DB_TABLE)
logger.debug(table.head(10))
@app.command()
def db_add(n: int, start: int):
table = open_table(DB_TABLE)
table.add(list(gen_data(n, start=start)))
@app.command()
def q_init(n: int = Q_SIZE):
pq.write_table(pa.Table.from_pylist(list(gen_data(n))), get_q())
@app.command()
def q_info():
logger.debug(pq.read_table(get_q()))
@timeit
def q_process(what: str):
table = open_table(DB_TABLE)
r = pa.Table.from_pylist(
[
{
"id": v["id"],
"neighbours": table.search(v["vector"])
.limit(10)
.select(["id"])
.to_arrow()["id"]
.to_pylist(),
}
for v in pq.read_table(get_q()).to_pylist()
]
)
pq.write_table(r, get_q(what))
@app.command()
@timeit
def create_index():
open_table(DB_TABLE).create_index(
num_sub_vectors=8
    )  # TODO: avoid hard-coded params
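    # num_sub_vectors=8 sets the product-quantization granularity: each
    # 256-dim vector is split into 8 sub-vectors of 32 dims, each quantized
    # independently. Fewer sub-vectors give a smaller index but coarser
    # distance estimates.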
@app.command()
def q_knn():
q_process("knn")
@app.command()
def q_ann():
create_index()
q_process("ann")
if __name__ == "__main__":
app()
| [
"lancedb.connect"
] | [((248, 283), 'os.environ.get', 'os.environ.get', (['"""LOG_LEVEL"""', '"""info"""'], {}), "('LOG_LEVEL', 'info')\n", (262, 283), False, 'import os\n'), ((446, 473), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (463, 473), False, 'import logging\n'), ((480, 493), 'typer.Typer', 'typer.Typer', ([], {}), '()\n', (491, 493), False, 'import typer\n'), ((569, 608), 'os.environ.get', 'os.environ.get', (['"""DB_TABLE_SIZE"""', '(100000)'], {}), "('DB_TABLE_SIZE', 100000)\n", (583, 608), False, 'import os\n'), ((636, 665), 'os.environ.get', 'os.environ.get', (['"""Q_SIZE"""', '(100)'], {}), "('Q_SIZE', 100)\n", (650, 665), False, 'import os\n'), ((753, 764), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (758, 764), False, 'from functools import wraps\n'), ((1693, 1699), 'random.seed', 'seed', ([], {}), '()\n', (1697, 1699), False, 'from random import random, seed\n'), ((814, 833), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (831, 833), False, 'import time\n'), ((892, 911), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (909, 911), False, 'import time\n'), ((1134, 1158), 'pathlib.Path', 'Path', (["os.environ['DATA']"], {}), "(os.environ['DATA'])\n", (1138, 1158), False, 'from pathlib import Path\n'), ((1219, 1247), 'lancedb.connect', 'lancedb.connect', (['(f / DB_PATH)'], {}), '(f / DB_PATH)\n', (1234, 1247), False, 'import lancedb\n'), ((1273, 1347), 'lancedb.connect', 'lancedb.connect', (['f"""az://{os.environ[\'AZURE_STORAGE_CONTAINER\']}/{DB_PATH}"""'], {}), '(f"az://{os.environ[\'AZURE_STORAGE_CONTAINER\']}/{DB_PATH}")\n', (1288, 1347), False, 'import lancedb\n'), ((1553, 1577), 'pathlib.Path', 'Path', (["os.environ['DATA']"], {}), "(os.environ['DATA'])\n", (1557, 1577), False, 'from pathlib import Path\n'), ((1778, 1786), 'random.random', 'random', ([], {}), '()\n', (1784, 1786), False, 'from random import random, seed\n')] |
import argparse
import os
import shutil
from functools import lru_cache
from pathlib import Path
from typing import Any, Iterator
import srsly
from codetiming import Timer
from config import Settings
from dotenv import load_dotenv
from rich import progress
from schemas.wine import LanceModelWine, Wine
from sentence_transformers import SentenceTransformer
import lancedb
from lancedb.pydantic import pydantic_to_schema
from lancedb.table import Table
load_dotenv()
# Custom types
JsonBlob = dict[str, Any]
class FileNotFoundError(Exception):
pass
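# NOTE: the class above shadows Python's builtin FileNotFoundError within
# this module, so the `raise FileNotFoundError(...)` in get_json_data raises
# this custom class.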
@lru_cache()
def get_settings():
# Use lru_cache to avoid loading .env file for every request
return Settings()
def chunk_iterable(item_list: list[JsonBlob], chunksize: int) -> Iterator[list[JsonBlob]]:
"""
Break a large iterable into an iterable of smaller iterables of size `chunksize`
"""
for i in range(0, len(item_list), chunksize):
yield item_list[i : i + chunksize]
def get_json_data(data_dir: Path, filename: str) -> list[JsonBlob]:
"""Get all line-delimited json files (.jsonl) from a directory with a given prefix"""
file_path = data_dir / filename
if not file_path.is_file():
# File may not have been uncompressed yet so try to do that first
data = srsly.read_gzip_jsonl(file_path)
# This time if it isn't there it really doesn't exist
if not file_path.is_file():
raise FileNotFoundError(f"No valid .jsonl file found in `{data_dir}`")
else:
data = srsly.read_gzip_jsonl(file_path)
return data
def validate(
data: list[JsonBlob],
exclude_none: bool = False,
) -> list[JsonBlob]:
validated_data = [Wine(**item).model_dump(exclude_none=exclude_none) for item in data]
return validated_data
def embed_func(batch: list[str], model) -> list[list[float]]:
return [model.encode(sentence.lower()) for sentence in batch]
def vectorize_text(data: list[JsonBlob]) -> list[LanceModelWine] | None:
# Load a sentence transformer model for semantic similarity from a specified checkpoint
model_id = get_settings().embedding_model_checkpoint
assert model_id, "Invalid embedding model checkpoint specified in .env file"
MODEL = SentenceTransformer(model_id)
ids = [item["id"] for item in data]
to_vectorize = [text.get("to_vectorize") for text in data]
vectors = embed_func(to_vectorize, MODEL)
try:
data_batch = [{**d, "vector": vector} for d, vector in zip(data, vectors)]
except Exception as e:
print(f"{e}: Failed to add ID range {min(ids)}-{max(ids)}")
return None
return data_batch
def embed_batches(tbl: str, validated_data: list[JsonBlob]) -> Table:
"""Ingest vector embeddings in batches for ANN index"""
chunked_data = chunk_iterable(validated_data, CHUNKSIZE)
print(f"Adding vectors to table for ANN index...")
# Add rich progress bar
with progress.Progress(
"[progress.description]{task.description}",
progress.BarColumn(),
"[progress.percentage]{task.percentage:>3.0f}%",
progress.TimeElapsedColumn(),
) as prog:
overall_progress_task = prog.add_task(
"Starting vectorization...", total=len(validated_data) // CHUNKSIZE
)
for chunk in chunked_data:
batch = vectorize_text(chunk)
prog.update(overall_progress_task, advance=1)
tbl.add(batch, mode="append")
def main(tbl: Table, data: list[JsonBlob]) -> None:
"""Generate sentence embeddings and create ANN and FTS indexes"""
with Timer(
name="Data validation in pydantic",
text="Validated data using Pydantic in {:.4f} sec",
):
validated_data = validate(data, exclude_none=False)
with Timer(
name="Insert vectors in batches",
text="Created sentence embeddings in {:.4f} sec",
):
embed_batches(tbl, validated_data)
print(f"Finished inserting {len(tbl)} vectors into LanceDB table")
with Timer(name="Create ANN index", text="Created ANN index in {:.4f} sec"):
print("Creating ANN index...")
# Creating IVF-PQ index for now, as we eagerly await DiskANN
# Choose num partitions as a power of 2 that's closest to len(dataset) // 5000
# In this case, we have 130k datapoints, so the nearest power of 2 is 130000//5000 ~ 32)
tbl.create_index(metric="cosine", num_partitions=4, num_sub_vectors=32)
with Timer(name="Create FTS index", text="Created FTS index in {:.4f} sec"):
# Create a full-text search index via Tantivy (which implements Lucene + BM25 in Rust)
tbl.create_fts_index(["to_vectorize"])
if __name__ == "__main__":
# fmt: off
parser = argparse.ArgumentParser("Bulk index database from the wine reviews JSONL data")
parser.add_argument("--limit", "-l", type=int, default=0, help="Limit the size of the dataset to load for testing purposes")
parser.add_argument("--chunksize", type=int, default=1000, help="Size of each chunk to break the dataset into before processing")
parser.add_argument("--filename", type=str, default="winemag-data-130k-v2.jsonl.gz", help="Name of the JSONL zip file to use")
args = vars(parser.parse_args())
# fmt: on
LIMIT = args["limit"]
DATA_DIR = Path(__file__).parents[1] / "data"
FILENAME = args["filename"]
CHUNKSIZE = args["chunksize"]
data = list(get_json_data(DATA_DIR, FILENAME))
assert data, "No data found in the specified file"
data = data[:LIMIT] if LIMIT > 0 else data
DB_NAME = "./winemag"
TABLE = "wines"
if os.path.exists(DB_NAME):
shutil.rmtree(DB_NAME)
db = lancedb.connect(DB_NAME)
try:
tbl = db.create_table(TABLE, schema=pydantic_to_schema(LanceModelWine), mode="create")
except OSError:
tbl = db.open_table(TABLE)
main(tbl, data)
print("Finished execution!")
| [
"lancedb.connect",
"lancedb.pydantic.pydantic_to_schema"
] | [((455, 468), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (466, 468), False, 'from dotenv import load_dotenv\n'), ((560, 571), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (569, 571), False, 'from functools import lru_cache\n'), ((668, 678), 'config.Settings', 'Settings', ([], {}), '()\n', (676, 678), False, 'from config import Settings\n'), ((2230, 2259), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['model_id'], {}), '(model_id)\n', (2249, 2259), False, 'from sentence_transformers import SentenceTransformer\n'), ((4737, 4816), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Bulk index database from the wine reviews JSONL data"""'], {}), "('Bulk index database from the wine reviews JSONL data')\n", (4760, 4816), False, 'import argparse\n'), ((5613, 5636), 'os.path.exists', 'os.path.exists', (['DB_NAME'], {}), '(DB_NAME)\n', (5627, 5636), False, 'import os\n'), ((5679, 5703), 'lancedb.connect', 'lancedb.connect', (['DB_NAME'], {}), '(DB_NAME)\n', (5694, 5703), False, 'import lancedb\n'), ((1283, 1315), 'srsly.read_gzip_jsonl', 'srsly.read_gzip_jsonl', (['file_path'], {}), '(file_path)\n', (1304, 1315), False, 'import srsly\n'), ((1522, 1554), 'srsly.read_gzip_jsonl', 'srsly.read_gzip_jsonl', (['file_path'], {}), '(file_path)\n', (1543, 1554), False, 'import srsly\n'), ((3582, 3680), 'codetiming.Timer', 'Timer', ([], {'name': '"""Data validation in pydantic"""', 'text': '"""Validated data using Pydantic in {:.4f} sec"""'}), "(name='Data validation in pydantic', text=\n 'Validated data using Pydantic in {:.4f} sec')\n", (3587, 3680), False, 'from codetiming import Timer\n'), ((3770, 3864), 'codetiming.Timer', 'Timer', ([], {'name': '"""Insert vectors in batches"""', 'text': '"""Created sentence embeddings in {:.4f} sec"""'}), "(name='Insert vectors in batches', text=\n 'Created sentence embeddings in {:.4f} sec')\n", (3775, 3864), False, 'from codetiming import Timer\n'), ((4012, 4082), 'codetiming.Timer', 'Timer', ([], {'name': '"""Create ANN index"""', 'text': '"""Created ANN index in {:.4f} sec"""'}), "(name='Create ANN index', text='Created ANN index in {:.4f} sec')\n", (4017, 4082), False, 'from codetiming import Timer\n'), ((4466, 4536), 'codetiming.Timer', 'Timer', ([], {'name': '"""Create FTS index"""', 'text': '"""Created FTS index in {:.4f} sec"""'}), "(name='Create FTS index', text='Created FTS index in {:.4f} sec')\n", (4471, 4536), False, 'from codetiming import Timer\n'), ((5646, 5668), 'shutil.rmtree', 'shutil.rmtree', (['DB_NAME'], {}), '(DB_NAME)\n', (5659, 5668), False, 'import shutil\n'), ((3003, 3023), 'rich.progress.BarColumn', 'progress.BarColumn', ([], {}), '()\n', (3021, 3023), False, 'from rich import progress\n'), ((3090, 3118), 'rich.progress.TimeElapsedColumn', 'progress.TimeElapsedColumn', ([], {}), '()\n', (3116, 3118), False, 'from rich import progress\n'), ((1688, 1700), 'schemas.wine.Wine', 'Wine', ([], {}), '(**item)\n', (1692, 1700), False, 'from schemas.wine import LanceModelWine, Wine\n'), ((5304, 5318), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (5308, 5318), False, 'from pathlib import Path\n'), ((5757, 5791), 'lancedb.pydantic.pydantic_to_schema', 'pydantic_to_schema', (['LanceModelWine'], {}), '(LanceModelWine)\n', (5775, 5791), False, 'from lancedb.pydantic import pydantic_to_schema\n')] |
from datasets import load_dataset
data = load_dataset('jamescalam/youtube-transcriptions', split='train')
from lancedb.context import contextualize
df = (contextualize(data.to_pandas())
.groupby("title").text_col("text")
.window(20).stride(4)
.to_df())
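# contextualize groups the transcript rows by video title and emits sliding
# windows of 20 text rows with a stride of 4, so consecutive chunks overlap
# by 16 rows and neighbouring context is preserved for retrieval.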
df.head(1)
import openai
import os
# Configuring the environment variable OPENAI_API_KEY
if "OPENAI_API_KEY" not in os.environ:
# OR set the key here as a variable
openai.api_key = ""
assert len(openai.Model.list()["data"]) > 0
def embed_func(c):
rs = openai.Embedding.create(input=c, engine="text-embedding-ada-002")
return [record["embedding"] for record in rs["data"]]
import lancedb
from lancedb.embeddings import with_embeddings
# data = with_embeddings(embed_func, df, show_progress=True)
# data.to_pandas().head(1)
db = lancedb.connect("/tmp/lancedb")
# tbl = db.create_table("youtube-chatbot", data)
# get table
tbl = db.open_table("youtube-chatbot")
#print the length of the table
print(len(tbl))
tbl.to_pandas().head(1)
def create_prompt(query, context):
    limit = 3750
    prompt_start = (
        "Answer the question based on the context below.\n\n"+
        "Context:\n"
    )
    prompt_end = (
        f"\n\nQuestion: {query}\nAnswer:"
    )
    # Start from the full context, then trim: append chunks until the limit
    # is hit and back off by one. (The fallback also covers the case of a
    # single retrieved chunk, where the original loop body never ran and
    # `prompt` was left unassigned.)
    prompt = (
        prompt_start +
        "\n\n---\n\n".join(context.text) +
        prompt_end
    )
    for i in range(1, len(context)):
        if len("\n\n---\n\n".join(context.text[:i])) >= limit:
            prompt = (
                prompt_start +
                "\n\n---\n\n".join(context.text[:i-1]) +
                prompt_end
            )
            break
    print("prompt:", prompt)
    return prompt
def complete(prompt):
# query text-davinci-003
res = openai.Completion.create(
engine='text-davinci-003',
prompt=prompt,
temperature=0,
max_tokens=400,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=None
)
return res['choices'][0]['text'].strip()
query = ("How do I use the Pandas library to create embeddings?")
# Embed the question
emb = embed_func(query)[0]
# Use LanceDB to get top 3 most relevant context
context = tbl.search(emb).limit(3).to_df()
# Get the answer from completion API
prompt = create_prompt(query, context)
print( "context:", context )
print ( complete( prompt )) | [
"lancedb.connect"
] | [((42, 106), 'datasets.load_dataset', 'load_dataset', (['"""jamescalam/youtube-transcriptions"""'], {'split': '"""train"""'}), "('jamescalam/youtube-transcriptions', split='train')\n", (54, 106), False, 'from datasets import load_dataset\n'), ((831, 862), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (846, 862), False, 'import lancedb\n'), ((549, 614), 'openai.Embedding.create', 'openai.Embedding.create', ([], {'input': 'c', 'engine': '"""text-embedding-ada-002"""'}), "(input=c, engine='text-embedding-ada-002')\n", (572, 614), False, 'import openai\n'), ((1876, 2042), 'openai.Completion.create', 'openai.Completion.create', ([], {'engine': '"""text-davinci-003"""', 'prompt': 'prompt', 'temperature': '(0)', 'max_tokens': '(400)', 'top_p': '(1)', 'frequency_penalty': '(0)', 'presence_penalty': '(0)', 'stop': 'None'}), "(engine='text-davinci-003', prompt=prompt,\n temperature=0, max_tokens=400, top_p=1, frequency_penalty=0,\n presence_penalty=0, stop=None)\n", (1900, 2042), False, 'import openai\n'), ((483, 502), 'openai.Model.list', 'openai.Model.list', ([], {}), '()\n', (500, 502), False, 'import openai\n')] |
import hashlib
import io
import logging
from typing import List
import numpy as np
from lancedb.pydantic import LanceModel, vector
from PIL import Image
from pydantic import BaseModel, Field, computed_field
from homematch.config import IMAGES_DIR
logger = logging.getLogger(__name__)
class PropertyListingBase(BaseModel):
page_source: str
resource_title: str
resource_country: str
operation_type: str
active: bool
url: str
title: str
normalized_title: str
zone: str
current_price: float | None = None
ad_text: str
basic_info: List[str]
last_update: str
main_image_url: str
scraped_ts: str
@computed_field # type: ignore
@property
def identificator(self) -> str:
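        # First 16 hex chars of the URL's SHA-256: a deterministic id, so
        # the same listing always maps to the same image path across runs.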
return hashlib.sha256(self.url.encode()).hexdigest()[:16]
@computed_field # type: ignore
@property
def text_description(self) -> str:
basic_info_text = ",".join(self.basic_info)
basic_info_text = basic_info_text.replace("habs", "bedrooms")
basic_info_text = basic_info_text.replace("baños", "bathrooms")
basic_info_text = basic_info_text.replace("baño", "bathroom")
basic_info_text = basic_info_text.replace("m²", "square meters")
basic_info_text = basic_info_text.replace("planta", "floor")
basic_info_text = basic_info_text.replace("Bajo", "0 floor")
description = ""
description += f"Zone: {self.zone}."
description += f"\nPrice: {self.current_price} euros."
description += f"\nFeatures: {basic_info_text}"
return description
class PropertyListing(PropertyListingBase):
images_dir: str = Field(str(IMAGES_DIR), description="Directory to store images")
@property
def image_path(self) -> str:
return str(self.images_dir) + f"/{self.identificator}.jpg"
def load_image(self) -> Image.Image:
try:
return Image.open(self.image_path)
except FileNotFoundError:
logger.error(f"Image file not found: {self.image_path}")
raise
@classmethod
def pil_to_bytes(cls, img: Image.Image) -> bytes:
buf = io.BytesIO()
img.save(buf, format="PNG")
return buf.getvalue()
@classmethod
def pil_to_numpy(cls, img: Image.Image) -> np.ndarray:
return np.array(img)
class PropertyData(PropertyListing):
class Config:
arbitrary_types_allowed = True
image: Image.Image
class ImageData(PropertyListing, LanceModel):
vector: vector(768) # type: ignore
image_bytes: bytes
| [
"lancedb.pydantic.vector"
] | [((259, 286), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (276, 286), False, 'import logging\n'), ((2511, 2522), 'lancedb.pydantic.vector', 'vector', (['(768)'], {}), '(768)\n', (2517, 2522), False, 'from lancedb.pydantic import LanceModel, vector\n'), ((2146, 2158), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (2156, 2158), False, 'import io\n'), ((2317, 2330), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (2325, 2330), True, 'import numpy as np\n'), ((1911, 1938), 'PIL.Image.open', 'Image.open', (['self.image_path'], {}), '(self.image_path)\n', (1921, 1938), False, 'from PIL import Image\n')] |
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logging
import os
from typing import Any, Callable, Dict, List, Optional, Union
from urllib.parse import urljoin
import attrs
import pyarrow as pa
import requests
from pydantic import BaseModel
from requests.adapters import HTTPAdapter
from urllib3 import Retry
from lancedb.common import Credential
from lancedb.remote import VectorQuery, VectorQueryResult
from lancedb.remote.connection_timeout import LanceDBClientHTTPAdapterFactory
from lancedb.remote.errors import LanceDBClientError
ARROW_STREAM_CONTENT_TYPE = "application/vnd.apache.arrow.stream"
def _check_not_closed(f):
@functools.wraps(f)
def wrapped(self, *args, **kwargs):
if self.closed:
raise ValueError("Connection is closed")
return f(self, *args, **kwargs)
return wrapped
def _read_ipc(resp: requests.Response) -> pa.Table:
resp_body = resp.content
with pa.ipc.open_file(pa.BufferReader(resp_body)) as reader:
return reader.read_all()
@attrs.define(slots=False)
class RestfulLanceDBClient:
db_name: str
region: str
api_key: Credential
host_override: Optional[str] = attrs.field(default=None)
closed: bool = attrs.field(default=False, init=False)
connection_timeout: float = attrs.field(default=120.0, kw_only=True)
read_timeout: float = attrs.field(default=300.0, kw_only=True)
@functools.cached_property
def session(self) -> requests.Session:
sess = requests.Session()
retry_adapter_instance = retry_adapter(retry_adapter_options())
sess.mount(urljoin(self.url, "/v1/table/"), retry_adapter_instance)
adapter_class = LanceDBClientHTTPAdapterFactory()
sess.mount("https://", adapter_class())
return sess
@property
def url(self) -> str:
return (
self.host_override
or f"https://{self.db_name}.{self.region}.api.lancedb.com"
)
def close(self):
self.session.close()
self.closed = True
@functools.cached_property
def headers(self) -> Dict[str, str]:
headers = {
"x-api-key": self.api_key,
}
if self.region == "local": # Local test mode
headers["Host"] = f"{self.db_name}.{self.region}.api.lancedb.com"
if self.host_override:
headers["x-lancedb-database"] = self.db_name
return headers
@staticmethod
def _check_status(resp: requests.Response):
if resp.status_code == 404:
raise LanceDBClientError(f"Not found: {resp.text}")
elif 400 <= resp.status_code < 500:
raise LanceDBClientError(
f"Bad Request: {resp.status_code}, error: {resp.text}"
)
elif 500 <= resp.status_code < 600:
raise LanceDBClientError(
f"Internal Server Error: {resp.status_code}, error: {resp.text}"
)
elif resp.status_code != 200:
raise LanceDBClientError(
f"Unknown Error: {resp.status_code}, error: {resp.text}"
)
@_check_not_closed
def get(self, uri: str, params: Union[Dict[str, Any], BaseModel] = None):
"""Send a GET request and returns the deserialized response payload."""
if isinstance(params, BaseModel):
params: Dict[str, Any] = params.dict(exclude_none=True)
with self.session.get(
urljoin(self.url, uri),
params=params,
headers=self.headers,
timeout=(self.connection_timeout, self.read_timeout),
) as resp:
self._check_status(resp)
return resp.json()
@_check_not_closed
def post(
self,
uri: str,
data: Optional[Union[Dict[str, Any], BaseModel, bytes]] = None,
params: Optional[Dict[str, Any]] = None,
content_type: Optional[str] = None,
deserialize: Callable = lambda resp: resp.json(),
request_id: Optional[str] = None,
) -> Dict[str, Any]:
"""Send a POST request and returns the deserialized response payload.
Parameters
----------
uri : str
The uri to send the POST request to.
data: Union[Dict[str, Any], BaseModel]
request_id: Optional[str]
Optional client side request id to be sent in the request headers.
"""
if isinstance(data, BaseModel):
data: Dict[str, Any] = data.dict(exclude_none=True)
if isinstance(data, bytes):
req_kwargs = {"data": data}
else:
req_kwargs = {"json": data}
headers = self.headers.copy()
if content_type is not None:
headers["content-type"] = content_type
if request_id is not None:
headers["x-request-id"] = request_id
with self.session.post(
urljoin(self.url, uri),
headers=headers,
params=params,
timeout=(self.connection_timeout, self.read_timeout),
**req_kwargs,
) as resp:
self._check_status(resp)
return deserialize(resp)
@_check_not_closed
def list_tables(self, limit: int, page_token: Optional[str] = None) -> List[str]:
"""List all tables in the database."""
if page_token is None:
page_token = ""
json = self.get("/v1/table/", {"limit": limit, "page_token": page_token})
return json["tables"]
@_check_not_closed
def query(self, table_name: str, query: VectorQuery) -> VectorQueryResult:
"""Query a table."""
tbl = self.post(f"/v1/table/{table_name}/query/", query, deserialize=_read_ipc)
return VectorQueryResult(tbl)
def mount_retry_adapter_for_table(self, table_name: str) -> None:
"""
        Adds an HTTP adapter to the session that retries retryable requests to the table.
"""
retry_options = retry_adapter_options(methods=["GET", "POST"])
retry_adapter_instance = retry_adapter(retry_options)
session = self.session
session.mount(
urljoin(self.url, f"/v1/table/{table_name}/query/"), retry_adapter_instance
)
session.mount(
urljoin(self.url, f"/v1/table/{table_name}/describe/"),
retry_adapter_instance,
)
session.mount(
urljoin(self.url, f"/v1/table/{table_name}/index/list/"),
retry_adapter_instance,
)
def retry_adapter_options(methods=["GET"]) -> Dict[str, Any]:
return {
"retries": int(os.environ.get("LANCE_CLIENT_MAX_RETRIES", "3")),
"connect_retries": int(os.environ.get("LANCE_CLIENT_CONNECT_RETRIES", "3")),
"read_retries": int(os.environ.get("LANCE_CLIENT_READ_RETRIES", "3")),
"backoff_factor": float(
os.environ.get("LANCE_CLIENT_RETRY_BACKOFF_FACTOR", "0.25")
),
"backoff_jitter": float(
os.environ.get("LANCE_CLIENT_RETRY_BACKOFF_JITTER", "0.25")
),
"statuses": [
int(i.strip())
for i in os.environ.get(
"LANCE_CLIENT_RETRY_STATUSES", "429, 500, 502, 503"
).split(",")
],
"methods": methods,
}
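# Example: LANCE_CLIENT_MAX_RETRIES=5 LANCE_CLIENT_RETRY_BACKOFF_FACTOR=0.5
# retries a request up to 5 times; urllib3 sleeps roughly
# backoff_factor * 2**(retry - 1) seconds (plus jitter) between attempts.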
def retry_adapter(options: Dict[str, Any]) -> HTTPAdapter:
total_retries = options["retries"]
connect_retries = options["connect_retries"]
read_retries = options["read_retries"]
backoff_factor = options["backoff_factor"]
backoff_jitter = options["backoff_jitter"]
statuses = options["statuses"]
methods = frozenset(options["methods"])
logging.debug(
f"Setting up retry adapter with {total_retries} retries," # noqa G003
+ f"connect retries {connect_retries}, read retries {read_retries},"
+ f"backoff factor {backoff_factor}, statuses {statuses}, "
+ f"methods {methods}"
)
return HTTPAdapter(
max_retries=Retry(
total=total_retries,
connect=connect_retries,
read=read_retries,
backoff_factor=backoff_factor,
backoff_jitter=backoff_jitter,
status_forcelist=statuses,
allowed_methods=methods,
)
)
| [
"lancedb.remote.connection_timeout.LanceDBClientHTTPAdapterFactory",
"lancedb.remote.VectorQueryResult",
"lancedb.remote.errors.LanceDBClientError"
] | [((1587, 1612), 'attrs.define', 'attrs.define', ([], {'slots': '(False)'}), '(slots=False)\n', (1599, 1612), False, 'import attrs\n'), ((1207, 1225), 'functools.wraps', 'functools.wraps', (['f'], {}), '(f)\n', (1222, 1225), False, 'import functools\n'), ((1733, 1758), 'attrs.field', 'attrs.field', ([], {'default': 'None'}), '(default=None)\n', (1744, 1758), False, 'import attrs\n'), ((1779, 1817), 'attrs.field', 'attrs.field', ([], {'default': '(False)', 'init': '(False)'}), '(default=False, init=False)\n', (1790, 1817), False, 'import attrs\n'), ((1851, 1891), 'attrs.field', 'attrs.field', ([], {'default': '(120.0)', 'kw_only': '(True)'}), '(default=120.0, kw_only=True)\n', (1862, 1891), False, 'import attrs\n'), ((1918, 1958), 'attrs.field', 'attrs.field', ([], {'default': '(300.0)', 'kw_only': '(True)'}), '(default=300.0, kw_only=True)\n', (1929, 1958), False, 'import attrs\n'), ((8166, 8402), 'logging.debug', 'logging.debug', (["(f'Setting up retry adapter with {total_retries} retries,' +\n f'connect retries {connect_retries}, read retries {read_retries},' +\n f'backoff factor {backoff_factor}, statuses {statuses}, ' +\n f'methods {methods}')"], {}), "(f'Setting up retry adapter with {total_retries} retries,' +\n f'connect retries {connect_retries}, read retries {read_retries},' +\n f'backoff factor {backoff_factor}, statuses {statuses}, ' +\n f'methods {methods}')\n", (8179, 8402), False, 'import logging\n'), ((2049, 2067), 'requests.Session', 'requests.Session', ([], {}), '()\n', (2065, 2067), False, 'import requests\n'), ((2242, 2275), 'lancedb.remote.connection_timeout.LanceDBClientHTTPAdapterFactory', 'LanceDBClientHTTPAdapterFactory', ([], {}), '()\n', (2273, 2275), False, 'from lancedb.remote.connection_timeout import LanceDBClientHTTPAdapterFactory\n'), ((6258, 6280), 'lancedb.remote.VectorQueryResult', 'VectorQueryResult', (['tbl'], {}), '(tbl)\n', (6275, 6280), False, 'from lancedb.remote import VectorQuery, VectorQueryResult\n'), ((1512, 1538), 'pyarrow.BufferReader', 'pa.BufferReader', (['resp_body'], {}), '(resp_body)\n', (1527, 1538), True, 'import pyarrow as pa\n'), ((2160, 2191), 'urllib.parse.urljoin', 'urljoin', (['self.url', '"""/v1/table/"""'], {}), "(self.url, '/v1/table/')\n", (2167, 2191), False, 'from urllib.parse import urljoin\n'), ((3098, 3143), 'lancedb.remote.errors.LanceDBClientError', 'LanceDBClientError', (['f"""Not found: {resp.text}"""'], {}), "(f'Not found: {resp.text}')\n", (3116, 3143), False, 'from lancedb.remote.errors import LanceDBClientError\n'), ((6665, 6716), 'urllib.parse.urljoin', 'urljoin', (['self.url', 'f"""/v1/table/{table_name}/query/"""'], {}), "(self.url, f'/v1/table/{table_name}/query/')\n", (6672, 6716), False, 'from urllib.parse import urljoin\n'), ((6786, 6840), 'urllib.parse.urljoin', 'urljoin', (['self.url', 'f"""/v1/table/{table_name}/describe/"""'], {}), "(self.url, f'/v1/table/{table_name}/describe/')\n", (6793, 6840), False, 'from urllib.parse import urljoin\n'), ((6923, 6979), 'urllib.parse.urljoin', 'urljoin', (['self.url', 'f"""/v1/table/{table_name}/index/list/"""'], {}), "(self.url, f'/v1/table/{table_name}/index/list/')\n", (6930, 6979), False, 'from urllib.parse import urljoin\n'), ((7127, 7174), 'os.environ.get', 'os.environ.get', (['"""LANCE_CLIENT_MAX_RETRIES"""', '"""3"""'], {}), "('LANCE_CLIENT_MAX_RETRIES', '3')\n", (7141, 7174), False, 'import os\n'), ((7208, 7259), 'os.environ.get', 'os.environ.get', (['"""LANCE_CLIENT_CONNECT_RETRIES"""', '"""3"""'], {}), "('LANCE_CLIENT_CONNECT_RETRIES', '3')\n", (7222, 7259), False, 'import os\n'), ((7290, 7338), 'os.environ.get', 'os.environ.get', (['"""LANCE_CLIENT_READ_RETRIES"""', '"""3"""'], {}), "('LANCE_CLIENT_READ_RETRIES', '3')\n", (7304, 7338), False, 'import os\n'), ((7386, 7445), 'os.environ.get', 'os.environ.get', (['"""LANCE_CLIENT_RETRY_BACKOFF_FACTOR"""', '"""0.25"""'], {}), "('LANCE_CLIENT_RETRY_BACKOFF_FACTOR', '0.25')\n", (7400, 7445), False, 'import os\n'), ((7502, 7561), 'os.environ.get', 'os.environ.get', (['"""LANCE_CLIENT_RETRY_BACKOFF_JITTER"""', '"""0.25"""'], {}), "('LANCE_CLIENT_RETRY_BACKOFF_JITTER', '0.25')\n", (7516, 7561), False, 'import os\n'), ((8487, 8679), 'urllib3.Retry', 'Retry', ([], {'total': 'total_retries', 'connect': 'connect_retries', 'read': 'read_retries', 'backoff_factor': 'backoff_factor', 'backoff_jitter': 'backoff_jitter', 'status_forcelist': 'statuses', 'allowed_methods': 'methods'}), '(total=total_retries, connect=connect_retries, read=read_retries,\n backoff_factor=backoff_factor, backoff_jitter=backoff_jitter,\n status_forcelist=statuses, allowed_methods=methods)\n', (8492, 8679), False, 'from urllib3 import Retry\n'), ((3206, 3280), 'lancedb.remote.errors.LanceDBClientError', 'LanceDBClientError', (['f"""Bad Request: {resp.status_code}, error: {resp.text}"""'], {}), "(f'Bad Request: {resp.status_code}, error: {resp.text}')\n", (3224, 3280), False, 'from lancedb.remote.errors import LanceDBClientError\n'), ((3986, 4008), 'urllib.parse.urljoin', 'urljoin', (['self.url', 'uri'], {}), '(self.url, uri)\n', (3993, 4008), False, 'from urllib.parse import urljoin\n'), ((5430, 5452), 'urllib.parse.urljoin', 'urljoin', (['self.url', 'uri'], {}), '(self.url, uri)\n', (5437, 5452), False, 'from urllib.parse import urljoin\n'), ((3373, 3462), 'lancedb.remote.errors.LanceDBClientError', 'LanceDBClientError', (['f"""Internal Server Error: {resp.status_code}, error: {resp.text}"""'], {}), "(\n f'Internal Server Error: {resp.status_code}, error: {resp.text}')\n", (3391, 3462), False, 'from lancedb.remote.errors import LanceDBClientError\n'), ((3544, 3620), 'lancedb.remote.errors.LanceDBClientError', 'LanceDBClientError', (['f"""Unknown Error: {resp.status_code}, error: {resp.text}"""'], {}), "(f'Unknown Error: {resp.status_code}, error: {resp.text}')\n", (3562, 3620), False, 'from lancedb.remote.errors import LanceDBClientError\n'), ((7643, 7710), 'os.environ.get', 'os.environ.get', (['"""LANCE_CLIENT_RETRY_STATUSES"""', '"""429, 500, 502, 503"""'], {}), "('LANCE_CLIENT_RETRY_STATUSES', '429, 500, 502, 503')\n", (7657, 7710), False, 'import os\n')]
from langchain.text_splitter import (
RecursiveCharacterTextSplitter,
Language,
LatexTextSplitter,
)
from langchain.document_loaders import TextLoader
from langchain.embeddings import OpenAIEmbeddings
import argparse, os, arxiv
os.environ["OPENAI_API_KEY"] = "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"
embeddings = OpenAIEmbeddings()
from langchain.vectorstores import LanceDB
from lancedb.pydantic import Vector, LanceModel
from typing import List
from datetime import datetime
import lancedb
embedding_out_length = 1536  # output dimension of the OpenAI embedding model
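# Schema for the LanceDB table: one row per embedded text chunk plus its arXiv metadata.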
class Content(LanceModel):
id: str
arxiv_id: str
vector: Vector(embedding_out_length)
text: str
uploaded_date: datetime
title: str
authors: List[str]
abstract: str
categories: List[str]
url: str
def PyPDF_to_Vector(table: LanceDB, embeddings: OpenAIEmbeddings, src_dir: str, n_threads: int = 1):
pass
if __name__ == "__main__":
argparser = argparse.ArgumentParser(description="Create Vector DB and perform ingestion from source files")
argparser.add_argument('-s', '--src_dir', type=str, required=True, help = "Source directory where arxiv sources are stored")
argparser.add_argument('-db', '--db_name', type=str, required=True, help = "Name of the LanceDB database to be created")
argparser.add_argument('-t', '--table_name', type=str, required=False, help = "Name of the LanceDB table to be created", default = "EIC_archive")
argparser.add_argument('-openai_key', '--openai_api_key', type=str, required=True, help = "OpenAI API key")
argparser.add_argument('-c', '--chunking', type = str, required=False, help = "Type of Chunking PDF or LATEX", default = "PDF")
argparser.add_argument('-n', '--nthreads', type=int, default=-1)
args = argparser.parse_args()
SRC_DIR = args.src_dir
DB_NAME = args.db_name
TABLE_NAME = args.table_name
OPENAI_API_KEY = args.openai_api_key
NTHREADS = args.nthreads
db = lancedb.connect(DB_NAME)
table = db.create_table(TABLE_NAME, schema=Content, mode="overwrite")
meta_data = {"arxiv_id": "1", "title": "EIC LLM",
"category" : "N/A",
"authors": "N/A",
"sub_categories": "N/A",
"abstract": "N/A",
"published": "N/A",
"updated": "N/A",
"doi": "N/A"
                 }
    table = db.create_table(
        TABLE_NAME,  # honor the --table_name argument; seed data lets the LangChain store infer the schema
data=[
{
"vector": embeddings.embed_query("EIC LLM"),
"text": "EIC LLM",
"id": "1",
"arxiv_id" : "N/A",
"title" : "N/A",
"category" : "N/A",
"published" : "N/A"
}
],
mode="overwrite",
)
vectorstore = LanceDB(connection = table, embedding = embeddings)
sourcedir = "PDFs"
count = 0
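# For each arXiv source directory: fetch its metadata, chunk the LaTeX sources, embed, and index.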
for source in os.listdir(sourcedir):
        if not os.path.isdir(os.path.join(sourcedir, source)):
continue
print (f"Adding the source document {source} to the Vector DB")
        client = arxiv.Client()
        search = arxiv.Search(id_list=[source])
        paper = next(client.results(search))
meta_data = {"arxiv_id": paper.entry_id,
"title": paper.title,
"category" : categories[paper.primary_category],
"published": paper.published
}
for file in os.listdir(os.path.join(sourcedir, source)):
if file.endswith(".tex"):
latex_file = os.path.join(sourcedir, source, file)
print (source, latex_file)
documents = TextLoader(latex_file, encoding = 'latin-1').load()
latex_splitter = LatexTextSplitter(
chunk_size=120, chunk_overlap=10
)
documents = latex_splitter.split_documents(documents)
for doc in documents:
for k, v in meta_data.items():
doc.metadata[k] = v
vectorstore.add_documents(documents = documents)
count+=len(documents) | [
"lancedb.pydantic.Vector",
"lancedb.connect"
] | [((342, 360), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (358, 360), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((2116, 2133), 'lancedb.connect', 'lancedb.connect', ([], {}), '()\n', (2131, 2133), False, 'import lancedb\n'), ((2820, 2867), 'langchain.vectorstores.LanceDB', 'LanceDB', ([], {'connection': 'table', 'embedding': 'embeddings'}), '(connection=table, embedding=embeddings)\n', (2827, 2867), False, 'from langchain.vectorstores import LanceDB\n'), ((2916, 2937), 'os.listdir', 'os.listdir', (['sourcedir'], {}), '(sourcedir)\n', (2926, 2937), False, 'import argparse, os, arxiv\n'), ((648, 676), 'lancedb.pydantic.Vector', 'Vector', (['embedding_out_length'], {}), '(embedding_out_length)\n', (654, 676), False, 'from lancedb.pydantic import Vector, LanceModel\n'), ((978, 1078), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create Vector DB and perform ingestion from source files"""'}), "(description=\n 'Create Vector DB and perform ingestion from source files')\n", (1001, 1078), False, 'import argparse, os, arxiv\n'), ((2006, 2030), 'lancedb.connect', 'lancedb.connect', (['DB_NAME'], {}), '(DB_NAME)\n', (2021, 2030), False, 'import lancedb\n'), ((3110, 3124), 'arxiv.Client', 'arxiv.Client', ([], {}), '()\n', (3122, 3124), False, 'import arxiv\n'), ((3138, 3168), 'arxiv.Search', 'arxiv.Search', ([], {'id_list': '[source]'}), '(id_list=[source])\n', (3150, 3168), False, 'import arxiv\n'), ((3458, 3489), 'os.path.join', 'os.path.join', (['sourcedir', 'source'], {}), '(sourcedir, source)\n', (3470, 3489), False, 'import argparse, os, arxiv\n'), ((2964, 2992), 'os.path.join', 'os.path.join', (['"""PDFs"""', 'source'], {}), "('PDFs', source)\n", (2976, 2992), False, 'import argparse, os, arxiv\n'), ((3551, 3588), 'os.path.join', 'os.path.join', (['sourcedir', 'source', 'file'], {}), '(sourcedir, source, file)\n', (3563, 3588), False, 'import argparse, os, arxiv\n'), ((3733, 3784), 'langchain.text_splitter.LatexTextSplitter', 'LatexTextSplitter', ([], {'chunk_size': '(120)', 'chunk_overlap': '(10)'}), '(chunk_size=120, chunk_overlap=10)\n', (3750, 3784), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter, Language, LatexTextSplitter\n'), ((3186, 3200), 'arxiv.Client', 'arxiv.Client', ([], {}), '()\n', (3198, 3200), False, 'import arxiv\n'), ((3652, 3694), 'langchain.document_loaders.TextLoader', 'TextLoader', (['latex_file'], {'encoding': '"""latin-1"""'}), "(latex_file, encoding='latin-1')\n", (3662, 3694), False, 'from langchain.document_loaders import TextLoader\n')] |
import time
import os
import pandas as pd
import streamlit as st
import lancedb
from langchain import PromptTemplate
import predictionguard as pg
import duckdb
import re
import numpy as np
from sentence_transformers import SentenceTransformer
#---------------------#
# Lance DB Setup #
#---------------------#
uri = "schema.lancedb"
db = lancedb.connect(uri)
def embed(query, embModel):
return embModel.encode(query)
def batch_embed_func(batch):
return [st.session_state['en_emb'].encode(sentence) for sentence in batch]
#---------------------#
# Streamlit config #
#---------------------#
if "login" not in st.session_state:
st.session_state["login"] = False
# Hide the hamburger menu
hide_streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
#--------------------------#
# Define datasets #
#--------------------------#
#JOBS
df1=pd.read_csv('datasets/jobs.csv')
#SOCIAL
df2=pd.read_csv('datasets/social.csv')
#movies
df3=pd.read_csv('datasets/movies.csv')
conn = duckdb.connect(database=':memory:')
conn.register('jobs', df1)
conn.register('social', df2)
conn.register('movies', df3)
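# The registered dataframes are now queryable with plain SQL through the in-memory DuckDB connection.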
#--------------------------#
# Prompt Templates #
#--------------------------#
### PROMPT TEMPLATES
qa_template = """### System:
You are a data chatbot who answers the user question. To answer these questions we run SQL queries on our data, and their output is given below as context. You just have to frame your answer using that context. Give a short and crisp response. Don't add any notes or any extra information after your response.
### User:
Question: {question}
context: {context}
### Assistant:
"""
qa_prompt = PromptTemplate(template=qa_template,input_variables=["question", "context"])
sql_template = """<|begin_of_sentence|>You are a SQL expert and you only generate SQL queries which are executable. You provide no extra explanations.
You respond with a SQL query that answers the user question in the below instruction by querying a database with the schema provided in the below instruction.
Always start your query with SELECT statement and end with a semicolon.
### Instruction:
User question: \"{question}\"
Database schema:
{schema}
### Response:
"""
sql_prompt=PromptTemplate(template=sql_template, input_variables=["question","schema"])
#--------------------------#
# Generate SQL Query #
#--------------------------#
# Embeddings setup
name="all-MiniLM-L12-v2"
def load_model():
return SentenceTransformer(name)
model = load_model()
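# Given a user question and a table schema, ask the code LLM to draft an executable SQL query.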
def generate_sql_query(question, schema):
prompt_filled = sql_prompt.format(question=question,schema=schema)
try:
result = pg.Completion.create(
model="deepseek-coder-6.7b-instruct",
prompt=prompt_filled,
max_tokens=300,
temperature=0.1
)
sql_query = result["choices"][0]["text"]
return sql_query
except Exception as e:
return None
def extract_and_refine_sql_query(sql_query):
# Extract SQL query using a regular expression
match = re.search(r"(SELECT.*?);", sql_query, re.DOTALL)
if match:
refined_query = match.group(1)
# Check for and remove any text after a colon
colon_index = refined_query.find(':')
if colon_index != -1:
refined_query = refined_query[:colon_index]
# Ensure the query ends with a semicolon
if not refined_query.endswith(';'):
refined_query += ';'
return refined_query
else:
return ""
def get_answer_from_sql(question):
    # Search relevant tables
table = db.open_table("schema")
results = table.search(embed(question, model)).limit(2).to_df()
print(results)
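    # Keep only schema entries whose embedding distance suggests a genuine match.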
results = results[results['_distance'] < 1.5]
print("Results:", results)
if len(results) == 0:
completion = "We did not find any relevant tables."
return completion
else:
results.sort_values(by=['_distance'], inplace=True, ascending=True)
doc_use = ""
for _, row in results.iterrows():
if len(row['text'].split(' ')) < 10:
continue
else:
schema=row['schema']
table_name=row['text']
st.sidebar.info(table_name)
st.sidebar.code(schema)
break
sql_query = generate_sql_query(question, schema)
sql_query = extract_and_refine_sql_query(sql_query)
try:
# print("Executing SQL Query:", sql_query)
result = conn.execute(sql_query).fetchall()
# print("Result:", result)
return result, sql_query
except Exception as e:
print(f"Error executing SQL query: {e}")
return "There was an error executing the SQL query."
#--------------------------#
# Get Answer #
#--------------------------#
def get_answer(question,context):
try:
prompt_filled = qa_prompt.format(question=question, context=context)
# Respond to the user
output = pg.Completion.create(
model="Neural-Chat-7B",
prompt=prompt_filled,
max_tokens=200,
temperature=0.1
)
completion = output['choices'][0]['text']
return completion
except Exception as e:
completion = "There was an error executing the SQL query."
return completion
#--------------------------#
# Streamlit app #
#--------------------------#
if "messages" not in st.session_state:
st.session_state.messages = []
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
if prompt := st.chat_input("Ask a question"):
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
# contruct prompt thread
examples = []
turn = "user"
example = {}
for m in st.session_state.messages:
latest_message = m["content"]
example[turn] = m["content"]
if turn == "user":
turn = "assistant"
else:
turn = "user"
examples.append(example)
example = {}
        if len(examples) > 2:
            examples = examples[-2:]
# # Check for PII
# with st.spinner("Checking for PII..."):
# pii_result = pg.PII.check(
# prompt=latest_message,
# replace=False,
# replace_method="fake"
# )
# # Check for injection
# with st.spinner("Checking for security vulnerabilities..."):
# injection_result = pg.Injection.check(
# prompt=latest_message,
# detect=True
# )
# # Handle insecure states
# elif "[" in pii_result['checks'][0]['pii_types_and_positions']:
# st.warning('Warning! PII detected. Please avoid using personal information.')
# full_response = "Warning! PII detected. Please avoid using personal information."
# elif injection_result['checks'][0]['probability'] > 0.5:
# st.warning('Warning! Injection detected. Your input might result in a security breach.')
# full_response = "Warning! Injection detected. Your input might result in a security breach."
# generate response
with st.spinner("Generating an answer..."):
context=get_answer_from_sql(latest_message)
print("context",context)
completion = get_answer(latest_message,context)
# display response
for token in completion.split(" "):
full_response += " " + token
message_placeholder.markdown(full_response + "▌")
time.sleep(0.075)
message_placeholder.markdown(full_response)
st.session_state.messages.append({"role": "assistant", "content": full_response}) | [
"lancedb.connect"
] | [((413, 433), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (428, 433), False, 'import lancedb\n'), ((890, 947), 'streamlit.markdown', 'st.markdown', (['hide_streamlit_style'], {'unsafe_allow_html': '(True)'}), '(hide_streamlit_style, unsafe_allow_html=True)\n', (901, 947), True, 'import streamlit as st\n'), ((1043, 1075), 'pandas.read_csv', 'pd.read_csv', (['"""datasets/jobs.csv"""'], {}), "('datasets/jobs.csv')\n", (1054, 1075), True, 'import pandas as pd\n'), ((1089, 1123), 'pandas.read_csv', 'pd.read_csv', (['"""datasets/social.csv"""'], {}), "('datasets/social.csv')\n", (1100, 1123), True, 'import pandas as pd\n'), ((1137, 1171), 'pandas.read_csv', 'pd.read_csv', (['"""datasets/movies.csv"""'], {}), "('datasets/movies.csv')\n", (1148, 1171), True, 'import pandas as pd\n'), ((1180, 1215), 'duckdb.connect', 'duckdb.connect', ([], {'database': '""":memory:"""'}), "(database=':memory:')\n", (1194, 1215), False, 'import duckdb\n'), ((1861, 1938), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'qa_template', 'input_variables': "['question', 'context']"}), "(template=qa_template, input_variables=['question', 'context'])\n", (1875, 1938), False, 'from langchain import PromptTemplate\n'), ((2426, 2503), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'sql_template', 'input_variables': "['question', 'schema']"}), "(template=sql_template, input_variables=['question', 'schema'])\n", (2440, 2503), False, 'from langchain import PromptTemplate\n'), ((2672, 2697), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['name'], {}), '(name)\n', (2691, 2697), False, 'from sentence_transformers import SentenceTransformer\n'), ((3239, 3286), 're.search', 're.search', (['"""(SELECT.*?);"""', 'sql_query', 're.DOTALL'], {}), "('(SELECT.*?);', sql_query, re.DOTALL)\n", (3248, 3286), False, 'import re\n'), ((5846, 5877), 'streamlit.chat_input', 'st.chat_input', (['"""Ask a question"""'], {}), "('Ask a question')\n", (5859, 5877), True, 'import streamlit as st\n'), ((5883, 5952), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (5915, 5952), True, 'import streamlit as st\n'), ((8226, 8311), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': full_response}"], {}), "({'role': 'assistant', 'content':\n full_response})\n", (8258, 8311), True, 'import streamlit as st\n'), ((2856, 2974), 'predictionguard.Completion.create', 'pg.Completion.create', ([], {'model': '"""deepseek-coder-6.7b-instruct"""', 'prompt': 'prompt_filled', 'max_tokens': '(300)', 'temperature': '(0.1)'}), "(model='deepseek-coder-6.7b-instruct', prompt=\n prompt_filled, max_tokens=300, temperature=0.1)\n", (2876, 2974), True, 'import predictionguard as pg\n'), ((5195, 5298), 'predictionguard.Completion.create', 'pg.Completion.create', ([], {'model': '"""Neural-Chat-7B"""', 'prompt': 'prompt_filled', 'max_tokens': '(200)', 'temperature': '(0.1)'}), "(model='Neural-Chat-7B', prompt=prompt_filled,\n max_tokens=200, temperature=0.1)\n", (5215, 5298), True, 'import predictionguard as pg\n'), ((5758, 5790), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (5773, 5790), True, 'import streamlit as st\n'), ((5800, 5831), 'streamlit.markdown', 'st.markdown', (["message['content']"], {}), "(message['content'])\n", (5811, 5831), True, 'import streamlit as st\n'), 
((5962, 5985), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (5977, 5985), True, 'import streamlit as st\n'), ((5995, 6014), 'streamlit.markdown', 'st.markdown', (['prompt'], {}), '(prompt)\n', (6006, 6014), True, 'import streamlit as st\n'), ((6025, 6053), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (6040, 6053), True, 'import streamlit as st\n'), ((6085, 6095), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (6093, 6095), True, 'import streamlit as st\n'), ((7748, 7785), 'streamlit.spinner', 'st.spinner', (['"""Generating an answer..."""'], {}), "('Generating an answer...')\n", (7758, 7785), True, 'import streamlit as st\n'), ((4413, 4440), 'streamlit.sidebar.info', 'st.sidebar.info', (['table_name'], {}), '(table_name)\n', (4428, 4440), True, 'import streamlit as st\n'), ((4457, 4480), 'streamlit.sidebar.code', 'st.sidebar.code', (['schema'], {}), '(schema)\n', (4472, 4480), True, 'import streamlit as st\n'), ((8147, 8164), 'time.sleep', 'time.sleep', (['(0.075)'], {}), '(0.075)\n', (8157, 8164), False, 'import time\n')] |
from FlagEmbedding import LLMEmbedder, FlagReranker
import lancedb
import pandas as pd
import torch
import gc
from lancedb.embeddings import with_embeddings
task = "qa" # Encode for a specific task (qa, icl, chat, lrlm, tool, convsearch)
embed_model = LLMEmbedder('BAAI/llm-embedder', use_fp16=False) # Load model (automatically use GPUs)
reranker_model = FlagReranker('BAAI/bge-reranker-base', use_fp16=True) # use_fp16 speeds up computation with a slight performance degradation
"""# Load `Chunks` of data from [BeIR Dataset](https://huggingface.co/datasets/BeIR/scidocs)
Note: This dataset is built specifically for retrieval tasks, to evaluate how well the search works.
"""
data=pd.read_csv("Kcc_subset.csv")
# just random samples for faster embed demo
data['documents'] = 'query:' + data['QueryText'] + ', answer:' + data['KccAns']
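# Pairing each query with its answer lets the embedding capture both sides of the exchange.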
data = data.dropna()
def embed_documents(batch):
"""
Function to embed the whole text data
"""
return embed_model.encode_keys(batch, task=task) # Encode data or 'keys'
db = lancedb.connect("./db") # Connect Local DB
if "doc_embed" in db.table_names():
table = db.open_table("doc_embed") # Open Table
else:
# Use the train text chunk data to save embed in the DB
data1 = with_embeddings(embed_documents, data, column = 'documents',show_progress = True, batch_size = 512)
table = db.create_table("doc_embed", data=data1) # create Table
"""# Search from a random Text"""
def search(query, top_k = 10):
"""
Search a query from the table
"""
query_vector = embed_model.encode_queries(query, task=task) # Encode the QUERY (it is done differently than the 'key')
search_results = table.search(query_vector).limit(top_k)
return ",".join(search_results.to_pandas().dropna(subset = "QueryText").reset_index(drop = True)["documents"].to_list())
# query = "how to control flower drop in bottelgourd?"
# print("QUERY:-> ", query)
# # get top_k search results
# search_results = search(query, top_k = 10).to_pandas().dropna(subset = "Query").reset_index(drop = True)["documents"]
# print(",".join(search_results.to_list))
# def rerank(query, search_results):
# search_results["old_similarity_rank"] = search_results.index+1 # Old ranks
# torch.cuda.empty_cache()
# gc.collect()
# search_results["new_scores"] = reranker_model.compute_score([[query,chunk] for chunk in search_results["text"]]) # Re compute ranks
# return search_results.sort_values(by = "new_scores", ascending = False).reset_index(drop = True)
# print("QUERY:-> ", query) | [
"lancedb.embeddings.with_embeddings",
"lancedb.connect"
] | [((356, 404), 'FlagEmbedding.LLMEmbedder', 'LLMEmbedder', (['"""BAAI/llm-embedder"""'], {'use_fp16': '(False)'}), "('BAAI/llm-embedder', use_fp16=False)\n", (367, 404), False, 'from FlagEmbedding import LLMEmbedder, FlagReranker\n'), ((463, 516), 'FlagEmbedding.FlagReranker', 'FlagReranker', (['"""BAAI/bge-reranker-base"""'], {'use_fp16': '(True)'}), "('BAAI/bge-reranker-base', use_fp16=True)\n", (475, 516), False, 'from FlagEmbedding import LLMEmbedder, FlagReranker\n'), ((803, 832), 'pandas.read_csv', 'pd.read_csv', (['"""Kcc_subset.csv"""'], {}), "('Kcc_subset.csv')\n", (814, 832), True, 'import pandas as pd\n'), ((1162, 1185), 'lancedb.connect', 'lancedb.connect', (['"""./db"""'], {}), "('./db')\n", (1177, 1185), False, 'import lancedb\n'), ((1370, 1469), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['embed_documents', 'data'], {'column': '"""documents"""', 'show_progress': '(True)', 'batch_size': '(512)'}), "(embed_documents, data, column='documents', show_progress=\n True, batch_size=512)\n", (1385, 1469), False, 'from lancedb.embeddings import with_embeddings\n')] |
import time
import re
import shutil
import os
import urllib.request
import html2text
import predictionguard as pg
from langchain import PromptTemplate, FewShotPromptTemplate
from langchain.text_splitter import CharacterTextSplitter
from langchain.agents import load_tools
from langchain.agents import initialize_agent
from langchain.agents import AgentType
from langchain.llms import PredictionGuard
import streamlit as st
from sentence_transformers import SentenceTransformer
import lancedb
from lancedb.embeddings import with_embeddings
import pandas as pd
#--------------------------#
# Prompt templates #
#--------------------------#
demo_formatter_template = """\nUser: {user}
Assistant: {assistant}\n"""
demo_prompt = PromptTemplate(
input_variables=["user", "assistant"],
template=demo_formatter_template,
)
category_template = """### Instruction:
Read the below input and determine if it is a request to generate computer code? Respond "yes" or "no".
### Input:
{query}
### Response:
"""
category_prompt = PromptTemplate(
input_variables=["query"],
template=category_template
)
qa_template = """### Instruction:
Read the context below and respond with an answer to the question. If the question cannot be answered based on the context alone or the context does not explicitly say the answer to the question, write "Sorry I had trouble answering this question, based on the information I found."
### Input:
Context: {context}
Question: {query}
### Response:
"""
qa_prompt = PromptTemplate(
input_variables=["context", "query"],
template=qa_template
)
chat_template = """### Instruction:
You are a friendly and clever AI assistant. Respond to the latest human message in the input conversation below.
### Input:
{context}
Human: {query}
AI:
### Response:
"""
chat_prompt = PromptTemplate(
input_variables=["context", "query"],
template=chat_template
)
code_template = """### Instruction:
You are a code generation assistant. Respond with a code snippet and any explanation requested in the below input.
### Input:
{query}
### Response:
"""
code_prompt = PromptTemplate(
input_variables=["query"],
template=code_template
)
#-------------------------#
# Vector search #
#-------------------------#
# Embeddings setup
name="all-MiniLM-L12-v2"
model = SentenceTransformer(name)
def embed_batch(batch):
return [model.encode(sentence) for sentence in batch]
def embed(sentence):
return model.encode(sentence)
# LanceDB setup
if os.path.exists(".lancedb"):
shutil.rmtree(".lancedb")
os.mkdir(".lancedb")
uri = ".lancedb"
db = lancedb.connect(uri)
def vector_search_urls(urls, query, sessionid):
for url in urls:
# Let's get the html off of a website.
fp = urllib.request.urlopen(url)
mybytes = fp.read()
html = mybytes.decode("utf8")
fp.close()
# And convert it to text.
h = html2text.HTML2Text()
h.ignore_links = True
text = h.handle(html)
# Chunk the text into smaller pieces for injection into LLM prompts.
text_splitter = CharacterTextSplitter(chunk_size=700, chunk_overlap=50)
docs = text_splitter.split_text(text)
docs = [x.replace('#', '-') for x in docs]
# Create a dataframe with the chunk ids and chunks
metadata = []
for i in range(len(docs)):
metadata.append([
i,
docs[i],
url
])
doc_df = pd.DataFrame(metadata, columns=["chunk", "text", "url"])
# Embed the documents
data = with_embeddings(embed_batch, doc_df)
# Create the table if there isn't one.
if sessionid not in db.table_names():
db.create_table(sessionid, data=data)
else:
table = db.open_table(sessionid)
table.add(data=data)
# Perform the query
table = db.open_table(sessionid)
results = table.search(embed(query)).limit(1).to_df()
results = results[results['_distance'] < 1.0]
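    # An empty result after the distance filter means nothing retrieved was relevant enough.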
if len(results) == 0:
doc_use = ""
else:
doc_use = results['text'].values[0]
# Clean up
db.drop_table(sessionid)
return doc_use
#-------------------------#
# Info Agent #
#-------------------------#
tools = load_tools(["serpapi"], llm=PredictionGuard(model="Nous-Hermes-Llama2-13B"))
agent = initialize_agent(
tools,
PredictionGuard(model="Nous-Hermes-Llama2-13B"),
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
max_execution_time=30)
#-------------------------#
# Helper functions #
#-------------------------#
def find_urls(text):
return re.findall(r'(https?://[^\s]+)', text)
# QuestionID provides some help in determining if a sentence is a question.
class QuestionID:
"""
QuestionID has the actual logic used to determine if sentence is a question
"""
def padCharacter(self, character: str, sentence: str):
if character in sentence:
position = sentence.index(character)
if position > 0 and position < len(sentence):
# Check for existing white space before the special character.
if (sentence[position - 1]) != " ":
sentence = sentence.replace(character, (" " + character))
return sentence
def predict(self, sentence: str):
questionStarters = [
"which", "wont", "cant", "isnt", "arent", "is", "do", "does",
"will", "can"
]
questionElements = [
"who", "what", "when", "where", "why", "how", "sup", "?"
]
sentence = sentence.lower()
sentence = sentence.replace("\'", "")
sentence = self.padCharacter('?', sentence)
splitWords = sentence.split()
if any(word == splitWords[0] for word in questionStarters) or any(
word in splitWords for word in questionElements):
return True
else:
return False
#---------------------#
# Streamlit config #
#---------------------#
#st.set_page_config(layout="wide")
# Hide the hamburger menu
hide_streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
#--------------------------#
# Streamlit sidebar #
#--------------------------#
st.sidebar.title("Super Chat 🚀")
st.sidebar.markdown(
"This app provides a chat interface driven by various generative AI models and "
"augmented (via information retrieval and agentic processing)."
)
url_text = st.sidebar.text_area(
"Enter one or more urls for reference information (separated by a comma):",
"", height=100)
if len(url_text) > 0:
urls = url_text.split(",")
else:
urls = []
#--------------------------#
# Streamlit app #
#--------------------------#
if "messages" not in st.session_state:
st.session_state.messages = []
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
if prompt := st.chat_input("Hello?"):
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
# process the context
examples = []
turn = "user"
example = {}
for m in st.session_state.messages:
latest_message = m["content"]
example[turn] = m["content"]
if turn == "user":
turn = "assistant"
else:
turn = "user"
examples.append(example)
example = {}
        if len(examples) > 4:
examples = examples[-4:]
# Determine what kind of message this is.
with st.spinner("Trying to figure out what you are wanting..."):
result = pg.Completion.create(
model="WizardCoder",
prompt=category_prompt.format(query=latest_message),
output={
"type": "categorical",
"categories": ["yes", "no"]
}
)
# configure out chain
code = result['choices'][0]['output']
qIDModel = QuestionID()
question = qIDModel.predict(latest_message)
if code == "no" and question:
# if there are urls, let's embed them as a primary data source.
if len(urls) > 0:
with st.spinner("Performing vector search..."):
info_context = vector_search_urls(urls, latest_message, "assistant")
else:
info_context = ""
# Handle the informational request.
if info_context != "":
with st.spinner("Generating a RAG result..."):
result = pg.Completion.create(
model="Nous-Hermes-Llama2-13B",
prompt=qa_prompt.format(context=info_context, query=latest_message)
)
completion = result['choices'][0]['text'].split('#')[0].strip()
# Otherwise try an agentic approach.
else:
with st.spinner("Trying to find an answer with an agent..."):
try:
completion = agent.run(latest_message)
                except Exception:
completion = "Sorry, I didn't find an answer. Could you rephrase the question?"
if "Agent stopped" in completion:
completion = "Sorry, I didn't find an answer. Could you rephrase the question?"
elif code == "yes":
# Handle the code generation request.
with st.spinner("Generating code..."):
result = pg.Completion.create(
model="WizardCoder",
prompt=code_prompt.format(query=latest_message),
max_tokens=500
)
completion = result['choices'][0]['text']
else:
# contruct prompt
few_shot_prompt = FewShotPromptTemplate(
examples=examples,
example_prompt=demo_prompt,
example_separator="",
prefix="The following is a conversation between an AI assistant and a human user. The assistant is helpful, creative, clever, and very friendly.\n",
suffix="\nHuman: {human}\nAssistant: ",
input_variables=["human"],
)
prompt = few_shot_prompt.format(human=latest_message)
# generate response
with st.spinner("Generating chat response..."):
result = pg.Completion.create(
model="Nous-Hermes-Llama2-13B",
prompt=prompt,
)
completion = result['choices'][0]['text']
# Print out the response.
completion = completion.split("Human:")[0].strip()
completion = completion.split("H:")[0].strip()
completion = completion.split('#')[0].strip()
for token in completion.split(" "):
full_response += " " + token
message_placeholder.markdown(full_response + "▌")
time.sleep(0.075)
message_placeholder.markdown(full_response)
st.session_state.messages.append({"role": "assistant", "content": full_response}) | [
"lancedb.embeddings.with_embeddings",
"lancedb.connect"
] | [((728, 820), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['user', 'assistant']", 'template': 'demo_formatter_template'}), "(input_variables=['user', 'assistant'], template=\n demo_formatter_template)\n", (742, 820), False, 'from langchain import PromptTemplate, FewShotPromptTemplate\n'), ((1030, 1099), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['query']", 'template': 'category_template'}), "(input_variables=['query'], template=category_template)\n", (1044, 1099), False, 'from langchain import PromptTemplate, FewShotPromptTemplate\n'), ((1510, 1584), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['context', 'query']", 'template': 'qa_template'}), "(input_variables=['context', 'query'], template=qa_template)\n", (1524, 1584), False, 'from langchain import PromptTemplate, FewShotPromptTemplate\n'), ((1820, 1896), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['context', 'query']", 'template': 'chat_template'}), "(input_variables=['context', 'query'], template=chat_template)\n", (1834, 1896), False, 'from langchain import PromptTemplate, FewShotPromptTemplate\n'), ((2113, 2178), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['query']", 'template': 'code_template'}), "(input_variables=['query'], template=code_template)\n", (2127, 2178), False, 'from langchain import PromptTemplate, FewShotPromptTemplate\n'), ((2328, 2353), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['name'], {}), '(name)\n', (2347, 2353), False, 'from sentence_transformers import SentenceTransformer\n'), ((2513, 2539), 'os.path.exists', 'os.path.exists', (['""".lancedb"""'], {}), "('.lancedb')\n", (2527, 2539), False, 'import os\n'), ((2571, 2591), 'os.mkdir', 'os.mkdir', (['""".lancedb"""'], {}), "('.lancedb')\n", (2579, 2591), False, 'import os\n'), ((2614, 2634), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (2629, 2634), False, 'import lancedb\n'), ((6281, 6338), 'streamlit.markdown', 'st.markdown', (['hide_streamlit_style'], {'unsafe_allow_html': '(True)'}), '(hide_streamlit_style, unsafe_allow_html=True)\n', (6292, 6338), True, 'import streamlit as st\n'), ((6429, 6461), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""Super Chat 🚀"""'], {}), "('Super Chat 🚀')\n", (6445, 6461), True, 'import streamlit as st\n'), ((6462, 6634), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""This app provides a chat interface driven by various generative AI models and augmented (via information retrieval and agentic processing)."""'], {}), "(\n 'This app provides a chat interface driven by various generative AI models and augmented (via information retrieval and agentic processing).'\n )\n", (6481, 6634), True, 'import streamlit as st\n'), ((6649, 6770), 'streamlit.sidebar.text_area', 'st.sidebar.text_area', (['"""Enter one or more urls for reference information (separated by a comma):"""', '""""""'], {'height': '(100)'}), "(\n 'Enter one or more urls for reference information (separated by a comma):',\n '', height=100)\n", (6669, 6770), True, 'import streamlit as st\n'), ((2545, 2570), 'shutil.rmtree', 'shutil.rmtree', (['""".lancedb"""'], {}), "('.lancedb')\n", (2558, 2570), False, 'import shutil\n'), ((4440, 4487), 'langchain.llms.PredictionGuard', 'PredictionGuard', ([], {'model': '"""Nous-Hermes-Llama2-13B"""'}), "(model='Nous-Hermes-Llama2-13B')\n", (4455, 4487), False, 'from langchain.llms import PredictionGuard\n'), ((4702, 4740), 
're.findall', 're.findall', (['"""(https?://[^\\\\s]+)"""', 'text'], {}), "('(https?://[^\\\\s]+)', text)\n", (4712, 4740), False, 'import re\n'), ((7149, 7172), 'streamlit.chat_input', 'st.chat_input', (['"""Hello?"""'], {}), "('Hello?')\n", (7162, 7172), True, 'import streamlit as st\n'), ((7178, 7247), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (7210, 7247), True, 'import streamlit as st\n'), ((11513, 11598), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': full_response}"], {}), "({'role': 'assistant', 'content':\n full_response})\n", (11545, 11598), True, 'import streamlit as st\n'), ((2767, 2794), 'urllib.request.urlopen', 'urllib.request.urlopen', (['url'], {}), '(url)\n', (2789, 2794), False, 'import urllib\n'), ((2927, 2948), 'html2text.HTML2Text', 'html2text.HTML2Text', ([], {}), '()\n', (2946, 2948), False, 'import html2text\n'), ((3111, 3166), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(700)', 'chunk_overlap': '(50)'}), '(chunk_size=700, chunk_overlap=50)\n', (3132, 3166), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((3507, 3563), 'pandas.DataFrame', 'pd.DataFrame', (['metadata'], {'columns': "['chunk', 'text', 'url']"}), "(metadata, columns=['chunk', 'text', 'url'])\n", (3519, 3563), True, 'import pandas as pd\n'), ((3618, 3654), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['embed_batch', 'doc_df'], {}), '(embed_batch, doc_df)\n', (3633, 3654), False, 'from lancedb.embeddings import with_embeddings\n'), ((4349, 4396), 'langchain.llms.PredictionGuard', 'PredictionGuard', ([], {'model': '"""Nous-Hermes-Llama2-13B"""'}), "(model='Nous-Hermes-Llama2-13B')\n", (4364, 4396), False, 'from langchain.llms import PredictionGuard\n'), ((7061, 7093), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (7076, 7093), True, 'import streamlit as st\n'), ((7103, 7134), 'streamlit.markdown', 'st.markdown', (["message['content']"], {}), "(message['content'])\n", (7114, 7134), True, 'import streamlit as st\n'), ((7257, 7280), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (7272, 7280), True, 'import streamlit as st\n'), ((7290, 7309), 'streamlit.markdown', 'st.markdown', (['prompt'], {}), '(prompt)\n', (7301, 7309), True, 'import streamlit as st\n'), ((7320, 7348), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (7335, 7348), True, 'import streamlit as st\n'), ((7380, 7390), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (7388, 7390), True, 'import streamlit as st\n'), ((7955, 8013), 'streamlit.spinner', 'st.spinner', (['"""Trying to figure out what you are wanting..."""'], {}), "('Trying to figure out what you are wanting...')\n", (7965, 8013), True, 'import streamlit as st\n'), ((11438, 11455), 'time.sleep', 'time.sleep', (['(0.075)'], {}), '(0.075)\n', (11448, 11455), False, 'import time\n'), ((10288, 10613), 'langchain.FewShotPromptTemplate', 'FewShotPromptTemplate', ([], {'examples': 'examples', 'example_prompt': 'demo_prompt', 'example_separator': '""""""', 'prefix': '"""The following is a conversation between an AI assistant and a human user. 
The assistant is helpful, creative, clever, and very friendly.\n"""', 'suffix': '"""\nHuman: {human}\nAssistant: """', 'input_variables': "['human']"}), '(examples=examples, example_prompt=demo_prompt,\n example_separator=\'\', prefix=\n """The following is a conversation between an AI assistant and a human user. The assistant is helpful, creative, clever, and very friendly.\n"""\n , suffix="""\nHuman: {human}\nAssistant: """, input_variables=[\'human\'])\n', (10309, 10613), False, 'from langchain import PromptTemplate, FewShotPromptTemplate\n'), ((8640, 8681), 'streamlit.spinner', 'st.spinner', (['"""Performing vector search..."""'], {}), "('Performing vector search...')\n", (8650, 8681), True, 'import streamlit as st\n'), ((8929, 8969), 'streamlit.spinner', 'st.spinner', (['"""Generating a RAG result..."""'], {}), "('Generating a RAG result...')\n", (8939, 8969), True, 'import streamlit as st\n'), ((9377, 9432), 'streamlit.spinner', 'st.spinner', (['"""Trying to find an answer with an agent..."""'], {}), "('Trying to find an answer with an agent...')\n", (9387, 9432), True, 'import streamlit as st\n'), ((9910, 9942), 'streamlit.spinner', 'st.spinner', (['"""Generating code..."""'], {}), "('Generating code...')\n", (9920, 9942), True, 'import streamlit as st\n'), ((10823, 10864), 'streamlit.spinner', 'st.spinner', (['"""Generating chat response..."""'], {}), "('Generating chat response...')\n", (10833, 10864), True, 'import streamlit as st\n'), ((10891, 10958), 'predictionguard.Completion.create', 'pg.Completion.create', ([], {'model': '"""Nous-Hermes-Llama2-13B"""', 'prompt': 'prompt'}), "(model='Nous-Hermes-Llama2-13B', prompt=prompt)\n", (10911, 10958), True, 'import predictionguard as pg\n')] |
import logging
from pathlib import Path
from typing import Dict, Iterable, List, Optional, Union
logger = logging.getLogger(__name__)
from hamilton import contrib
with contrib.catch_import_errors(__name__, __file__, logger):
import pyarrow as pa
import lancedb
import numpy as np
import pandas as pd
from lancedb.pydantic import LanceModel
from hamilton.function_modifiers import tag
VectorType = Union[list, np.ndarray, pa.Array, pa.ChunkedArray]
DataType = Union[Dict, List[Dict], pd.DataFrame, pa.Table, Iterable[pa.RecordBatch]]
TableSchema = Union[pa.Schema, LanceModel]
def client(uri: Union[str, Path] = "./.lancedb") -> lancedb.DBConnection:
"""Create a LanceDB connection.
:param uri: path to local LanceDB
:return: connection to LanceDB instance.
"""
return lancedb.connect(uri=uri)
def _create_table(
client: lancedb.DBConnection,
table_name: str,
schema: Optional[TableSchema] = None,
overwrite_table: bool = False,
) -> lancedb.db.LanceTable:
"""Create a new table based on schema."""
mode = "overwrite" if overwrite_table else "create"
table = client.create_table(name=table_name, schema=schema, mode=mode)
return table
@tag(side_effect="True")
def table_ref(
client: lancedb.DBConnection,
table_name: str,
schema: Optional[TableSchema] = None,
overwrite_table: bool = False,
) -> lancedb.db.LanceTable:
"""Create or reference a LanceDB table
    :param client: LanceDB connection.
:param table_name: Name of the table.
:param schema: Pyarrow schema defining the table schema.
:param overwrite_table: If True, overwrite existing table
:return: Reference to existing or newly created table.
"""
try:
table = client.open_table(table_name)
except FileNotFoundError:
if schema is None:
raise ValueError("`schema` must be provided to create table.")
table = _create_table(
client=client,
table_name=table_name,
schema=schema,
overwrite_table=overwrite_table,
)
return table
@tag(side_effect="True")
def reset(client: lancedb.DBConnection) -> Dict[str, List[str]]:
"""Drop all existing tables.
    :param client: LanceDB connection.
:return: dictionary containing all the dropped tables.
"""
tables_dropped = []
for table_name in client.table_names():
client.drop_table(table_name)
tables_dropped.append(table_name)
return dict(tables_dropped=tables_dropped)
@tag(side_effect="True")
def insert(table_ref: lancedb.db.LanceTable, data: DataType) -> Dict:
"""Push new data to the specified table.
:param table_ref: Reference to the LanceDB table.
:param data: Data to add to the table. Ref: https://lancedb.github.io/lancedb/guides/tables/#adding-to-a-table
:return: Reference to the table and number of rows added
"""
n_rows_before = table_ref.to_arrow().shape[0]
table_ref.add(data)
n_rows_after = table_ref.to_arrow().shape[0]
n_rows_added = n_rows_after - n_rows_before
return dict(table=table_ref, n_rows_added=n_rows_added)
@tag(side_effect="True")
def delete(table_ref: lancedb.db.LanceTable, delete_expression: str) -> Dict:
"""Delete existing data using an SQL expression.
:param table_ref: Reference to the LanceDB table.
    :param delete_expression: SQL expression selecting the rows to delete. Ref: https://lancedb.github.io/lancedb/sql/
:return: Reference to the table and number of rows deleted
"""
n_rows_before = table_ref.to_arrow().shape[0]
table_ref.delete(delete_expression)
n_rows_after = table_ref.to_arrow().shape[0]
n_rows_deleted = n_rows_before - n_rows_after
return dict(table=table_ref, n_rows_deleted=n_rows_deleted)
def vector_search(
table_ref: lancedb.db.LanceTable,
vector_query: VectorType,
columns: Optional[List[str]] = None,
where: Optional[str] = None,
prefilter_where: bool = False,
limit: int = 10,
) -> pd.DataFrame:
"""Search database using an embedding vector.
:param table_ref: table to search
:param vector_query: embedding of the query
:param columns: columns to include in the results
:param where: SQL where clause to pre- or post-filter results
:param prefilter_where: If True filter rows before search else filter after search
:param limit: number of rows to return
:return: A dataframe of results
"""
query_ = (
table_ref.search(
query=vector_query,
query_type="vector",
vector_column_name="vector",
)
.select(columns=columns)
.where(where, prefilter=prefilter_where)
.limit(limit=limit)
)
return query_.to_pandas()
def full_text_search(
table_ref: lancedb.db.LanceTable,
full_text_query: str,
full_text_index: Union[str, List[str]],
where: Optional[str] = None,
limit: int = 10,
rebuild_index: bool = True,
) -> pd.DataFrame:
"""Search database using an embedding vector.
:param table_ref: table to search
:param full_text_query: text query
:param full_text_index: one or more text columns to search
:param where: SQL where clause to pre- or post-filter results
:param limit: number of rows to return
:param rebuild_index: If True rebuild the index
:return: A dataframe of results
"""
# NOTE. Currently, the index needs to be recreated whenever data is added
# ref: https://lancedb.github.io/lancedb/fts/#installation
if rebuild_index:
table_ref.create_fts_index(full_text_index)
query_ = (
table_ref.search(query=full_text_query, query_type="fts")
.select(full_text_index)
.where(where)
.limit(limit)
)
return query_.to_pandas()
| [
"lancedb.connect"
] | [((107, 134), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (124, 134), False, 'import logging\n'), ((1219, 1242), 'hamilton.function_modifiers.tag', 'tag', ([], {'side_effect': '"""True"""'}), "(side_effect='True')\n", (1222, 1242), False, 'from hamilton.function_modifiers import tag\n'), ((2122, 2145), 'hamilton.function_modifiers.tag', 'tag', ([], {'side_effect': '"""True"""'}), "(side_effect='True')\n", (2125, 2145), False, 'from hamilton.function_modifiers import tag\n'), ((2554, 2577), 'hamilton.function_modifiers.tag', 'tag', ([], {'side_effect': '"""True"""'}), "(side_effect='True')\n", (2557, 2577), False, 'from hamilton.function_modifiers import tag\n'), ((3166, 3189), 'hamilton.function_modifiers.tag', 'tag', ([], {'side_effect': '"""True"""'}), "(side_effect='True')\n", (3169, 3189), False, 'from hamilton.function_modifiers import tag\n'), ((171, 226), 'hamilton.contrib.catch_import_errors', 'contrib.catch_import_errors', (['__name__', '__file__', 'logger'], {}), '(__name__, __file__, logger)\n', (198, 226), False, 'from hamilton import contrib\n'), ((816, 840), 'lancedb.connect', 'lancedb.connect', ([], {'uri': 'uri'}), '(uri=uri)\n', (831, 840), False, 'import lancedb\n')] |
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import importlib.metadata
import platform
import random
import sys
import time
from lancedb.utils import CONFIG
from lancedb.utils.general import TryExcept
from .general import (
PLATFORMS,
get_git_origin_url,
is_git_dir,
is_github_actions_ci,
is_online,
is_pip_package,
is_pytest_running,
threaded_request,
)
class _Events:
"""
A class for collecting anonymous event analytics. Event analytics are enabled when ``diagnostics=True`` in config and
disabled when ``diagnostics=False``.
You can enable or disable diagnostics by running ``lancedb diagnostics --enabled`` or ``lancedb diagnostics --disabled``.
Attributes
----------
url : str
The URL to send anonymous events.
rate_limit : float
The rate limit in seconds for sending events.
metadata : dict
A dictionary containing metadata about the environment.
enabled : bool
A flag to enable or disable Events based on certain conditions.
"""
_instance = None
url = "https://app.posthog.com/capture/"
headers = {"Content-Type": "application/json"}
api_key = "phc_oENDjGgHtmIDrV6puUiFem2RB4JA8gGWulfdulmMdZP"
# This api-key is write only and is safe to expose in the codebase.
def __init__(self):
"""
Initializes the Events object with default values for events, rate_limit, and metadata.
"""
self.events = [] # events list
self.throttled_event_names = ["search_table"]
self.throttled_events = set()
self.max_events = 5 # max events to store in memory
self.rate_limit = 60.0 * 5 # rate limit (seconds)
self.time = 0.0
if is_git_dir():
install = "git"
elif is_pip_package():
install = "pip"
else:
install = "other"
self.metadata = {
"cli": sys.argv[0],
"install": install,
"python": ".".join(platform.python_version_tuple()[:2]),
"version": importlib.metadata.version("lancedb"),
"platforms": PLATFORMS,
"session_id": round(random.random() * 1e15),
# 'engagement_time_msec': 1000 # TODO: In future we might be interested in this metric
}
TESTS_RUNNING = is_pytest_running() or is_github_actions_ci()
ONLINE = is_online()
self.enabled = (
CONFIG["diagnostics"]
and not TESTS_RUNNING
and ONLINE
and (
is_pip_package()
or get_git_origin_url() == "https://github.com/lancedb/lancedb.git"
)
)
    def __call__(self, event_name, params=None):
"""
Attempts to add a new event to the events list and send events if the rate limit is reached.
Args
----
event_name : str
The name of the event to be logged.
params : dict, optional
A dictionary of additional parameters to be logged with the event.
"""
### NOTE: We might need a way to tag a session with a label to check usage from a source. Setting label should be exposed to the user.
        if not self.enabled:
            return
        if params is None:  # avoid mutating a shared default dict across calls
            params = {}
if (
len(self.events) < self.max_events
): # Events list limited to self.max_events (drop any events past this)
params.update(self.metadata)
event = {
"event": event_name,
"properties": params,
"timestamp": datetime.datetime.now(
tz=datetime.timezone.utc
).isoformat(),
"distinct_id": CONFIG["uuid"],
}
if event_name not in self.throttled_event_names:
self.events.append(event)
elif event_name not in self.throttled_events:
self.throttled_events.add(event_name)
self.events.append(event)
# Check rate limit
t = time.time()
if (t - self.time) < self.rate_limit:
return
# Time is over rate limiter, send now
data = {
"api_key": self.api_key,
"distinct_id": CONFIG["uuid"], # posthog needs this to accepts the event
"batch": self.events,
}
# POST equivalent to requests.post(self.url, json=data).
# threaded request is used to avoid blocking, retries are disabled, and verbose is disabled
# to avoid any possible disruption in the console.
threaded_request(
method="post",
url=self.url,
headers=self.headers,
json=data,
retry=0,
verbose=False,
)
# Flush & Reset
self.events = []
self.throttled_events = set()
self.time = t
@TryExcept(verbose=False)
def register_event(name: str, **kwargs):
if _Events._instance is None:
_Events._instance = _Events()
_Events._instance(name, **kwargs)
| [
"lancedb.utils.general.TryExcept"
] | [((5422, 5446), 'lancedb.utils.general.TryExcept', 'TryExcept', ([], {'verbose': '(False)'}), '(verbose=False)\n', (5431, 5446), False, 'from lancedb.utils.general import TryExcept\n'), ((4584, 4595), 'time.time', 'time.time', ([], {}), '()\n', (4593, 4595), False, 'import time\n'), ((2567, 2598), 'platform.python_version_tuple', 'platform.python_version_tuple', ([], {}), '()\n', (2596, 2598), False, 'import platform\n'), ((2735, 2750), 'random.random', 'random.random', ([], {}), '()\n', (2748, 2750), False, 'import random\n'), ((4127, 4174), 'datetime.datetime.now', 'datetime.datetime.now', ([], {'tz': 'datetime.timezone.utc'}), '(tz=datetime.timezone.utc)\n', (4148, 4174), False, 'import datetime\n')] |
import argparse
import os
import sys
from concurrent.futures import ProcessPoolExecutor, as_completed
from functools import lru_cache
from pathlib import Path
from typing import Any, Iterator
import lancedb
import pandas as pd
import srsly
from codetiming import Timer
from dotenv import load_dotenv
from lancedb.pydantic import pydantic_to_schema
from sentence_transformers import SentenceTransformer
from tqdm import tqdm
sys.path.insert(1, os.path.realpath(Path(__file__).resolve().parents[1]))
from api.config import Settings
from schemas.wine import LanceModelWine, Wine
load_dotenv()
# Custom types
JsonBlob = dict[str, Any]
class FileNotFoundError(Exception):  # NOTE: shadows the builtin of the same name
    pass
@lru_cache()
def get_settings():
# Use lru_cache to avoid loading .env file for every request
return Settings()
def chunk_iterable(item_list: list[JsonBlob], chunksize: int) -> Iterator[list[JsonBlob]]:
"""
Break a large iterable into an iterable of smaller iterables of size `chunksize`
"""
for i in range(0, len(item_list), chunksize):
yield item_list[i : i + chunksize]
def get_json_data(data_dir: Path, filename: str) -> list[JsonBlob]:
"""Get all line-delimited json files (.jsonl) from a directory with a given prefix"""
file_path = data_dir / filename
if not file_path.is_file():
# File may not have been uncompressed yet so try to do that first
data = srsly.read_gzip_jsonl(file_path)
# This time if it isn't there it really doesn't exist
if not file_path.is_file():
raise FileNotFoundError(f"No valid .jsonl file found in `{data_dir}`")
else:
data = srsly.read_gzip_jsonl(file_path)
return data
def validate(
data: list[JsonBlob],
exclude_none: bool = False,
) -> list[JsonBlob]:
validated_data = [Wine(**item).model_dump(exclude_none=exclude_none) for item in data]
return validated_data
def embed_func(batch: list[str], model) -> list[list[float]]:
return [model.encode(sentence.lower()) for sentence in batch]
def vectorize_text(data: list[JsonBlob]) -> list[LanceModelWine] | None:
# Load a sentence transformer model for semantic similarity from a specified checkpoint
model_id = get_settings().embedding_model_checkpoint
assert model_id, "Invalid embedding model checkpoint specified in .env file"
MODEL = SentenceTransformer(model_id)
ids = [item["id"] for item in data]
to_vectorize = [text.get("to_vectorize") for text in data]
vectors = embed_func(to_vectorize, MODEL)
try:
data_batch = [{**d, "vector": vector} for d, vector in zip(data, vectors)]
except Exception as e:
print(f"{e}: Failed to add ID range {min(ids)}-{max(ids)}")
return None
return data_batch
def embed_batches(tbl: lancedb.db.LanceTable, validated_data: list[JsonBlob]) -> None:
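    # Each worker process loads its own embedding model inside vectorize_text, so chunks embed in parallel.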
with ProcessPoolExecutor(max_workers=WORKERS) as executor:
chunked_data = chunk_iterable(validated_data, CHUNKSIZE)
embed_data = []
for chunk in tqdm(chunked_data, total=len(validated_data) // CHUNKSIZE):
futures = [executor.submit(vectorize_text, chunk)]
embed_data = [f.result() for f in as_completed(futures) if f.result()][0]
df = pd.DataFrame.from_dict(embed_data)
            tbl.add(df, mode="append")  # append so every embedded chunk is kept in the table
def main(data: list[JsonBlob]) -> None:
DB_NAME = f"../{get_settings().lancedb_dir}"
TABLE = "wines"
db = lancedb.connect(DB_NAME)
tbl = db.create_table(TABLE, schema=pydantic_to_schema(LanceModelWine), mode="overwrite")
print(f"Created table `{TABLE}`, with length {len(tbl)}")
with Timer(name="Bulk Index", text="Validated data using Pydantic in {:.4f} sec"):
validated_data = validate(data, exclude_none=False)
with Timer(name="Embed batches", text="Created sentence embeddings in {:.4f} sec"):
embed_batches(tbl, validated_data)
print(f"Finished inserting {len(tbl)} items into LanceDB table")
with Timer(name="Create index", text="Created IVF-PQ index in {:.4f} sec"):
        # Create an IVF-PQ index. Rule of thumb: choose num_partitions as the power of 2
        # closest to len(dataset) // 5000; with ~130k datapoints, 130000 // 5000 ~ 26 -> 32.
        # Here a small num_partitions of 4 is used, with 32 sub-vectors for PQ.
tbl.create_index(metric="cosine", num_partitions=4, num_sub_vectors=32)
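# Query-side sketch (assumption -- not part of this script): once the IVF-PQ index
# exists, a nearest-neighbour search could look like:
#
#     query_vec = vectorize_text([{"id": 0, "to_vectorize": "fruity red wine"}])[0]["vector"]
#     tbl.search(query_vec).metric("cosine").limit(5).to_df()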
if __name__ == "__main__":
# fmt: off
parser = argparse.ArgumentParser("Bulk index database from the wine reviews JSONL data")
parser.add_argument("--limit", type=int, default=0, help="Limit the size of the dataset to load for testing purposes")
parser.add_argument("--chunksize", type=int, default=1000, help="Size of each chunk to break the dataset into before processing")
parser.add_argument("--filename", type=str, default="winemag-data-130k-v2.jsonl.gz", help="Name of the JSONL zip file to use")
parser.add_argument("--workers", type=int, default=4, help="Number of workers to use for vectorization")
args = vars(parser.parse_args())
# fmt: on
LIMIT = args["limit"]
DATA_DIR = Path(__file__).parents[3] / "data"
FILENAME = args["filename"]
CHUNKSIZE = args["chunksize"]
WORKERS = args["workers"]
data = list(get_json_data(DATA_DIR, FILENAME))
assert data, "No data found in the specified file"
data = data[:LIMIT] if LIMIT > 0 else data
main(data)
print("Finished execution!")
| [
"lancedb.connect",
"lancedb.pydantic.pydantic_to_schema"
] | [((580, 593), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (591, 593), False, 'from dotenv import load_dotenv\n'), ((685, 696), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (694, 696), False, 'from functools import lru_cache\n'), ((793, 803), 'api.config.Settings', 'Settings', ([], {}), '()\n', (801, 803), False, 'from api.config import Settings\n'), ((2355, 2384), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['model_id'], {}), '(model_id)\n', (2374, 2384), False, 'from sentence_transformers import SentenceTransformer\n'), ((3439, 3463), 'lancedb.connect', 'lancedb.connect', (['DB_NAME'], {}), '(DB_NAME)\n', (3454, 3463), False, 'import lancedb\n'), ((4390, 4469), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Bulk index database from the wine reviews JSONL data"""'], {}), "('Bulk index database from the wine reviews JSONL data')\n", (4413, 4469), False, 'import argparse\n'), ((1408, 1440), 'srsly.read_gzip_jsonl', 'srsly.read_gzip_jsonl', (['file_path'], {}), '(file_path)\n', (1429, 1440), False, 'import srsly\n'), ((1647, 1679), 'srsly.read_gzip_jsonl', 'srsly.read_gzip_jsonl', (['file_path'], {}), '(file_path)\n', (1668, 1679), False, 'import srsly\n'), ((2852, 2892), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', ([], {'max_workers': 'WORKERS'}), '(max_workers=WORKERS)\n', (2871, 2892), False, 'from concurrent.futures import ProcessPoolExecutor, as_completed\n'), ((3631, 3707), 'codetiming.Timer', 'Timer', ([], {'name': '"""Bulk Index"""', 'text': '"""Validated data using Pydantic in {:.4f} sec"""'}), "(name='Bulk Index', text='Validated data using Pydantic in {:.4f} sec')\n", (3636, 3707), False, 'from codetiming import Timer\n'), ((3779, 3856), 'codetiming.Timer', 'Timer', ([], {'name': '"""Embed batches"""', 'text': '"""Created sentence embeddings in {:.4f} sec"""'}), "(name='Embed batches', text='Created sentence embeddings in {:.4f} sec')\n", (3784, 3856), False, 'from codetiming import Timer\n'), ((3981, 4050), 'codetiming.Timer', 'Timer', ([], {'name': '"""Create index"""', 'text': '"""Created IVF-PQ index in {:.4f} sec"""'}), "(name='Create index', text='Created IVF-PQ index in {:.4f} sec')\n", (3986, 4050), False, 'from codetiming import Timer\n'), ((3242, 3276), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['embed_data'], {}), '(embed_data)\n', (3264, 3276), True, 'import pandas as pd\n'), ((3505, 3539), 'lancedb.pydantic.pydantic_to_schema', 'pydantic_to_schema', (['LanceModelWine'], {}), '(LanceModelWine)\n', (3523, 3539), False, 'from lancedb.pydantic import pydantic_to_schema\n'), ((1813, 1825), 'schemas.wine.Wine', 'Wine', ([], {}), '(**item)\n', (1817, 1825), False, 'from schemas.wine import LanceModelWine, Wine\n'), ((5060, 5074), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (5064, 5074), False, 'from pathlib import Path\n'), ((462, 476), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (466, 476), False, 'from pathlib import Path\n'), ((3185, 3206), 'concurrent.futures.as_completed', 'as_completed', (['futures'], {}), '(futures)\n', (3197, 3206), False, 'from concurrent.futures import ProcessPoolExecutor, as_completed\n')] |
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from typing import Any, Callable, Dict, Iterable, Optional, Union
import aiohttp
import attrs
import pyarrow as pa
from pydantic import BaseModel
from lancedb.common import Credential
from lancedb.remote import VectorQuery, VectorQueryResult
from lancedb.remote.errors import LanceDBClientError
ARROW_STREAM_CONTENT_TYPE = "application/vnd.apache.arrow.stream"
def _check_not_closed(f):
@functools.wraps(f)
def wrapped(self, *args, **kwargs):
if self.closed:
raise ValueError("Connection is closed")
return f(self, *args, **kwargs)
return wrapped
async def _read_ipc(resp: aiohttp.ClientResponse) -> pa.Table:
resp_body = await resp.read()
with pa.ipc.open_file(pa.BufferReader(resp_body)) as reader:
return reader.read_all()
@attrs.define(slots=False)
class RestfulLanceDBClient:
db_name: str
region: str
api_key: Credential
host_override: Optional[str] = attrs.field(default=None)
closed: bool = attrs.field(default=False, init=False)
@functools.cached_property
def session(self) -> aiohttp.ClientSession:
url = (
self.host_override
or f"https://{self.db_name}.{self.region}.api.lancedb.com"
)
return aiohttp.ClientSession(url)
async def close(self):
await self.session.close()
self.closed = True
@functools.cached_property
def headers(self) -> Dict[str, str]:
headers = {
"x-api-key": self.api_key,
}
if self.region == "local": # Local test mode
headers["Host"] = f"{self.db_name}.{self.region}.api.lancedb.com"
if self.host_override:
headers["x-lancedb-database"] = self.db_name
return headers
@staticmethod
async def _check_status(resp: aiohttp.ClientResponse):
if resp.status == 404:
raise LanceDBClientError(f"Not found: {await resp.text()}")
elif 400 <= resp.status < 500:
raise LanceDBClientError(
f"Bad Request: {resp.status}, error: {await resp.text()}"
)
elif 500 <= resp.status < 600:
raise LanceDBClientError(
f"Internal Server Error: {resp.status}, error: {await resp.text()}"
)
elif resp.status != 200:
raise LanceDBClientError(
f"Unknown Error: {resp.status}, error: {await resp.text()}"
)
@_check_not_closed
    async def get(self, uri: str, params: Optional[Union[Dict[str, Any], BaseModel]] = None):
"""Send a GET request and returns the deserialized response payload."""
if isinstance(params, BaseModel):
params: Dict[str, Any] = params.dict(exclude_none=True)
async with self.session.get(
uri,
params=params,
headers=self.headers,
timeout=aiohttp.ClientTimeout(total=30),
) as resp:
await self._check_status(resp)
return await resp.json()
@_check_not_closed
async def post(
self,
uri: str,
data: Optional[Union[Dict[str, Any], BaseModel, bytes]] = None,
params: Optional[Dict[str, Any]] = None,
content_type: Optional[str] = None,
deserialize: Callable = lambda resp: resp.json(),
request_id: Optional[str] = None,
) -> Dict[str, Any]:
"""Send a POST request and returns the deserialized response payload.
Parameters
----------
uri : str
The uri to send the POST request to.
data: Union[Dict[str, Any], BaseModel]
request_id: Optional[str]
Optional client side request id to be sent in the request headers.
"""
if isinstance(data, BaseModel):
data: Dict[str, Any] = data.dict(exclude_none=True)
if isinstance(data, bytes):
req_kwargs = {"data": data}
else:
req_kwargs = {"json": data}
headers = self.headers.copy()
if content_type is not None:
headers["content-type"] = content_type
if request_id is not None:
headers["x-request-id"] = request_id
async with self.session.post(
uri,
headers=headers,
params=params,
timeout=aiohttp.ClientTimeout(total=30),
**req_kwargs,
) as resp:
resp: aiohttp.ClientResponse = resp
await self._check_status(resp)
return await deserialize(resp)
@_check_not_closed
async def list_tables(
self, limit: int, page_token: Optional[str] = None
) -> Iterable[str]:
"""List all tables in the database."""
if page_token is None:
page_token = ""
json = await self.get("/v1/table/", {"limit": limit, "page_token": page_token})
return json["tables"]
@_check_not_closed
async def query(self, table_name: str, query: VectorQuery) -> VectorQueryResult:
"""Query a table."""
tbl = await self.post(
f"/v1/table/{table_name}/query/", query, deserialize=_read_ipc
)
return VectorQueryResult(tbl)
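# Illustrative usage (assumption -- not part of this module):
#
#     client = RestfulLanceDBClient("my-db", "us-east-1", api_key)
#     tables = await client.list_tables(limit=10)
#     result = await client.query("my-table", VectorQuery(...))  # fill in query fields
#     await client.close()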
| [
"lancedb.remote.VectorQueryResult"
] | [((1402, 1427), 'attrs.define', 'attrs.define', ([], {'slots': '(False)'}), '(slots=False)\n', (1414, 1427), False, 'import attrs\n'), ((1006, 1024), 'functools.wraps', 'functools.wraps', (['f'], {}), '(f)\n', (1021, 1024), False, 'import functools\n'), ((1548, 1573), 'attrs.field', 'attrs.field', ([], {'default': 'None'}), '(default=None)\n', (1559, 1573), False, 'import attrs\n'), ((1594, 1632), 'attrs.field', 'attrs.field', ([], {'default': '(False)', 'init': '(False)'}), '(default=False, init=False)\n', (1605, 1632), False, 'import attrs\n'), ((1856, 1882), 'aiohttp.ClientSession', 'aiohttp.ClientSession', (['url'], {}), '(url)\n', (1877, 1882), False, 'import aiohttp\n'), ((5743, 5765), 'lancedb.remote.VectorQueryResult', 'VectorQueryResult', (['tbl'], {}), '(tbl)\n', (5760, 5765), False, 'from lancedb.remote import VectorQuery, VectorQueryResult\n'), ((1327, 1353), 'pyarrow.BufferReader', 'pa.BufferReader', (['resp_body'], {}), '(resp_body)\n', (1342, 1353), True, 'import pyarrow as pa\n'), ((3473, 3504), 'aiohttp.ClientTimeout', 'aiohttp.ClientTimeout', ([], {'total': '(30)'}), '(total=30)\n', (3494, 3504), False, 'import aiohttp\n'), ((4904, 4935), 'aiohttp.ClientTimeout', 'aiohttp.ClientTimeout', ([], {'total': '(30)'}), '(total=30)\n', (4925, 4935), False, 'import aiohttp\n')] |
import os
import time
import shutil
import pandas as pd
import lancedb
from lancedb.embeddings import with_embeddings
from langchain import PromptTemplate
import predictionguard as pg
import numpy as np
from sentence_transformers import SentenceTransformer
#---------------------#
# Lance DB Setup #
#---------------------#
# Import datasets
# Jobs
df1 = pd.read_csv('datasets/jobs.csv')
df1_table_name = "jobs"
# Social
df2 = pd.read_csv('datasets/social.csv')
df2_table_name = "social"
# Movies
df3 = pd.read_csv('datasets/movies.csv')
df3_table_name = "movies"
# local path of the vector db
uri = "schema.lancedb"
db = lancedb.connect(uri)
# Embeddings setup
name="all-MiniLM-L12-v2"
# Load model
def load_model():
return SentenceTransformer(name)
def embed(query, embModel):
return embModel.encode(query)
#---------------------#
# SQL Schema Creation #
#---------------------#
def create_schema(df,table_name):
# Here we will create an example SQL schema based on the data in this dataset.
# In a real use case, you likely already have this sort of CREATE TABLE statement.
# Performance can be improved by manually curating the descriptions.
columns_info = []
# Iterate through each column in the DataFrame
for col in df.columns:
# Determine the SQL data type based on the first non-null value in the column
first_non_null = df[col].dropna().iloc[0]
if isinstance(first_non_null, np.int64):
kind = "INTEGER"
elif isinstance(first_non_null, np.float64):
kind = "DECIMAL(10,2)"
elif isinstance(first_non_null, str):
kind = "VARCHAR(255)" # Assuming a default max length of 255
else:
kind = "VARCHAR(255)" # Default to VARCHAR for other types or customize as needed
# Sample a few example values
example_values = ', '.join([str(x) for x in df[col].dropna().unique()[0:4]])
# Append column info to the list
columns_info.append(f"{col} {kind}, -- Example values are {example_values}")
# Construct the CREATE TABLE statement
create_table_statement = "CREATE TABLE" + " " + table_name + " (\n " + ",\n ".join(columns_info) + "\n);"
# Adjust the statement to handle the final comma, primary keys, or other specifics
create_table_statement = create_table_statement.replace(",\n);", "\n);")
return create_table_statement
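# Rough shape of the statement create_schema emits (illustrative; column names and
# example values are drawn from the first few non-null rows of each column):
#
#     CREATE TABLE jobs (
#         title VARCHAR(255), -- Example values are Engineer, Analyst,
#         salary INTEGER, -- Example values are 50000, 72000
#     );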
# SQL Schema for Table Jobs
df1_schema = create_schema(df1, df1_table_name)
# SQL Schema for Table Social
df2_schema = create_schema(df2, df2_table_name)
# SQL Schema for Table Movies
df3_schema = create_schema(df3, df3_table_name)
#---------------------#
# Prompt Templates #
#---------------------#
template="""
###System:
Generate a brief description of the below data. Be as detailed as possible.
###User:
{schema}
###Assistant:
"""
prompt = PromptTemplate(template=template, input_variables=["schema"])
#---------------------#
# Generate Description #
#---------------------#
def generate_description(schema):
    prompt_filled = prompt.format(schema=schema)
    result = pg.Completion.create(
        model="Neural-Chat-7B",
        prompt=prompt_filled,
        temperature=0.1,
        max_tokens=300
    )
return result['choices'][0]['text']
df1_desc = generate_description(df1_schema)
df2_desc = generate_description(df2_schema)
df3_desc = generate_description(df3_schema)
# Create Pandas DataFrame
df = pd.DataFrame({
'text': [df1_desc, df2_desc, df3_desc],
'table_name': [df1_table_name, df2_table_name, df3_table_name],
'schema': [df1_schema, df2_schema, df3_schema],
})
print(df)
def load_data():
if os.path.exists("schema.lancedb"):
shutil.rmtree("schema.lancedb")
os.mkdir("schema.lancedb")
db = lancedb.connect(uri)
batchModel = SentenceTransformer(name)
def batch_embed_func(batch):
return [batchModel.encode(sentence) for sentence in batch]
vecData = with_embeddings(batch_embed_func, df)
if "schema" not in db.table_names():
db.create_table("schema", data=vecData)
else:
table = db.open_table("schema")
table.add(data=vecData)
return
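# Routing sketch (assumption -- not part of this script): the "schema" table can be
# searched with an embedded question to pick which SQL table to query.
#
#     question_vec = embed("Which movies scored above 8?", load_model())
#     match = db.open_table("schema").search(question_vec).limit(1).to_df()
#     match["table_name"][0]  # -> e.g. "movies"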
load_data()
print("Done") | [
"lancedb.embeddings.with_embeddings",
"lancedb.connect"
] | [((359, 391), 'pandas.read_csv', 'pd.read_csv', (['"""datasets/jobs.csv"""'], {}), "('datasets/jobs.csv')\n", (370, 391), True, 'import pandas as pd\n'), ((429, 463), 'pandas.read_csv', 'pd.read_csv', (['"""datasets/social.csv"""'], {}), "('datasets/social.csv')\n", (440, 463), True, 'import pandas as pd\n'), ((503, 537), 'pandas.read_csv', 'pd.read_csv', (['"""datasets/movies.csv"""'], {}), "('datasets/movies.csv')\n", (514, 537), True, 'import pandas as pd\n'), ((623, 643), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (638, 643), False, 'import lancedb\n'), ((2866, 2927), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'template', 'input_variables': "['schema']"}), "(template=template, input_variables=['schema'])\n", (2880, 2927), False, 'from langchain import PromptTemplate\n'), ((3432, 3607), 'pandas.DataFrame', 'pd.DataFrame', (["{'text': [df1_desc, df2_desc, df3_desc], 'table_name': [df1_table_name,\n df2_table_name, df3_table_name], 'schema': [df1_schema, df2_schema,\n df3_schema]}"], {}), "({'text': [df1_desc, df2_desc, df3_desc], 'table_name': [\n df1_table_name, df2_table_name, df3_table_name], 'schema': [df1_schema,\n df2_schema, df3_schema]})\n", (3444, 3607), True, 'import pandas as pd\n'), ((732, 757), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['name'], {}), '(name)\n', (751, 757), False, 'from sentence_transformers import SentenceTransformer\n'), ((3095, 3198), 'predictionguard.Completion.create', 'pg.Completion.create', ([], {'model': '"""Neural-Chat-7B"""', 'prompt': 'prompt_filled', 'temperature': '(0.1)', 'max_tokens': '(300)'}), "(model='Neural-Chat-7B', prompt=prompt_filled,\n temperature=0.1, max_tokens=300)\n", (3115, 3198), True, 'import predictionguard as pg\n'), ((3651, 3683), 'os.path.exists', 'os.path.exists', (['"""schema.lancedb"""'], {}), "('schema.lancedb')\n", (3665, 3683), False, 'import os\n'), ((3729, 3755), 'os.mkdir', 'os.mkdir', (['"""schema.lancedb"""'], {}), "('schema.lancedb')\n", (3737, 3755), False, 'import os\n'), ((3765, 3785), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (3780, 3785), False, 'import lancedb\n'), ((3808, 3833), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['name'], {}), '(name)\n', (3827, 3833), False, 'from sentence_transformers import SentenceTransformer\n'), ((3953, 3990), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['batch_embed_func', 'df'], {}), '(batch_embed_func, df)\n', (3968, 3990), False, 'from lancedb.embeddings import with_embeddings\n'), ((3693, 3724), 'shutil.rmtree', 'shutil.rmtree', (['"""schema.lancedb"""'], {}), "('schema.lancedb')\n", (3706, 3724), False, 'import shutil\n')] |
import typer
import openai
from rag_app.models import TextChunk
from lancedb import connect
from typing import List
from pathlib import Path
from rich.console import Console
from rich.table import Table
from rich import box
import duckdb
app = typer.Typer()
@app.command(help="Query LanceDB for some results")
def db(
db_path: str = typer.Option(help="Your LanceDB path"),
table_name: str = typer.Option(help="Table to ingest data into"),
query: str = typer.Option(help="Text to query against existing vector db chunks"),
n: int = typer.Option(default=3, help="Maximum number of chunks to return"),
):
if not Path(db_path).exists():
raise ValueError(f"Database path {db_path} does not exist.")
db = connect(db_path)
db_table = db.open_table(table_name)
client = openai.OpenAI()
query_vector = (
client.embeddings.create(
input=query, model="text-embedding-3-large", dimensions=256
)
.data[0]
.embedding
)
results: List[TextChunk] = (
db_table.search(query_vector).limit(n).to_pydantic(TextChunk)
)
sql_table = db_table.to_lance()
df = duckdb.query(
"SELECT doc_id, count(chunk_id) as count FROM sql_table GROUP BY doc_id"
).to_df()
doc_ids = df["doc_id"].to_list()
counts = df["count"].to_list()
doc_id_to_count = {id: chunk_count for id, chunk_count in zip(doc_ids, counts)}
table = Table(title="Results", box=box.HEAVY, padding=(1, 2), show_lines=True)
table.add_column("Post Title", style="green", max_width=30)
table.add_column("Content", style="magenta", max_width=120)
table.add_column("Chunk Number", style="yellow")
table.add_column("Publish Date", style="blue")
for result in results:
chunk_number = f"{result.chunk_id}"
table.add_row(
f"{result.post_title}({result.source})",
result.text,
f"{chunk_number}/{doc_id_to_count[result.doc_id]}",
result.publish_date.strftime("%Y-%m"),
)
Console().print(table)
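# Example invocation (illustrative; script name and option values are assumptions):
#
#     python query.py db --db-path ./lancedb --table-name chunks \
#         --query "What is retrieval augmented generation?" --n 3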
| [
"lancedb.connect"
] | [((245, 258), 'typer.Typer', 'typer.Typer', ([], {}), '()\n', (256, 258), False, 'import typer\n'), ((340, 378), 'typer.Option', 'typer.Option', ([], {'help': '"""Your LanceDB path"""'}), "(help='Your LanceDB path')\n", (352, 378), False, 'import typer\n'), ((402, 448), 'typer.Option', 'typer.Option', ([], {'help': '"""Table to ingest data into"""'}), "(help='Table to ingest data into')\n", (414, 448), False, 'import typer\n'), ((467, 535), 'typer.Option', 'typer.Option', ([], {'help': '"""Text to query against existing vector db chunks"""'}), "(help='Text to query against existing vector db chunks')\n", (479, 535), False, 'import typer\n'), ((550, 616), 'typer.Option', 'typer.Option', ([], {'default': '(3)', 'help': '"""Maximum number of chunks to return"""'}), "(default=3, help='Maximum number of chunks to return')\n", (562, 616), False, 'import typer\n'), ((734, 750), 'lancedb.connect', 'connect', (['db_path'], {}), '(db_path)\n', (741, 750), False, 'from lancedb import connect\n'), ((806, 821), 'openai.OpenAI', 'openai.OpenAI', ([], {}), '()\n', (819, 821), False, 'import openai\n'), ((1437, 1507), 'rich.table.Table', 'Table', ([], {'title': '"""Results"""', 'box': 'box.HEAVY', 'padding': '(1, 2)', 'show_lines': '(True)'}), "(title='Results', box=box.HEAVY, padding=(1, 2), show_lines=True)\n", (1442, 1507), False, 'from rich.table import Table\n'), ((1157, 1248), 'duckdb.query', 'duckdb.query', (['"""SELECT doc_id, count(chunk_id) as count FROM sql_table GROUP BY doc_id"""'], {}), "(\n 'SELECT doc_id, count(chunk_id) as count FROM sql_table GROUP BY doc_id')\n", (1169, 1248), False, 'import duckdb\n'), ((2042, 2051), 'rich.console.Console', 'Console', ([], {}), '()\n', (2049, 2051), False, 'from rich.console import Console\n'), ((632, 645), 'pathlib.Path', 'Path', (['db_path'], {}), '(db_path)\n', (636, 645), False, 'from pathlib import Path\n')] |
import typer
from lancedb import connect
from rag_app.models import TextChunk, Document
from pathlib import Path
from typing import Iterable
from tqdm import tqdm
from rich import print
import frontmatter
import hashlib
from datetime import datetime
from unstructured.partition.text import partition_text
app = typer.Typer()
def read_files(path: Path, file_suffix: str) -> Iterable[Document]:
for i, file in enumerate(path.iterdir()):
if file.suffix != file_suffix:
continue
post = frontmatter.load(file)
yield Document(
id=hashlib.md5(post.content.encode("utf-8")).hexdigest(),
content=post.content,
filename=file.name,
metadata=post.metadata,
)
def batch_chunks(chunks, batch_size=20):
batch = []
for chunk in chunks:
batch.append(chunk)
if len(batch) == batch_size:
yield batch
batch = []
if batch:
yield batch
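# Sketch (illustrative) of the batching behavior, including the leftover batch:
#
#     >>> list(batch_chunks([1, 2, 3, 4, 5], batch_size=2))
#     [[1, 2], [3, 4], [5]]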
def chunk_text(
documents: Iterable[Document], window_size: int = 1024, overlap: int = 0
):
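    # NOTE: window_size and overlap are accepted but currently unused;
    # partition_text decides the chunk boundaries on its own.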
for doc in documents:
for chunk_num, chunk in enumerate(partition_text(text=doc.content)):
yield {
"doc_id": doc.id,
"chunk_id": chunk_num + 1,
"text": chunk.text,
"post_title": doc.metadata["title"],
"publish_date": datetime.strptime(doc.metadata["date"], "%Y-%m"),
"source": doc.metadata["url"],
}
@app.command(help="Ingest data into a given lancedb")
def from_folder(
db_path: str = typer.Option(help="Your LanceDB path"),
table_name: str = typer.Option(help="Table to ingest data into"),
folder_path: str = typer.Option(help="Folder to read data from"),
file_suffix: str = typer.Option(default=".md", help="File suffix to filter by"),
):
db = connect(db_path)
if table_name not in db.table_names():
db.create_table(table_name, schema=TextChunk, mode="overwrite")
table = db.open_table(table_name)
path = Path(folder_path)
if not path.exists():
raise ValueError(f"Ingestion folder of {folder_path} does not exist")
files = read_files(path, file_suffix)
chunks = chunk_text(files)
batched_chunks = batch_chunks(chunks)
ttl = 0
for chunk_batch in tqdm(batched_chunks):
table.add(chunk_batch)
ttl += len(chunk_batch)
print(f"Added {ttl} chunks to {table_name}")
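# Example invocation (illustrative paths):
#
#     python ingest.py from-folder --db-path ./lancedb --table-name chunks \
#         --folder-path ./posts --file-suffix .md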
| [
"lancedb.connect"
] | [((312, 325), 'typer.Typer', 'typer.Typer', ([], {}), '()\n', (323, 325), False, 'import typer\n'), ((1598, 1636), 'typer.Option', 'typer.Option', ([], {'help': '"""Your LanceDB path"""'}), "(help='Your LanceDB path')\n", (1610, 1636), False, 'import typer\n'), ((1660, 1706), 'typer.Option', 'typer.Option', ([], {'help': '"""Table to ingest data into"""'}), "(help='Table to ingest data into')\n", (1672, 1706), False, 'import typer\n'), ((1731, 1776), 'typer.Option', 'typer.Option', ([], {'help': '"""Folder to read data from"""'}), "(help='Folder to read data from')\n", (1743, 1776), False, 'import typer\n'), ((1801, 1861), 'typer.Option', 'typer.Option', ([], {'default': '""".md"""', 'help': '"""File suffix to filter by"""'}), "(default='.md', help='File suffix to filter by')\n", (1813, 1861), False, 'import typer\n'), ((1875, 1891), 'lancedb.connect', 'connect', (['db_path'], {}), '(db_path)\n', (1882, 1891), False, 'from lancedb import connect\n'), ((2058, 2075), 'pathlib.Path', 'Path', (['folder_path'], {}), '(folder_path)\n', (2062, 2075), False, 'from pathlib import Path\n'), ((2333, 2353), 'tqdm.tqdm', 'tqdm', (['batched_chunks'], {}), '(batched_chunks)\n', (2337, 2353), False, 'from tqdm import tqdm\n'), ((2423, 2467), 'rich.print', 'print', (['f"""Added {ttl} chunks to {table_name}"""'], {}), "(f'Added {ttl} chunks to {table_name}')\n", (2428, 2467), False, 'from rich import print\n'), ((517, 539), 'frontmatter.load', 'frontmatter.load', (['file'], {}), '(file)\n', (533, 539), False, 'import frontmatter\n'), ((1142, 1174), 'unstructured.partition.text.partition_text', 'partition_text', ([], {'text': 'doc.content'}), '(text=doc.content)\n', (1156, 1174), False, 'from unstructured.partition.text import partition_text\n'), ((1395, 1443), 'datetime.datetime.strptime', 'datetime.strptime', (["doc.metadata['date']", '"""%Y-%m"""'], {}), "(doc.metadata['date'], '%Y-%m')\n", (1412, 1443), False, 'from datetime import datetime\n')] |
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
from lancedb.utils import CONFIG
@click.group()
@click.version_option(help="LanceDB command line interface entry point")
def cli():
"LanceDB command line interface"
diagnostics_help = """
Enable or disable LanceDB diagnostics. When enabled, LanceDB will send anonymous events to help us improve LanceDB.
These diagnostics are used only for error reporting and no data is collected. You can find more about diagnosis on
our docs: https://lancedb.github.io/lancedb/cli_config/
"""
@cli.command(help=diagnostics_help)
@click.option("--enabled/--disabled", default=True)
def diagnostics(enabled):
CONFIG.update({"diagnostics": True if enabled else False})
click.echo("LanceDB diagnostics is %s" % ("enabled" if enabled else "disabled"))
@cli.command(help="Show current LanceDB configuration")
def config():
# TODO: pretty print as table with colors and formatting
click.echo("Current LanceDB configuration:")
cfg = CONFIG.copy()
cfg.pop("uuid") # Don't show uuid as it is not configurable
for item, amount in cfg.items():
click.echo("{} ({})".format(item, amount))
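# Example shell usage (assumption -- this group is exposed as a console script
# named `lancedb`):
#
#     lancedb diagnostics --disabled
#     lancedb config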
| [
"lancedb.utils.CONFIG.copy",
"lancedb.utils.CONFIG.update"
] | [((641, 654), 'click.group', 'click.group', ([], {}), '()\n', (652, 654), False, 'import click\n'), ((656, 727), 'click.version_option', 'click.version_option', ([], {'help': '"""LanceDB command line interface entry point"""'}), "(help='LanceDB command line interface entry point')\n", (676, 727), False, 'import click\n'), ((1131, 1181), 'click.option', 'click.option', (['"""--enabled/--disabled"""'], {'default': '(True)'}), "('--enabled/--disabled', default=True)\n", (1143, 1181), False, 'import click\n'), ((1212, 1270), 'lancedb.utils.CONFIG.update', 'CONFIG.update', (["{'diagnostics': True if enabled else False}"], {}), "({'diagnostics': True if enabled else False})\n", (1225, 1270), False, 'from lancedb.utils import CONFIG\n'), ((1275, 1360), 'click.echo', 'click.echo', (["('LanceDB diagnostics is %s' % ('enabled' if enabled else 'disabled'))"], {}), "('LanceDB diagnostics is %s' % ('enabled' if enabled else 'disabled')\n )\n", (1285, 1360), False, 'import click\n'), ((1493, 1537), 'click.echo', 'click.echo', (['"""Current LanceDB configuration:"""'], {}), "('Current LanceDB configuration:')\n", (1503, 1537), False, 'import click\n'), ((1548, 1561), 'lancedb.utils.CONFIG.copy', 'CONFIG.copy', ([], {}), '()\n', (1559, 1561), False, 'from lancedb.utils import CONFIG\n')] |
import json
from sentence_transformers import SentenceTransformer
from pydantic.main import ModelMetaclass
from pathlib import Path
import pandas as pd
import sqlite3
from uuid import uuid4
import lancedb
encoder = SentenceTransformer('all-MiniLM-L6-v2')
data_folder = Path('data/collections')
config_file = Path('data/config/indexes.yaml')
index_folder = Path('indexes')
lance_folder = Path('indexes')
lance_folder.mkdir(parents=True, exist_ok=True)
sqlite_folder = Path('data/indexes/')
with sqlite3.connect(sqlite_folder.joinpath('documents.sqlite')) as conn:
cursor = conn.cursor()
cursor.execute('SELECT SQLITE_VERSION()')
data = cursor.fetchone()
print(f"Sqlite version: {data}")
class LanceDBDocument():
def __init__(self, document:dict, title:str, text:str, fields, tags=None, date=None, file_path=None):
self.document = self.fill_missing_fields(document, text, title, tags, date)
self.file_path = file_path
self.metadata = {k:document[k] for k in fields if k not in [title, text, tags, date]}
self.uuid = str(uuid4()) if 'uuid' not in document else document['uuid']
self.save_uuids = list()
self.sqlite_fields = list()
self.lance_exclude = list()
def fill_missing_fields(self, document, text, title, tags, date):
if title not in document:
self.title = ''
else:
self.title = document[title]
if text not in document:
self.text = ''
else:
self.text = document[text]
if date not in document:
self.date = ''
else:
self.date = document[date]
if tags not in document:
self.tags = list()
else:
self.tags = document[tags]
def create_json_document(self, text, uuids=None):
"""Creates a custom dictionary object that can be used for both sqlite and lancedb
The full document is always stored in sqlite where fixed fields are:
title
text
date
filepath
document_uuid - used for retrieval from lancedb results
Json field contains the whole document for retrieval and display
Lancedb only gets searching text, vectorization of that, and filter fields
"""
_document = {'title':self.title,
'text':text,
'tags':self.tags,
'date':self.date,
'file_path':str(self.file_path),
'uuid':self.uuid,
'metadata': self.metadata}
self._enforce_tags_schema()
for field in ['title','date','file_path']:
self.enforce_string_schema(field, _document)
return _document
def enforce_string_schema(self, field, test_document):
if not isinstance(test_document[field], str):
self.lance_exclude.append(field)
def _enforce_tags_schema(self):
# This enforces a simple List[str] format for the tags to match what lancedb can use for filtering
# If they are of type List[Dict] as a nested field, they are stored in sqlite for retrieval
if isinstance(self.tags, list):
tags_are_list = True
for _tag in self.tags:
if not isinstance(_tag, str):
tags_are_list = False
break
if not tags_are_list:
self.lance_exclude.append('tags')
def return_document(self):
document = self.create_json_document(self.text)
return document
class SqlLiteIngestNotes():
def __init__(self, documents, source_file, db_location, index_name, overwrite):
self.documents = documents
self.source_file = source_file
self.db_location = db_location
self.index_name = index_name
self.overwrite = overwrite
def initialize(self):
self.connection = sqlite3.connect(self.db_location)
if self.overwrite:
self.connection.execute(f"""DROP TABLE IF EXISTS {self.index_name};""")
self.connection.commit()
table_exists = self.connection.execute(f"SELECT name FROM sqlite_master WHERE type='table' AND name='{self.index_name}';").fetchall()
if len(table_exists) == 0:
self.connection.execute(f"""
CREATE TABLE {self.index_name}(
id INTEGER PRIMARY KEY NOT NULL,
uuid STRING NOT NULL UNIQUE,
text STRING NOT NULL,
title STRING,
date STRING,
source_file STRING,
metadata JSONB);""")
self.connection.commit()
def insert(self, document):
self.connection.execute(f"""INSERT OR IGNORE INTO
{self.index_name} (uuid, text, title, date, source_file, metadata)
VALUES ('{document.uuid.replace("'","''")}', '{document.text.replace("'","''")}',
'{document.title.replace("'","''")}', '{document.date.replace("'","''")}',
'{self.index_name.replace("'","''")}', '{json.dumps(document.metadata).replace("'","''")}');""")
def bulk_insert(self):
for document in self.documents:
self.insert(document)
self.connection.commit()
self.connection.close()
from lancedb.pydantic import LanceModel, Vector, List
class LanceDBSchema384(LanceModel):
uuid: str
text: str
title: str
tags: List[str]
vector: Vector(384)
class LanceDBSchema512(LanceModel):
uuid: str
text: str
title: str
tags: List[str]
vector: Vector(512)
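# The two schemas differ only in embedding width. all-MiniLM-L12-v2 (the encoder
# loaded above) produces 384-dimensional vectors, so LanceDBSchema384 is the one
# passed to ingest below; LanceDBSchema512 presumably targets 512-dim encoders
# (assumption).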
class LanceDBIngestNotes():
def __init__(self, documents, lance_location, index_name, overwrite, encoder, schema):
self.documents = documents
self.lance_location = lance_location
self.index_name = index_name
self.overwrite = overwrite
self.encoder = encoder
self.schema = schema
def initialize(self):
self.db = lancedb.connect(self.lance_location)
existing_tables = self.db.table_names()
self.documents = [self.prep_documents(document) for document in self.documents]
if self.overwrite:
self.table = self.db.create_table(self.index_name, data=self.documents, mode='overwrite', schema=self.schema.to_arrow_schema())
else:
if self.index_name in existing_tables:
self.table = self.db.open_table(self.index_name)
self.table.add(self.documents)
else:
self.table = self.db.create_table(self.index_name, data=self.documents, schema=self.schema.to_arrow_schema())
def prep_documents(self, document):
lance_document = dict()
lance_document['text'] = document.text
lance_document['vector'] = self.encoder.encode(document.text)
lance_document['uuid'] = document.uuid
lance_document['title'] = document.title
lance_document['tags'] = document.tags
return lance_document
def insert(self, document):
document['vector'] = self.encoder.encode(document.text)
self.table.add(document)
def bulk_insert(self, create_vectors=False):
if create_vectors:
self.table.create_index(vector_column_name='vector', metric='cosine')
self.table.create_fts_index(field_names=['title','text'], replace=True)
return self.table
class IndexDocumentsNotes():
def __init__(self,field_mapping, source_file, index_name, overwrite):
self.field_mapping = field_mapping
self.source_file = source_file
self.index_name = index_name
self.overwrite = overwrite
def open_json(self):
with open(self.source_file, 'r') as f:
self.data = json.load(f)
print(self.data)
def open_csv(self):
self.data = pd.read_csv(self.source_file)
def create_document(self, document):
document = LanceDBDocument(document,
text=self.field_mapping['text'],
title=self.field_mapping['title'],
tags=self.field_mapping['tags'],
date=self.field_mapping['date'],
fields=list(document.keys()),
file_path=self.source_file
)
return document
def create_documents(self):
self.documents = [self.create_document(document) for document in self.data]
def ingest(self, overwrite=False):
# lance_path = Path(f'../indexes/lance')
lance_folder.mkdir(parents=True, exist_ok=True)
lance_ingest = LanceDBIngestNotes(documents=self.documents,
lance_location=lance_folder,
# field_mapping=self.field_mapping,
index_name=self.index_name,
overwrite=self.overwrite,
encoder=encoder,
schema=LanceDBSchema384)
lance_ingest.initialize()
if len(self.documents) <= 256:
_table = lance_ingest.bulk_insert(create_vectors=False)
else:
_table = lance_ingest.bulk_insert(create_vectors=True)
sql_path = sqlite_folder.joinpath('documents.sqlite')
sqlite_ingest = SqlLiteIngestNotes(documents=self.documents,
source_file=self.source_file,
db_location=sql_path,
index_name=self.index_name,
overwrite=self.overwrite)
sqlite_ingest.initialize()
sqlite_ingest.bulk_insert()
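# Illustrative driver (assumption -- field names and paths are hypothetical):
#
#     indexer = IndexDocumentsNotes(
#         field_mapping={'text': 'body', 'title': 'title', 'tags': 'tags', 'date': 'date'},
#         source_file='data/collections/notes.json',
#         index_name='notes',
#         overwrite=True,
#     )
#     indexer.open_json()
#     indexer.create_documents()
#     indexer.ingest()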
| [
"lancedb.pydantic.Vector",
"lancedb.connect"
] | [((216, 255), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['"""all-MiniLM-L6-v2"""'], {}), "('all-MiniLM-L6-v2')\n", (235, 255), False, 'from sentence_transformers import SentenceTransformer\n'), ((271, 295), 'pathlib.Path', 'Path', (['"""data/collections"""'], {}), "('data/collections')\n", (275, 295), False, 'from pathlib import Path\n'), ((310, 342), 'pathlib.Path', 'Path', (['"""data/config/indexes.yaml"""'], {}), "('data/config/indexes.yaml')\n", (314, 342), False, 'from pathlib import Path\n'), ((358, 373), 'pathlib.Path', 'Path', (['"""indexes"""'], {}), "('indexes')\n", (362, 373), False, 'from pathlib import Path\n'), ((390, 405), 'pathlib.Path', 'Path', (['"""indexes"""'], {}), "('indexes')\n", (394, 405), False, 'from pathlib import Path\n'), ((471, 492), 'pathlib.Path', 'Path', (['"""data/indexes/"""'], {}), "('data/indexes/')\n", (475, 492), False, 'from pathlib import Path\n'), ((5615, 5626), 'lancedb.pydantic.Vector', 'Vector', (['(384)'], {}), '(384)\n', (5621, 5626), False, 'from lancedb.pydantic import LanceModel, Vector, List\n'), ((5739, 5750), 'lancedb.pydantic.Vector', 'Vector', (['(512)'], {}), '(512)\n', (5745, 5750), False, 'from lancedb.pydantic import LanceModel, Vector, List\n'), ((4114, 4147), 'sqlite3.connect', 'sqlite3.connect', (['self.db_location'], {}), '(self.db_location)\n', (4129, 4147), False, 'import sqlite3\n'), ((6128, 6164), 'lancedb.connect', 'lancedb.connect', (['self.lance_location'], {}), '(self.lance_location)\n', (6143, 6164), False, 'import lancedb\n'), ((7989, 8018), 'pandas.read_csv', 'pd.read_csv', (['self.source_file'], {}), '(self.source_file)\n', (8000, 8018), True, 'import pandas as pd\n'), ((7902, 7914), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7911, 7914), False, 'import json\n'), ((1248, 1255), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (1253, 1255), False, 'from uuid import uuid4\n'), ((5257, 5286), 'json.dumps', 'json.dumps', (['document.metadata'], {}), '(document.metadata)\n', (5267, 5286), False, 'import json\n')] |
import argparse
import pandas as pd
from unstructured.partition.pdf import partition_pdf
import lancedb.embeddings.gte
from lancedb.embeddings import get_registry
from lancedb.pydantic import LanceModel, Vector
def split_text_into_chunks(text, chunk_size, overlap):
"""
Split text into chunks with a specified size and overlap.
Parameters:
- text (str): The input text to be split into chunks.
- chunk_size (int): The size of each chunk.
- overlap (int): The number of characters to overlap between consecutive chunks.
Returns:
- List of chunks (str).
"""
if chunk_size <= 0 or overlap < 0:
raise ValueError("Invalid chunk size or overlap value.")
chunks = []
start = 0
while start < len(text):
end = start + chunk_size
chunk = text[start:end]
chunks.append(chunk)
start += chunk_size - overlap
return chunks
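# Sketch (illustrative) of the sliding-window behavior:
#
#     >>> split_text_into_chunks("abcdefghij", chunk_size=4, overlap=1)
#     ['abcd', 'defg', 'ghij', 'j']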
def pdf_to_lancedb(pdf_file: str, path: str = "/tmp/lancedb"):
"""
create lancedb table from a pdf file
Parameters:
- pdf_file (str): The path to the input PDF file.
- path (str): The path to store the vector DB.
default: /tmp/lancedb
Returns:
- None
"""
elements = partition_pdf(pdf_file)
content = "\n\n".join([e.text for e in elements])
chunks = split_text_into_chunks(text=content, chunk_size=1000, overlap=200)
model = (
get_registry().get("gte-text").create(mlx=True)
) # mlx=True for Apple silicon only.
class TextModel(LanceModel):
text: str = model.SourceField()
vector: Vector(model.ndims()) = model.VectorField()
df = pd.DataFrame({"text": chunks})
db = lancedb.connect(path)
tbl = db.create_table("test", schema=TextModel, mode="overwrite")
tbl.add(df)
return None
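# Query-side sketch (assumption -- not part of this script): because the table schema
# carries an embedding function, plain-text search works directly:
#
#     db = lancedb.connect("/tmp/lancedb")
#     tbl = db.open_table("test")
#     tbl.search("What is flash attention?").limit(3).to_pandas()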
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create a Vector DB from a PDF file")
# Input
parser.add_argument(
"--pdf",
help="The path to the input PDF file",
default="flash_attention.pdf",
)
# Output
parser.add_argument(
"--db_path",
type=str,
default="/tmp/lancedb",
help="The path to store the vector DB",
)
args = parser.parse_args()
pdf_to_lancedb(args.pdf, args.db_path)
print("ingestion done , move to query!")
| [
"lancedb.embeddings.get_registry"
] | [((1242, 1265), 'unstructured.partition.pdf.partition_pdf', 'partition_pdf', (['pdf_file'], {}), '(pdf_file)\n', (1255, 1265), False, 'from unstructured.partition.pdf import partition_pdf\n'), ((1658, 1688), 'pandas.DataFrame', 'pd.DataFrame', (["{'text': chunks}"], {}), "({'text': chunks})\n", (1670, 1688), True, 'import pandas as pd\n'), ((1864, 1937), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create a Vector DB from a PDF file"""'}), "(description='Create a Vector DB from a PDF file')\n", (1887, 1937), False, 'import argparse\n'), ((1424, 1438), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (1436, 1438), False, 'from lancedb.embeddings import get_registry\n')] |
import os
import shutil
from pathlib import Path
import lancedb
from lancedb.pydantic import LanceModel, Vector, pydantic_to_schema
from langchain.document_loaders import TextLoader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import LanceDB
# LanceDB pydantic schema
class Content(LanceModel):
text: str
vector: Vector(384)
def get_files() -> list[str]:
# Get a list of files from the data directory
data_dir = Path("../data")
txt_files = list(data_dir.glob("*.txt"))
# Return string of paths or else lancedb/pydantic will complain
txt_files = [str(f) for f in txt_files]
return txt_files
def get_docs(txt_files: list[str]):
loaders = [TextLoader(f) for f in txt_files]
docs = [loader.load() for loader in loaders]
return docs
def create_lance_table(table_name: str) -> lancedb.table.LanceTable:
try:
# Create empty table if it does not exist
tbl = db.create_table(table_name, schema=pydantic_to_schema(Content), mode="overwrite")
except OSError:
# If table exists, open it
        tbl = db.open_table(table_name)
return tbl
async def search_lancedb(query: str, retriever: LanceDB) -> list[Content]:
"Perform async retrieval from LanceDB"
search_result = await retriever.asimilarity_search(query, k=5)
if len(search_result) > 0:
print(search_result[0].page_content)
else:
print("Failed to find similar result")
return search_result
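# Illustrative async driver (assumption -- not in the original script):
#
#     import asyncio
#     retriever = main()
#     asyncio.run(search_lancedb("Is Tonga a monarchy or a democracy?", retriever))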
def main() -> None:
txt_files = get_files()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=50)
embeddings = HuggingFaceEmbeddings(
model_name="sentence-transformers/all-MiniLM-L6-v2", model_kwargs={"device": "cpu"}
)
tbl = create_lance_table("countries")
docs = get_docs(txt_files)
chunked_docs = []
for doc in docs:
chunked_docs.extend(text_splitter.split_documents(doc))
# Ingest docs in append mode
retriever = LanceDB.from_documents(chunked_docs, embeddings, connection=tbl)
return retriever
if __name__ == "__main__":
DB_NAME = "./db"
TABLE = "countries"
if os.path.exists(DB_NAME):
# Clear DB if it exists
shutil.rmtree(DB_NAME)
db = lancedb.connect(DB_NAME)
retriever = main()
print("Finished loading documents to LanceDB")
query = "Is Tonga a monarchy or a democracy"
docsearch = retriever.as_retriever(
search_kwargs={"k": 3, "threshold": 0.8, "return_vector": False}
)
search_result = docsearch.get_relevant_documents(query)
if len(search_result) > 0:
print(f"Found {len(search_result)} relevant results")
print([r.page_content for r in search_result])
else:
print("Failed to find relevant result") | [
"lancedb.pydantic.Vector",
"lancedb.connect",
"lancedb.pydantic.pydantic_to_schema"
] | [((429, 440), 'lancedb.pydantic.Vector', 'Vector', (['(384)'], {}), '(384)\n', (435, 440), False, 'from lancedb.pydantic import LanceModel, Vector, pydantic_to_schema\n'), ((538, 553), 'pathlib.Path', 'Path', (['"""../data"""'], {}), "('../data')\n", (542, 553), False, 'from pathlib import Path\n'), ((1650, 1714), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(512)', 'chunk_overlap': '(50)'}), '(chunk_size=512, chunk_overlap=50)\n', (1680, 1714), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1732, 1842), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""sentence-transformers/all-MiniLM-L6-v2"""', 'model_kwargs': "{'device': 'cpu'}"}), "(model_name='sentence-transformers/all-MiniLM-L6-v2',\n model_kwargs={'device': 'cpu'})\n", (1753, 1842), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((2082, 2146), 'langchain.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', (['chunked_docs', 'embeddings'], {'connection': 'tbl'}), '(chunked_docs, embeddings, connection=tbl)\n', (2104, 2146), False, 'from langchain.vectorstores import LanceDB\n'), ((2249, 2272), 'os.path.exists', 'os.path.exists', (['DB_NAME'], {}), '(DB_NAME)\n', (2263, 2272), False, 'import os\n'), ((2347, 2371), 'lancedb.connect', 'lancedb.connect', (['DB_NAME'], {}), '(DB_NAME)\n', (2362, 2371), False, 'import lancedb\n'), ((785, 798), 'langchain.document_loaders.TextLoader', 'TextLoader', (['f'], {}), '(f)\n', (795, 798), False, 'from langchain.document_loaders import TextLoader\n'), ((2314, 2336), 'shutil.rmtree', 'shutil.rmtree', (['DB_NAME'], {}), '(DB_NAME)\n', (2327, 2336), False, 'import shutil\n'), ((1063, 1090), 'lancedb.pydantic.pydantic_to_schema', 'pydantic_to_schema', (['Content'], {}), '(Content)\n', (1081, 1090), False, 'from lancedb.pydantic import LanceModel, Vector, pydantic_to_schema\n')] |
import lancedb
import uuid
from datetime import datetime
from tqdm import tqdm
from abc import abstractmethod
from typing import Optional, List, Iterator, Dict
from memgpt.config import AgentConfig, MemGPTConfig
from memgpt.agent_store.storage import StorageConnector, TableType
from memgpt.constants import MEMGPT_DIR
from memgpt.utils import printd
from memgpt.data_types import Record, Message, Passage, Source
from lancedb.pydantic import Vector, LanceModel
""" Initial implementation - not complete """
def get_db_model(table_name: str, table_type: TableType):
config = MemGPTConfig.load()
if table_type == TableType.ARCHIVAL_MEMORY or table_type == TableType.PASSAGES:
# create schema for archival memory
class PassageModel(LanceModel):
"""Defines data model for storing Passages (consisting of text, embedding)"""
id: uuid.UUID
user_id: str
text: str
doc_id: str
agent_id: str
data_source: str
embedding: Vector(config.embedding_dim)
metadata_: Dict
def __repr__(self):
return f"<Passage(passage_id='{self.id}', text='{self.text}', embedding='{self.embedding})>"
def to_record(self):
return Passage(
text=self.text,
embedding=self.embedding,
doc_id=self.doc_id,
user_id=self.user_id,
id=self.id,
data_source=self.data_source,
agent_id=self.agent_id,
metadata=self.metadata_,
)
return PassageModel
elif table_type == TableType.RECALL_MEMORY:
class MessageModel(LanceModel):
"""Defines data model for storing Message objects"""
__abstract__ = True # this line is necessary
# Assuming message_id is the primary key
id: uuid.UUID
user_id: str
agent_id: str
            # openai info
            role: str
            name: str  # used by to_record() below
            text: str
            model: str
            user: str
# function info
function_name: str
function_args: str
function_response: str
            embedding: Vector(config.embedding_dim)
            # Add a datetime column, with default value as the current time
            created_at: datetime
def __repr__(self):
return f"<Message(message_id='{self.id}', text='{self.text}', embedding='{self.embedding})>"
def to_record(self):
return Message(
user_id=self.user_id,
agent_id=self.agent_id,
role=self.role,
name=self.name,
text=self.text,
model=self.model,
function_name=self.function_name,
function_args=self.function_args,
function_response=self.function_response,
embedding=self.embedding,
created_at=self.created_at,
id=self.id,
)
"""Create database model for table_name"""
return MessageModel
elif table_type == TableType.DATA_SOURCES:
class SourceModel(LanceModel):
"""Defines data model for storing Passages (consisting of text, embedding)"""
# Assuming passage_id is the primary key
id: uuid.UUID
user_id: str
name: str
created_at: datetime
def __repr__(self):
return f"<Source(passage_id='{self.id}', name='{self.name}')>"
def to_record(self):
return Source(id=self.id, user_id=self.user_id, name=self.name, created_at=self.created_at)
"""Create database model for table_name"""
return SourceModel
else:
raise ValueError(f"Table type {table_type} not implemented")
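# Illustrative usage (assumption -- not part of this module):
#
#     PassageModel = get_db_model("passages", TableType.ARCHIVAL_MEMORY)
#     db = lancedb.connect(f"{MEMGPT_DIR}/lancedb")
#     table = db.create_table("passages", schema=PassageModel)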
class LanceDBConnector(StorageConnector):
"""Storage via LanceDB"""
# TODO: this should probably eventually be moved into a parent DB class
def __init__(self, name: Optional[str] = None, agent_config: Optional[AgentConfig] = None):
# TODO
pass
def generate_where_filter(self, filters: Dict) -> str:
where_filters = []
for key, value in filters.items():
where_filters.append(f"{key}={value}")
return where_filters.join(" AND ")
@abstractmethod
def get_all_paginated(self, filters: Optional[Dict] = {}, page_size: Optional[int] = 1000) -> Iterator[List[Record]]:
# TODO
pass
@abstractmethod
def get_all(self, filters: Optional[Dict] = {}, limit=10) -> List[Record]:
# TODO
pass
@abstractmethod
def get(self, id: str) -> Optional[Record]:
# TODO
pass
@abstractmethod
def size(self, filters: Optional[Dict] = {}) -> int:
# TODO
pass
@abstractmethod
def insert(self, record: Record):
# TODO
pass
@abstractmethod
def insert_many(self, records: List[Record], show_progress=False):
# TODO
pass
@abstractmethod
def query(self, query: str, query_vec: List[float], top_k: int = 10, filters: Optional[Dict] = {}) -> List[Record]:
# TODO
pass
@abstractmethod
def query_date(self, start_date, end_date):
# TODO
pass
@abstractmethod
def query_text(self, query):
# TODO
pass
@abstractmethod
def delete_table(self):
# TODO
pass
@abstractmethod
def delete(self, filters: Optional[Dict] = {}):
# TODO
pass
@abstractmethod
def save(self):
# TODO
pass
| [
"lancedb.pydantic.Vector"
] | [((623, 642), 'memgpt.config.MemGPTConfig.load', 'MemGPTConfig.load', ([], {}), '()\n', (640, 642), False, 'from memgpt.config import AgentConfig, MemGPTConfig\n'), ((1078, 1106), 'lancedb.pydantic.Vector', 'Vector', (['config.embedding_dim'], {}), '(config.embedding_dim)\n', (1084, 1106), False, 'from lancedb.pydantic import Vector, LanceModel\n'), ((1334, 1524), 'memgpt.data_types.Passage', 'Passage', ([], {'text': 'self.text', 'embedding': 'self.embedding', 'doc_id': 'self.doc_id', 'user_id': 'self.user_id', 'id': 'self.id', 'data_source': 'self.data_source', 'agent_id': 'self.agent_id', 'metadata': 'self.metadata_'}), '(text=self.text, embedding=self.embedding, doc_id=self.doc_id,\n user_id=self.user_id, id=self.id, data_source=self.data_source,\n agent_id=self.agent_id, metadata=self.metadata_)\n', (1341, 1524), False, 'from memgpt.data_types import Record, Message, Passage, Source\n'), ((2336, 2364), 'lancedb.pydantic.Vector', 'Vector', (['config.embedding_dim'], {}), '(config.embedding_dim)\n', (2342, 2364), False, 'from lancedb.pydantic import Vector, LanceModel\n'), ((2675, 2990), 'memgpt.data_types.Message', 'Message', ([], {'user_id': 'self.user_id', 'agent_id': 'self.agent_id', 'role': 'self.role', 'name': 'self.name', 'text': 'self.text', 'model': 'self.model', 'function_name': 'self.function_name', 'function_args': 'self.function_args', 'function_response': 'self.function_response', 'embedding': 'self.embedding', 'created_at': 'self.created_at', 'id': 'self.id'}), '(user_id=self.user_id, agent_id=self.agent_id, role=self.role, name=\n self.name, text=self.text, model=self.model, function_name=self.\n function_name, function_args=self.function_args, function_response=self\n .function_response, embedding=self.embedding, created_at=self.\n created_at, id=self.id)\n', (2682, 2990), False, 'from memgpt.data_types import Record, Message, Passage, Source\n'), ((3816, 3905), 'memgpt.data_types.Source', 'Source', ([], {'id': 'self.id', 'user_id': 'self.user_id', 'name': 'self.name', 'created_at': 'self.created_at'}), '(id=self.id, user_id=self.user_id, name=self.name, created_at=self.\n created_at)\n', (3822, 3905), False, 'from memgpt.data_types import Record, Message, Passage, Source\n')] |
import os
import argparse
import lancedb
from lancedb.context import contextualize
from lancedb.embeddings import with_embeddings
from datasets import load_dataset
import openai
import pytest
OPENAI_MODEL = None
def embed_func(c):
rs = openai.Embedding.create(input=c, engine=OPENAI_MODEL)
return [record["embedding"] for record in rs["data"]]
def create_prompt(query, context):
    limit = 3750
    prompt_start = "Answer the question based on the context below.\n\n" + "Context:\n"
    prompt_end = f"\n\nQuestion: {query}\nAnswer:"
    # default to using every context passage, then truncate once we hit the limit
    prompt = prompt_start + "\n\n---\n\n".join(context.text) + prompt_end
    for i in range(1, len(context)):
        if len("\n\n---\n\n".join(context.text[:i])) >= limit:
            prompt = (
                prompt_start + "\n\n---\n\n".join(context.text[: i - 1]) + prompt_end
            )
            break
    return prompt
def complete(prompt):
# query text-davinci-003
    res = openai.Completion.create(
        engine="text-davinci-003",  # completions engine; OPENAI_MODEL is the embeddings engine
prompt=prompt,
temperature=0,
max_tokens=400,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=None,
)
return res["choices"][0]["text"].strip()
def arg_parse():
default_query = "Which training method should I use for sentence transformers when I only have pairs of related sentences?"
global OPENAI_MODEL
parser = argparse.ArgumentParser(description="Youtube Search QA Bot")
parser.add_argument(
"--query", type=str, default=default_query, help="query to search"
)
parser.add_argument(
"--context-length",
type=int,
default=3,
help="Number of queries to use as context",
)
parser.add_argument("--window-size", type=int, default=20, help="window size")
parser.add_argument("--stride", type=int, default=4, help="stride")
parser.add_argument("--openai-key", type=str, help="OpenAI API Key")
parser.add_argument(
"--model", type=str, default="text-embedding-ada-002", help="OpenAI API Key"
)
args = parser.parse_args()
if not args.openai_key:
if "OPENAI_API_KEY" not in os.environ:
raise ValueError(
"OPENAI_API_KEY environment variable not set. Please set it or pass --openai_key"
)
else:
openai.api_key = args.openai_key
OPENAI_MODEL = args.model
return args
if __name__ == "__main__":
args = arg_parse()
db = lancedb.connect("~/tmp/lancedb")
table_name = "youtube-chatbot"
if table_name not in db.table_names():
assert len(openai.Model.list()["data"]) > 0
data = load_dataset("jamescalam/youtube-transcriptions", split="train")
df = (
contextualize(data.to_pandas())
.groupby("title")
.text_col("text")
.window(args.window_size)
.stride(args.stride)
.to_df()
)
data = with_embeddings(embed_func, df, show_progress=True)
data.to_pandas().head(1)
tbl = db.create_table(table_name, data)
        print(f"Created LanceDB table of length: {len(tbl)}")
else:
tbl = db.open_table(table_name)
        load_dataset("jamescalam/youtube-transcriptions", split="train")  # NOTE: return value is unused here
emb = embed_func(args.query)[0]
context = tbl.search(emb).limit(args.context_length).to_df()
prompt = create_prompt(args.query, context)
complete(prompt)
top_match = context.iloc[0]
print(f"Top Match: {top_match['url']}&t={top_match['start']}")
| [
"lancedb.embeddings.with_embeddings",
"lancedb.connect"
] | [((243, 296), 'openai.Embedding.create', 'openai.Embedding.create', ([], {'input': 'c', 'engine': 'OPENAI_MODEL'}), '(input=c, engine=OPENAI_MODEL)\n', (266, 296), False, 'import openai\n'), ((1031, 1192), 'openai.Completion.create', 'openai.Completion.create', ([], {'engine': 'OPENAI_MODEL', 'prompt': 'prompt', 'temperature': '(0)', 'max_tokens': '(400)', 'top_p': '(1)', 'frequency_penalty': '(0)', 'presence_penalty': '(0)', 'stop': 'None'}), '(engine=OPENAI_MODEL, prompt=prompt, temperature=0,\n max_tokens=400, top_p=1, frequency_penalty=0, presence_penalty=0, stop=None\n )\n', (1055, 1192), False, 'import openai\n'), ((1485, 1545), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Youtube Search QA Bot"""'}), "(description='Youtube Search QA Bot')\n", (1508, 1545), False, 'import argparse\n'), ((2553, 2585), 'lancedb.connect', 'lancedb.connect', (['"""~/tmp/lancedb"""'], {}), "('~/tmp/lancedb')\n", (2568, 2585), False, 'import lancedb\n'), ((3281, 3345), 'datasets.load_dataset', 'load_dataset', (['"""jamescalam/youtube-transcriptions"""'], {'split': '"""train"""'}), "('jamescalam/youtube-transcriptions', split='train')\n", (3293, 3345), False, 'from datasets import load_dataset\n'), ((2731, 2795), 'datasets.load_dataset', 'load_dataset', (['"""jamescalam/youtube-transcriptions"""'], {'split': '"""train"""'}), "('jamescalam/youtube-transcriptions', split='train')\n", (2743, 2795), False, 'from datasets import load_dataset\n'), ((3032, 3083), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['embed_func', 'df'], {'show_progress': '(True)'}), '(embed_func, df, show_progress=True)\n', (3047, 3083), False, 'from lancedb.embeddings import with_embeddings\n'), ((2683, 2702), 'openai.Model.list', 'openai.Model.list', ([], {}), '()\n', (2700, 2702), False, 'import openai\n')] |
import os, time, json
import pandas as pd
import numpy as np
from collections import Counter
from .utils import abbreviate_book_name_in_full_reference, get_train_test_split_from_verse_list, embed_batch
from .types import TranslationTriplet, ChatResponse, VerseMap, AIResponse
from pydantic import BaseModel, Field
from typing import Any, List, Optional, Callable
from random import shuffle
import requests
import guidance
import lancedb
from lancedb.embeddings import with_embeddings
from nltk.util import ngrams
from nltk import FreqDist
import logging
logger = logging.getLogger('uvicorn')
machine = 'http://192.168.1.76:8081'
def get_dataframes(target_language_code=None, file_suffix=None):
"""Get source data dataframes (literalistic english Bible and macula Greek/Hebrew)"""
bsb_bible_df = pd.read_csv('data/bsb-utf8.txt', sep='\t', names=['vref', 'content'], header=0)
bsb_bible_df['vref'] = bsb_bible_df['vref'].apply(abbreviate_book_name_in_full_reference)
macula_df = pd.read_csv('data/combined_greek_hebrew_vref.csv') # Note: csv wrangled in notebook: `create-combined-macula-df.ipynb`
    if target_language_code:
        target_df = get_target_vref_df(target_language_code, file_suffix=file_suffix)
        return bsb_bible_df, macula_df, target_df
else:
return bsb_bible_df, macula_df
def get_vref_list(book_abbreviation=None):
vref_url = 'https://raw.githubusercontent.com/BibleNLP/ebible/main/metadata/vref.txt'
if not os.path.exists('data/vref.txt'):
os.system(f'wget {vref_url} -O data/vref.txt')
with open('data/vref.txt', 'r', encoding="utf8") as f:
if book_abbreviation:
return [i.strip() for i in f.readlines() if i.startswith(book_abbreviation)]
else:
return list(set([i.strip().split(' ')[0] for i in f.readlines()]))
def get_target_vref_df(language_code, file_suffix=None, drop_empty_verses=False):
"""Get target language data by language code"""
if not len(language_code) == 3:
return 'Invalid language code. Please use 3-letter ISO 639-3 language code.'
language_code = language_code.lower().strip()
language_code = f'{language_code}-{language_code}'
# if file_suffix:
# print('adding file suffix', file_suffix)
language_code = f'{language_code}{file_suffix if file_suffix else ""}'
target_data_url = f'https://raw.githubusercontent.com/BibleNLP/ebible/main/corpus/{language_code}.txt'
path = f'data/{language_code}.txt'
if not os.path.exists(path):
try:
os.system(f'wget {target_data_url} -O {path}')
except:
return 'No data found for language code. Please check the eBible repo for available data.'
with open(path, 'r', encoding="utf8") as f:
target_text = f.readlines()
target_text = [i.strip() for i in target_text]
vref_url = 'https://raw.githubusercontent.com/BibleNLP/ebible/main/metadata/vref.txt'
if not os.path.exists('data/vref.txt'):
os.system(f'wget {vref_url} -O data/vref.txt')
with open('data/vref.txt', 'r', encoding="utf8") as f:
target_vref = f.readlines()
target_vref = [i.strip() for i in target_vref]
target_tsv = [i for i in list(zip(target_vref, target_text))]
if drop_empty_verses:
target_tsv = [i for i in target_tsv if i[1] != '']
target_df = pd.DataFrame(target_tsv, columns=['vref', 'content'])
return target_df
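# Illustrative usage (an added sketch, not part of the original module; assumes
# network access to the BibleNLP eBible repo and a valid ISO 639-3 code):
#     target_df = get_target_vref_df('aai')
#     print(target_df.head())  # columns: 'vref', 'content'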
from pandas import DataFrame as DataFrameClass
def create_lancedb_table_from_df(df: DataFrameClass, table_name, content_column_name='content'):
"""Turn a pandas dataframe into a LanceDB table."""
start_time = time.time()
logger.info('Creating LanceDB table...')
    logger.info(f'Creating LanceDB table: {table_name}, {df.head()}')
# rename 'content' field as 'text' as lancedb expects
try:
df = df.rename(columns={content_column_name: 'text'})
except:
assert 'text' in df.columns, 'Please rename the content column to "text" or specify the column name in the function call.'
# Add target_language_code to the dataframe
df['language_code'] = table_name
# mkdir lancedb if it doesn't exist
if not os.path.exists('./lancedb'):
os.mkdir('./lancedb')
# Connect to LanceDB
db = lancedb.connect("./lancedb")
table = get_table_from_database(table_name)
if not table:
# If it doesn't exist, create it
df_filtered = df[df['text'].str.strip() != '']
# data = with_embeddings(embed_batch, df_filtered.sample(1000)) # FIXME: I can't process the entirety of the bsb bible for some reason. Something is corrupt or malformed in the data perhaps
data = with_embeddings(embed_batch, df_filtered)
# data = with_embeddings(embed_batch, df)
table = db.create_table(
table_name,
data=data,
mode="create",
)
else:
# If it exists, append to it
df_filtered = df[df['text'].str.strip() != '']
data = with_embeddings(embed_batch, df_filtered.sample(10000))
data = data.fillna(0) # Fill missing values with 0
table.append(data)
print('LanceDB table created. Time elapsed: ', time.time() - start_time, 'seconds.')
return table
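# Minimal sketch of the embed-and-store flow above (added for illustration, not
# part of the original module). Assumes `df` already has a 'text' column;
# `embed_batch` and `with_embeddings` are the imports used throughout this file.
def _example_embed_and_store(df, table_name='example_table'):
    db = lancedb.connect('./lancedb')
    data = with_embeddings(embed_batch, df)  # appends a 'vector' column
    return db.create_table(table_name, data=data, mode='overwrite')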
def load_database(target_language_code=None, file_suffix=None):
print('Loading dataframes...')
if target_language_code:
print(f'Loading target language data for {target_language_code} (suffix: {file_suffix})...')
bsb_bible_df, macula_df, target_df = get_dataframes(target_language_code, file_suffix=file_suffix)
else:
print('No target language code specified. Loading English and Greek/Hebrew data only.')
bsb_bible_df, macula_df = get_dataframes()
target_df = None
print('Creating tables...')
# table_name = 'verses'
# create_lancedb_table_from_df(bsb_bible_df, table_name)
# create_lancedb_table_from_df(macula_df, table_name)
create_lancedb_table_from_df(bsb_bible_df, 'bsb_bible')
create_lancedb_table_from_df(macula_df, 'macula')
if target_df is not None:
print('Creating target language tables...')
# create_lancedb_table_from_df(target_df, table_name)
target_table_name = target_language_code if not file_suffix else f'{target_language_code}{file_suffix}'
create_lancedb_table_from_df(target_df, target_table_name)
print('Database populated.')
return True
def get_table_from_database(table_name):
"""
Returns a table by name.
Use '/api/db_info' endpoint to see available tables.
"""
db = lancedb.connect("./lancedb")
table_names = db.table_names()
if table_name not in table_names:
logger.error(f'''Table {table_name} not found. Please check the table name and try again.
Available tables: {table_names}''')
return None
table = db.open_table(table_name)
return table
def get_verse_triplet(full_verse_ref: str, language_code: str, bsb_bible_df, macula_df):
"""
Get verse from bsb_bible_df,
AND macula_df (greek and hebrew)
AND target_vref_data (target language)
e.g., http://localhost:3000/api/verse/GEN%202:19&aai
or NT: http://localhost:3000/api/verse/ROM%202:19&aai
"""
bsb_row = bsb_bible_df[bsb_bible_df['vref'] == full_verse_ref]
macula_row = macula_df[macula_df['vref'] == full_verse_ref]
target_df = get_target_vref_df(language_code)
target_row = target_df[target_df['vref'] == full_verse_ref]
if not bsb_row.empty and not macula_row.empty:
return {
'bsb': {
'verse_number': int(bsb_row.index[0]),
'vref': bsb_row['vref'][bsb_row.index[0]],
'content': bsb_row['content'][bsb_row.index[0]]
},
'macula': {
'verse_number': int(macula_row.index[0]),
'vref': macula_row['vref'][macula_row.index[0]],
'content': macula_row['content'][macula_row.index[0]]
},
'target': {
'verse_number': int(target_row.index[0]),
'vref': target_row['vref'][target_row.index[0]],
'content': target_row['content'][target_row.index[0]]
}
}
else:
return None
def query_lancedb_table(language_code: str, query: str, limit: int = 50):
    """Get similar sentences from a LanceDB table."""
    table = get_table_from_database(language_code)
query_vector = embed_batch([query])[0]
if not table:
return {'error':'table not found'}
result = table.search(query_vector).limit(limit).to_df().to_dict()
if not result.values():
return []
texts = result['text']
# scores = result['_distance']
vrefs = result['vref']
output = []
for i in range(len(texts)):
output.append({
'text': texts[i],
# 'score': scores[i],
'vref': vrefs[i]
})
return output
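# Illustrative usage (an added sketch; assumes the 'bsb_bible' table was already
# built, e.g. via load_database()):
#     hits = query_lancedb_table('bsb_bible', 'In the beginning', limit=5)
#     for hit in hits:
#         print(hit['vref'], hit['text'])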
def get_unique_tokens_for_language(language_code):
"""Get unique tokens for a language"""
tokens_to_ignore = ['']
    if language_code == 'bsb' or language_code == 'bsb_bible':
        df, _ = get_dataframes()  # returns (bsb_bible_df, macula_df) when no target code is given
    elif language_code == 'macula':
        _, df = get_dataframes()
    else:
        _, _, df = get_dataframes(target_language_code=language_code)
target_tokens = df['content'].apply(lambda x: x.split(' ')).explode().tolist()
target_tokens = [token for token in target_tokens if token not in tokens_to_ignore]
unique_tokens = Counter(target_tokens)
return unique_tokens
def get_ngrams(language_code: str, size: int = 2, n: int = 100, string_filter: Optional[list[str]] = None):
"""Get ngrams with frequencies for a language
Params:
- language_code (str): language code
- size (int): ngram size
- n (int): max number of ngrams to return
- string_filter (list[str]): if passed, only return ngrams where all ngram tokens are contained in string_filter
A string_filter might be, for example, a tokenized sentence where you want to detect ngrams relative to the entire corpus.
NOTE: calculating these is not slow, and it is assumed that the corpus itself will change during iterative translation
If it winds up being slow, we can cache the results and only recalculate when the corpus changes. # ?FIXME
"""
tokens_to_ignore = ['']
# TODO: use a real character filter. I'm sure NLTK has something built in
    if language_code == 'bsb' or language_code == 'bsb_bible':
        df, _ = get_dataframes()
    elif language_code == 'macula':
        _, df = get_dataframes()
    else:
        _, _, df = get_dataframes(target_language_code=language_code)
target_tokens = df['content'].apply(lambda x: x.split(' ')).explode().tolist()
target_tokens = [token for token in target_tokens if token not in tokens_to_ignore]
n_grams = [tuple(gram) for gram in ngrams(target_tokens, size)]
print('ngrams before string_filter:', len(n_grams))
if string_filter:
print('filtering with string_filter')
n_grams = [gram for gram in n_grams if all(token in string_filter for token in gram)]
freq_dist = FreqDist(n_grams)
print('ngrams after string_filter:', len(n_grams))
return list(freq_dist.most_common(n))
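# Illustrative usage (an added sketch; the counts shown are invented):
#     sentence_tokens = 'In the beginning God created'.split(' ')
#     get_ngrams('bsb', size=2, n=10, string_filter=sentence_tokens)
#     # -> [(('In', 'the'), 42), ...]  i.e. (ngram, corpus frequency) pairs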
def build_translation_prompt(
vref,
target_language_code,
source_language_code=None,
bsb_bible_df=None,
macula_df=None,
number_of_examples=3,
backtranslate=False) -> dict[str, TranslationTriplet]:
"""Build a prompt for translation"""
    if bsb_bible_df is None or bsb_bible_df.empty or macula_df is None or macula_df.empty: # build bsb_bible_df and macula_df only if not supplied (saves overhead)
        bsb_bible_df, macula_df, target_df = get_dataframes(target_language_code=target_language_code)
    else:
        _, _, target_df = get_dataframes(target_language_code=target_language_code) # target_df is always needed below
    if source_language_code:
        _, _, source_df = get_dataframes(target_language_code=source_language_code)
    else:
        source_df = bsb_bible_df
# Query the LanceDB table for the most similar verses to the source text (or bsb if source_language_code is None)
table_name = source_language_code if source_language_code else 'bsb_bible'
query = source_df[source_df['vref']==vref]['content'].values[0]
original_language_source = macula_df[macula_df['vref']==vref]['content'].values[0]
print(f'Query result: {query}')
similar_verses = query_lancedb_table(table_name, query, limit=number_of_examples) # FIXME: query 50 and then filter to first n that have target content?
triplets = [get_verse_triplet(similar_verse['vref'], target_language_code, bsb_bible_df, macula_df) for similar_verse in similar_verses]
target_verse = target_df[target_df['vref']==vref]['content'].values[0]
# Initialize an empty dictionary to store the JSON objects
json_objects: dict[str, TranslationTriplet] = dict()
for triplet in triplets:
# Create a JSON object for each triplet with top-level keys being the VREFs
json_objects[triplet["bsb"]["vref"]] = TranslationTriplet(
source=triplet["macula"]["content"],
bridge_translation=triplet["bsb"]["content"],
target=triplet["target"]["content"] # FIXME: validate that content exists here?
).to_dict()
# Add the source verse Greek/Hebrew and English reference to the JSON objects
json_objects[vref] = TranslationTriplet(
source=original_language_source,
bridge_translation=query,
target=target_verse
).to_dict()
return json_objects
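# Shape of the returned mapping (an added illustration; contents invented):
#     {
#         'GEN 1:2': {'source': '<Hebrew>', 'bridge_translation': '<English>', 'target': '<target>'},
#         ...,
#         '<requested vref>': {...}  # the verse to translate is appended last
#     }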
def execute_discriminator_evaluation(verse_triplets: dict[str, TranslationTriplet], hypothesis_vref: str, hypothesis_key='target') -> ChatResponse:
"""
Accepts an array of verses as verse_triplets.
The final triplet is assumed to be the hypothesis.
The hypothesis string is assumed to be the target language rendering.
This simple discriminator type of evaluation scrambles the input verse_triplets
and prompts the LLM to detect which is the hypothesis.
The return value is:
{
'y_index': index_of_hypothesis,
'y_hat_index': llm_predicted_index,
'rationale': rationale_string,
}
If you introduce any intermediate translation steps (e.g., leaving unknown tokens untranslated),
then this type of evaluation is not recommended.
"""
hypothesis_triplet = verse_triplets[hypothesis_vref]
print(f'Hypothesis: {hypothesis_triplet}')
verse_triplets_list: list[tuple] = list(verse_triplets.items())
print('Verse triplets keys:', [k for k, v in verse_triplets_list])
# # Shuffle the verse_triplets
shuffle(verse_triplets_list)
print(f'Shuffled verse triplets keys: {[k for k, v in verse_triplets_list]}')
# # Build the prompt
prompt = ''
for i, triplet in enumerate(verse_triplets_list):
print(f'Verse triplet {i}: {triplet}')
prompt += f'\n{triplet[0]}. Target: {triplet[1]["target"]}'
url = f"{machine}/v1/chat/completions"
headers = {
"Content-Type": "application/json",
}
payload = {
"messages": [
# FIXME: I think I should just ask the model to designate which verse stands out as the least likely to be correct.
{"role": "user", "content": f"### Instruction: One of these translations is incorrect, and you can only try to determine by comparing the examples given:\n{prompt}\nWhich one of these is incorrect? (show only '[put verse ref here] -- rationale as to why you picked this one relative only to the other options')\n###Response:"}
],
"temperature": 0.7,
"max_tokens": -1,
"stream": False,
}
response = requests.post(url, json=payload, headers=headers)
return response.json()
def execute_fewshot_translation(vref, target_language_code, source_language_code=None, bsb_bible_df=None, macula_df=None, number_of_examples=3, backtranslate=False) -> ChatResponse:
prompt = build_translation_prompt(vref, target_language_code, source_language_code, bsb_bible_df, macula_df, number_of_examples, backtranslate)
url = f"{machine}/v1/chat/completions"
headers = {
"Content-Type": "application/json",
}
payload = {
"messages": [
{"role": "user", "content": prompt}
],
"temperature": 0.7,
"max_tokens": -1,
"stream": False,
}
response = requests.post(url, json=payload, headers=headers)
return response.json()
class RevisionLoop(BaseModel):
# FIXME: this loop should only work for (revise-evaluate)*n, where you start with a translation draft.
# TODO: implement a revision function whose output could be evaluated
iterations: int
function_a: Optional[Callable] = None
function_b: Optional[Callable] = None
function_a_output: Optional[Any] = Field(None, description="Output of function A")
function_b_output: Optional[Any] = Field(None, description="Output of function B")
loop_data: Optional[List[Any]] = Field(None, description="List to store data generated in the loop")
current_iteration: int = Field(0, description="Current iteration of the loop")
def __init__(self, iterations: int, function_a=execute_fewshot_translation, function_b=execute_discriminator_evaluation):
super().__init__(iterations=iterations)
self.function_a = function_a
self.function_b = function_b
self.loop_data = ['test item']
def __iter__(self):
self.current_iteration = 0
return self
def __next__(self):
if self.current_iteration < self.iterations:
print("Executing function A...")
self.function_a_output: VerseMap = self.function_a()
print("Executing function B...")
# inputs for function b: (verse_triplets: dict[str, TranslationTriplet], hypothesis_vref: str, hypothesis_key='target') -> ChatResponse:
function_b_input = {
"verse_triplets": self.function_a_output,
"hypothesis_vref": list(self.function_a_output.keys())[-1],
"hypothesis_key": "target"
}
self.function_b_output = self.function_b(**function_b_input)
self.loop_data.append((self.function_a_output, self.function_b_output))
self.current_iteration += 1
return self.function_a_output, self.function_b_output
else:
print("Reached maximum iterations, stopping loop...")
raise StopIteration
def get_loop_data(self):
return self.loop_data
class Translation():
"""Translations differ from revisions insofar as revisions require an existing draft of the target"""
def __init__(self, vref: str, target_language_code: str, number_of_examples=3, should_backtranslate=False):
self.vref = vref
self.target_language_code = target_language_code
self.number_of_examples = number_of_examples
self.should_backtranslate = should_backtranslate
bsb_bible_df, macula_df = get_dataframes()
self.verse = get_verse_triplet(full_verse_ref=self.vref, language_code=self.target_language_code, bsb_bible_df=bsb_bible_df, macula_df=macula_df)
self.vref_triplets = build_translation_prompt(vref, target_language_code)
# Predict translation
        self.hypothesis: ChatResponse = execute_fewshot_translation(vref, target_language_code, source_language_code=None, bsb_bible_df=bsb_bible_df, macula_df=macula_df, number_of_examples=self.number_of_examples, backtranslate=self.should_backtranslate)
# Get feedback on the translation
# NOTE: here is where various evaluation functions could be swapped out
self.feedback: ChatResponse = execute_discriminator_evaluation(self.vref_triplets, self.vref)
def get_hypothesis(self):
return self.hypothesis
def get_feedback(self):
return self.feedback
| [
"lancedb.embeddings.with_embeddings",
"lancedb.connect"
] | [((559, 587), 'logging.getLogger', 'logging.getLogger', (['"""uvicorn"""'], {}), "('uvicorn')\n", (576, 587), False, 'import logging\n'), ((801, 880), 'pandas.read_csv', 'pd.read_csv', (['"""data/bsb-utf8.txt"""'], {'sep': '"""\t"""', 'names': "['vref', 'content']", 'header': '(0)'}), "('data/bsb-utf8.txt', sep='\\t', names=['vref', 'content'], header=0)\n", (812, 880), True, 'import pandas as pd\n'), ((991, 1041), 'pandas.read_csv', 'pd.read_csv', (['"""data/combined_greek_hebrew_vref.csv"""'], {}), "('data/combined_greek_hebrew_vref.csv')\n", (1002, 1041), True, 'import pandas as pd\n'), ((3498, 3551), 'pandas.DataFrame', 'pd.DataFrame', (['target_tsv'], {'columns': "['vref', 'content']"}), "(target_tsv, columns=['vref', 'content'])\n", (3510, 3551), True, 'import pandas as pd\n'), ((3797, 3808), 'time.time', 'time.time', ([], {}), '()\n', (3806, 3808), False, 'import os, time\n'), ((4519, 4547), 'lancedb.connect', 'lancedb.connect', (['"""./lancedb"""'], {}), "('./lancedb')\n", (4534, 4547), False, 'import lancedb\n'), ((6898, 6926), 'lancedb.connect', 'lancedb.connect', (['"""./lancedb"""'], {}), "('./lancedb')\n", (6913, 6926), False, 'import lancedb\n'), ((10036, 10058), 'collections.Counter', 'Counter', (['target_tokens'], {}), '(target_tokens)\n', (10043, 10058), False, 'from collections import Counter\n'), ((11697, 11714), 'nltk.FreqDist', 'FreqDist', (['n_grams'], {}), '(n_grams)\n', (11705, 11714), False, 'from nltk import FreqDist\n'), ((15245, 15273), 'random.shuffle', 'shuffle', (['verse_triplets_list'], {}), '(verse_triplets_list)\n', (15252, 15273), False, 'from random import shuffle\n'), ((16298, 16347), 'requests.post', 'requests.post', (['url'], {'json': 'payload', 'headers': 'headers'}), '(url, json=payload, headers=headers)\n', (16311, 16347), False, 'import requests\n'), ((17017, 17066), 'requests.post', 'requests.post', (['url'], {'json': 'payload', 'headers': 'headers'}), '(url, json=payload, headers=headers)\n', (17030, 17066), False, 'import requests\n'), ((17450, 17497), 'pydantic.Field', 'Field', (['None'], {'description': '"""Output of function A"""'}), "(None, description='Output of function A')\n", (17455, 17497), False, 'from pydantic import BaseModel, Field\n'), ((17537, 17584), 'pydantic.Field', 'Field', (['None'], {'description': '"""Output of function B"""'}), "(None, description='Output of function B')\n", (17542, 17584), False, 'from pydantic import BaseModel, Field\n'), ((17622, 17689), 'pydantic.Field', 'Field', (['None'], {'description': '"""List to store data generated in the loop"""'}), "(None, description='List to store data generated in the loop')\n", (17627, 17689), False, 'from pydantic import BaseModel, Field\n'), ((17719, 17772), 'pydantic.Field', 'Field', (['(0)'], {'description': '"""Current iteration of the loop"""'}), "(0, description='Current iteration of the loop')\n", (17724, 17772), False, 'from pydantic import BaseModel, Field\n'), ((1562, 1593), 'os.path.exists', 'os.path.exists', (['"""data/vref.txt"""'], {}), "('data/vref.txt')\n", (1576, 1593), False, 'import os, time\n'), ((1603, 1649), 'os.system', 'os.system', (['f"""wget {vref_url} -O data/vref.txt"""'], {}), "(f'wget {vref_url} -O data/vref.txt')\n", (1612, 1649), False, 'import os, time\n'), ((2626, 2646), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2640, 2646), False, 'import os, time\n'), ((3081, 3112), 'os.path.exists', 'os.path.exists', (['"""data/vref.txt"""'], {}), "('data/vref.txt')\n", (3095, 3112), False, 'import os, time\n'), ((3122, 
3168), 'os.system', 'os.system', (['f"""wget {vref_url} -O data/vref.txt"""'], {}), "(f'wget {vref_url} -O data/vref.txt')\n", (3131, 3168), False, 'import os, time\n'), ((4421, 4448), 'os.path.exists', 'os.path.exists', (['"""./lancedb"""'], {}), "('./lancedb')\n", (4435, 4448), False, 'import os, time\n'), ((4458, 4479), 'os.mkdir', 'os.mkdir', (['"""./lancedb"""'], {}), "('./lancedb')\n", (4466, 4479), False, 'import os, time\n'), ((4933, 4974), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['embed_batch', 'df_filtered'], {}), '(embed_batch, df_filtered)\n', (4948, 4974), False, 'from lancedb.embeddings import with_embeddings\n'), ((2673, 2719), 'os.system', 'os.system', (['f"""wget {target_data_url} -O {path}"""'], {}), "(f'wget {target_data_url} -O {path}')\n", (2682, 2719), False, 'import os, time\n'), ((5469, 5480), 'time.time', 'time.time', ([], {}), '()\n', (5478, 5480), False, 'import os, time\n'), ((11431, 11458), 'nltk.util.ngrams', 'ngrams', (['target_tokens', 'size'], {}), '(target_tokens, size)\n', (11437, 11458), False, 'from nltk.util import ngrams\n')] |
import logging
import pyarrow as pa
import pyarrow.compute as pc
from tabulate import tabulate
from llama_cpp import Llama
from dryg.settings import DEFAULT_MODEL
from dryg.db import open_table, create_table
from lancedb.embeddings import with_embeddings
MODEL = None
def get_code_blocks(body: pa.ChunkedArray):
"""
Get code blocks from the body of an issue
Args:
body (str): Body of the issue
Returns:
list: List of code blocks
"""
code_blocks = []
for body_chunk in body:
if body_chunk is None:
continue
code_blocks += str(body_chunk).split("```")[1::2]
return code_blocks
def setup_model(model_name:str = None):
"""
Set the model to be used for embedding
"""
global MODEL
if model_name is None:
model_name = DEFAULT_MODEL
if model_name.endswith(".bin"):
        MODEL = Llama(model_name, embedding=True, n_threads=8)  # n_threads=8 hardcoded for now
else:
raise ValueError("Invalid model format")
def embedding_func(batch):
"""
Embedding function for the model
"""
if MODEL is None:
setup_model()
return [MODEL.embed(x) for x in batch]
def save_embeddings(issue_table: str, force: bool = False):
"""
Create an index for the issue table
"""
issues = open_table(issue_table).to_arrow()
if "vector" in issues.column_names and not force:
logging.info("Embeddings already exist. Use `force=True` to overwrite")
return
issues = with_embeddings(embedding_func, issues, "title") # Turn this into a Toy problem
create_table(issue_table, issues, mode="overwrite")
def search_table(table: str, query: str):
"""
Search issues in the issue table
Args:
issue_table (str): Name of the issue table
query (str): Query to search for
Returns:
list: List of issues
"""
issues = open_table(table)
query_embedding = embedding_func([query])[0]
results = issues.search(query_embedding).limit(4).to_df()
table = [["Title", "Link"]]
for title, link in zip(results["title"], results["html_url"]):
table.append([title, link])
print(tabulate(table))
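# Illustrative end-to-end usage (an added sketch; the table name is a placeholder
# and assumes DEFAULT_MODEL points at a local .bin llama.cpp model):
#     save_embeddings("my-repo-issues")  # embeds issue titles once
#     search_table("my-repo-issues", "crash when opening an empty table")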
| [
"lancedb.embeddings.with_embeddings"
] | [((1527, 1575), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['embedding_func', 'issues', '"""title"""'], {}), "(embedding_func, issues, 'title')\n", (1542, 1575), False, 'from lancedb.embeddings import with_embeddings\n'), ((1611, 1662), 'dryg.db.create_table', 'create_table', (['issue_table', 'issues'], {'mode': '"""overwrite"""'}), "(issue_table, issues, mode='overwrite')\n", (1623, 1662), False, 'from dryg.db import open_table, create_table\n'), ((1918, 1935), 'dryg.db.open_table', 'open_table', (['table'], {}), '(table)\n', (1928, 1935), False, 'from dryg.db import open_table, create_table\n'), ((895, 941), 'llama_cpp.Llama', 'Llama', (['model_name'], {'embedding': '(True)', 'n_threads': '(8)'}), '(model_name, embedding=True, n_threads=8)\n', (900, 941), False, 'from llama_cpp import Llama\n'), ((1426, 1497), 'logging.info', 'logging.info', (['"""Embeddings already exist. Use `force=True` to overwrite"""'], {}), "('Embeddings already exist. Use `force=True` to overwrite')\n", (1438, 1497), False, 'import logging\n'), ((2198, 2213), 'tabulate.tabulate', 'tabulate', (['table'], {}), '(table)\n', (2206, 2213), False, 'from tabulate import tabulate\n'), ((1329, 1352), 'dryg.db.open_table', 'open_table', (['issue_table'], {}), '(issue_table)\n', (1339, 1352), False, 'from dryg.db import open_table, create_table\n')] |
from pathlib import Path
from collections import defaultdict
import math
import json
import pandas as pd
import cv2
import duckdb
import matplotlib.pyplot as plt
import numpy as np
import yaml
from tqdm import tqdm
from ultralytics.utils import LOGGER, colorstr
from ultralytics.utils.plotting import Annotator, colors
from torch import Tensor
import lancedb
import pyarrow as pa
from lancedb.embeddings import with_embeddings
from sklearn.decomposition import PCA
from yoloexplorer.dataset import get_dataset_info, Dataset
from yoloexplorer.frontend import launch
from yoloexplorer.config import TEMP_CONFIG_PATH
import torch
import torchvision.models as models
from torchvision import datasets, transforms
from PIL import Image
import sys
SCHEMA = [
"id",
# "img", # Make this optional; disabled by default. Not feasible unless we can have row_id/primary key to index
"path",
"cls",
"labels",
"bboxes",
"segments",
"keypoints",
"meta",
] # + "vector" with embeddings
def encode(img_path):
img = cv2.imread(img_path)
ext = Path(img_path).suffix
img_encoded = cv2.imencode(ext, img)[1].tobytes()
return img_encoded
def decode(img_encoded):
nparr = np.frombuffer(img_encoded, np.byte)
img = cv2.imdecode(nparr, cv2.IMREAD_ANYCOLOR)
return img
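# Round-trip sketch for the helpers above (added for illustration, not part of
# the original module): decode(encode(path)) recovers a pixel array for the
# image that was encoded.
def _example_roundtrip(img_path):
    img = decode(encode(img_path))
    assert img is not None  # cv2 returns None for malformed input
    return img.shape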
class Explorer:
"""
Dataset explorer
"""
def __init__(self, data, device="", model="resnet18", batch_size=64, project="run") -> None:
"""
        Args:
            data (str, optional): path to dataset file
            model (str, optional): torchvision model architecture used for embeddings. Defaults to "resnet18".
            device (str, optional): device to use. Defaults to ''. If empty, uses the default device.
            batch_size (int, optional): batch size used when embedding. Defaults to 64.
            project (str, optional): path to project. Defaults to "run".
"""
self.data = data
self.table = None
self.model = model
self.device = device
self.batch_size = batch_size
self.project = project
self.dataset_info = None
self.predictor = None
self.trainset = None
self.removed_img_count = 0
self.verbose = False # For embedding function
self._sim_index = None
self.version = None
self.table_name = Path(data).name
self.temp_table_name = self.table_name + "_temp"
self.model_arch_supported = [
"resnet18",
"resnet50",
"efficientnet_b0",
"efficientnet_v2_s",
"googlenet",
"mobilenet_v3_small",
]
if model:
self.predictor = self._setup_predictor(model, device)
if data:
self.dataset_info = get_dataset_info(self.data)
self.transform = transforms.Compose(
[
transforms.Resize((224, 224)),
transforms.ToTensor(),
]
)
def build_embeddings(self, verbose=False, force=False, store_imgs=False):
"""
Builds the dataset in LanceDB table format
        Args:
            verbose (bool, optional): verbose. Defaults to False.
            force (bool, optional): force rebuild. Defaults to False.
            store_imgs (bool, optional): store encoded images in the table. Defaults to False.
"""
trainset = self.dataset_info["train"]
trainset = trainset if isinstance(trainset, list) else [trainset]
self.trainset = trainset
self.verbose = verbose
dataset = Dataset(img_path=trainset, data=self.dataset_info, augment=False, cache=False)
batch_size = self.batch_size # TODO: fix this hardcoding
db = self._connect()
if not force and self.table_name in db.table_names():
LOGGER.info("LanceDB embedding space already exists. Attempting to reuse it. Use force=True to overwrite.")
self.table = self._open_table(self.table_name)
self.version = self.table.version
if len(self.table) == dataset.ni:
return
else:
self.table = None
LOGGER.info("Table length does not match the number of images in the dataset. Building embeddings...")
table_data = defaultdict(list)
for idx, batch in enumerate(dataset):
batch["id"] = idx
batch["cls"] = batch["cls"].flatten().int().tolist()
box_cls_pair = sorted(zip(batch["bboxes"].tolist(), batch["cls"]), key=lambda x: x[1])
batch["bboxes"] = [box for box, _ in box_cls_pair]
batch["cls"] = [cls for _, cls in box_cls_pair]
batch["labels"] = [self.dataset_info["names"][i] for i in batch["cls"]]
batch["path"] = batch["im_file"]
# batch["cls"] = batch["cls"].tolist()
keys = (key for key in SCHEMA if key in batch)
            for key in keys:
                val = batch[key]
                if isinstance(val, Tensor):
                    val = val.tolist()
                table_data[key].append(val)
            if store_imgs:
                table_data["img"].append(encode(batch["im_file"]))
            # Flush once per batch (outside the key loop) so all columns have equal length
            if len(table_data[key]) == batch_size or idx == dataset.ni - 1:
df = pd.DataFrame(table_data)
df = with_embeddings(self._embedding_func, df, "path", batch_size=batch_size)
if self.table:
self.table.add(df)
else:
self.table = self._create_table(self.table_name, data=df, mode="overwrite")
self.version = self.table.version
table_data = defaultdict(list)
LOGGER.info(f'{colorstr("LanceDB:")} Embedding space built successfully.')
def plot_embeddings(self):
"""
Projects the embedding space to 2D using PCA
Args:
n_components (int, optional): number of components. Defaults to 2.
"""
if self.table is None:
LOGGER.error("No embedding space found. Please build the embedding space first.")
return None
pca = PCA(n_components=2)
embeddings = np.array(self.table.to_arrow()["vector"].to_pylist())
embeddings = pca.fit_transform(embeddings)
plt.scatter(embeddings[:, 0], embeddings[:, 1])
plt.show()
def get_similar_imgs(self, img, n=10):
"""
Returns the n most similar images to the given image
Args:
img (int, str, Path): index of image in the table, or path to image
n (int, optional): number of similar images to return. Defaults to 10.
Returns:
tuple: (list of paths, list of ids)
"""
embeddings = None
if self.table is None:
LOGGER.error("No embedding space found. Please build the embedding space first.")
return None
if isinstance(img, int):
embeddings = self.table.to_pandas()["vector"][img]
elif isinstance(img, (str, Path)):
img = img
elif isinstance(img, bytes):
img = decode(img)
elif isinstance(img, list): # exceptional case for batch search from dash
df = self.table.to_pandas().set_index("path")
array = None
try:
array = df.loc[img]["vector"].to_list()
embeddings = np.array(array)
except KeyError:
pass
else:
LOGGER.error("img should be index from the table(int), path of an image (str or Path), or bytes")
return
if embeddings is None:
if isinstance(img, list):
embeddings = np.array(
[self.predictor(self._image_encode(i)).squeeze().cpu().detach().numpy() for i in img]
)
else:
embeddings = self.predictor(self._image_encode(img)).squeeze().cpu().detach().numpy()
if len(embeddings.shape) > 1:
embeddings = np.mean(embeddings, axis=0)
sim = self.table.search(embeddings).limit(n).to_df()
return sim["path"].to_list(), sim["id"].to_list()
def plot_similar_imgs(self, img, n=10):
"""
Plots the n most similar images to the given image
Args:
img (int, str, Path): index of image in the table, or path to image.
n (int, optional): number of similar images to return. Defaults to 10.
"""
_, ids = self.get_similar_imgs(img, n)
self.plot_imgs(ids)
def plot_imgs(self, ids=None, query=None, labels=True):
        if ids is None and query is None:
            raise ValueError("ids or query must be provided")
        # Read the images and draw their labels
resized_images = []
df = self.sql(query) if query else self.table.to_pandas().iloc[ids]
for _, row in df.iterrows():
img = cv2.imread(row["path"])
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if labels:
ann = Annotator(img)
for box, label, cls in zip(row["bboxes"], row["labels"], row["cls"]):
ann.box_label(box, label, color=colors(cls, True))
img = ann.result()
resized_images.append(img)
if not resized_images:
LOGGER.error("No images found")
return
# Create a grid of the images
cols = 10 if len(resized_images) > 10 else max(2, len(resized_images))
rows = max(1, math.ceil(len(resized_images) / cols))
fig, axes = plt.subplots(nrows=rows, ncols=cols)
fig.subplots_adjust(hspace=0, wspace=0)
for i, ax in enumerate(axes.ravel()):
if i < len(resized_images):
ax.imshow(resized_images[i])
ax.axis("off")
# Display the grid of images
plt.show()
def get_similarity_index(self, top_k=0.01, sim_thres=0.90, reduce=False, sorted=False):
"""
Args:
            top_k (float, optional): Top k fraction of the similar embeddings to apply the threshold on. Defaults to 0.01.
            sim_thres (float, optional): Similarity threshold to set the minimum similarity. Defaults to 0.9.
            reduce (bool, optional): Reduce embedding dimensionality with PCA before searching. Defaults to False.
            sorted (bool, optional): Sort the embeddings by similarity. Defaults to False.
Returns:
np.array: Similarity index
"""
if self.table is None:
LOGGER.error("No embedding space found. Please build the embedding space first.")
return None
if top_k > 1.0:
LOGGER.warning("top_k should be between 0 and 1. Setting top_k to 1.0")
top_k = 1.0
if top_k < 0.0:
LOGGER.warning("top_k should be between 0 and 1. Setting top_k to 0.0")
top_k = 0.0
if sim_thres is not None:
if sim_thres > 1.0:
LOGGER.warning("sim_thres should be between 0 and 1. Setting sim_thres to 1.0")
sim_thres = 1.0
if sim_thres < 0.0:
LOGGER.warning("sim_thres should be between 0 and 1. Setting sim_thres to 0.0")
sim_thres = 0.0
embs = np.array(self.table.to_arrow()["vector"].to_pylist())
self._sim_index = np.zeros(len(embs))
limit = max(int(len(embs) * top_k), 1)
# create a new table with reduced dimensionality to speedup the search
self._search_table = self.table
if reduce:
dim = min(256, embs.shape[1]) # TODO: make this configurable
pca = PCA(n_components=min(dim, len(embs)))
embs = pca.fit_transform(embs)
dim = embs.shape[1]
values = pa.array(embs.reshape(-1), type=pa.float32())
table_data = pa.FixedSizeListArray.from_arrays(values, dim)
table = pa.table([table_data, self.table.to_arrow()["id"]], names=["vector", "id"])
self._search_table = self._create_table("reduced_embs", data=table, mode="overwrite")
# with multiprocessing.Pool() as pool: # multiprocessing doesn't do much. Need to revisit
# list(tqdm(pool.imap(build_index, iterable)))
for _, emb in enumerate(tqdm(embs)):
df = self._search_table.search(emb).metric("cosine").limit(limit).to_df()
if sim_thres is not None:
df = df.query(f"_distance >= {1.0 - sim_thres}")
for idx in df["id"][1:]:
self._sim_index[idx] += 1
self._drop_table("reduced_embs") if reduce else None
return self._sim_index if not sorted else np.sort(self._sim_index)
def plot_similarity_index(self, sim_thres=0.90, top_k=0.01, reduce=False, sorted=False):
"""
Plots the similarity index
Args:
            sim_thres (float, optional): Similarity threshold to set the minimum similarity. Defaults to 0.9.
            top_k (float, optional): Top k fraction of the similar embeddings to apply the threshold on. Defaults to 0.01.
            reduce (bool, optional): Reduce embedding dimensionality with PCA before searching. Defaults to False.
            sorted (bool, optional): Whether to sort the index or not. Defaults to False.
"""
index = self.get_similarity_index(top_k, sim_thres, reduce)
if sorted:
index = np.sort(index)
plt.bar([i for i in range(len(index))], index)
plt.xlabel("idx")
plt.ylabel("similarity count")
plt.show()
def remove_imgs(self, idxs):
"""
Works on temporary table. To apply the changes to the main table, call `persist()`
Args:
idxs (int or list): Index of the image to remove from the dataset.
"""
if isinstance(idxs, int):
idxs = [idxs]
pa_table = self.table.to_arrow()
mask = [True for _ in range(len(pa_table))]
for idx in idxs:
mask[idx] = False
self.removed_img_count += len(idxs)
table = pa_table.filter(mask)
ids = [i for i in range(len(table))]
        table = table.set_column(0, "id", [ids])  # TODO: Revisit this. This is a hack to keep ids == idx
self.table = self._create_table(self.temp_table_name, data=table, mode="overwrite") # work on a temporary table
self.log_status()
def add_imgs(self, exp, idxs):
"""
Works on temporary table. To apply the changes to the main table, call `persist()`
        Args:
            exp (Explorer): Explorer instance whose table rows will be added.
            idxs (list): Indices of the rows in `exp`'s table to add.
        """
table_df = self.table.to_pandas()
data = exp.table.to_pandas().iloc[idxs]
assert len(table_df["vector"].iloc[0]) == len(data["vector"].iloc[0]), "Vector dimension mismatch"
table_df = pd.concat([table_df, data], ignore_index=True)
ids = [i for i in range(len(table_df))]
table_df["id"] = ids
self.table = self._create_table(
self.temp_table_name, data=table_df, mode="overwrite"
) # work on a temporary table
self.log_status()
def reset(self):
"""
Resets the dataset table to its original state or to the last persisted state.
"""
if self.table is None:
LOGGER.info("No changes made to the dataset.")
return
db = self._connect()
if self.temp_table_name in db.table_names():
self._drop_table(self.temp_table_name)
self.table = self._open_table(self.table_name)
self.removed_img_count = 0
# self._sim_index = None # Not sure if we should reset this as computing the index is expensive
LOGGER.info("Dataset reset to original state.")
def persist(self, name=None):
"""
Persists the changes made to the dataset. Available only if data is provided in the constructor.
Args:
name (str, optional): Name of the new dataset. Defaults to `data_updated.yaml`.
"""
db = self._connect()
if self.table is None or self.temp_table_name not in db.table_names():
LOGGER.info("No changes made to the dataset.")
return
LOGGER.info("Persisting changes to the dataset...")
self.log_status()
if not name:
name = self.data.split(".")[0] + "_updated"
datafile_name = name + ".yaml"
train_txt = "train_updated.txt"
path = Path(name).resolve() # add new train.txt file in the dataset parent path
path.mkdir(parents=True, exist_ok=True)
if (path / train_txt).exists():
(path / train_txt).unlink() # remove existing
        with open(path / train_txt, "a") as f:  # open once instead of once per image
            for img in tqdm(self.table.to_pandas()["path"].to_list()):
                f.write(f"{img}" + "\n")  # add image to txt file
new_dataset_info = self.dataset_info.copy()
new_dataset_info.pop("yaml_file")
new_dataset_info.pop("path") # relative paths will get messed up when merging datasets
new_dataset_info.pop("download") # Assume all files are present offline, there is no way to store metadata yet
new_dataset_info["train"] = (path / train_txt).resolve().as_posix()
for key, value in new_dataset_info.items():
if isinstance(value, Path):
new_dataset_info[key] = value.as_posix()
yaml.dump(new_dataset_info, open(path / datafile_name, "w")) # update dataset.yaml file
# TODO: not sure if this should be called data_final to prevent overwriting the original data?
self.table = self._create_table(datafile_name, data=self.table.to_arrow(), mode="overwrite")
db.drop_table(self.temp_table_name)
LOGGER.info("Changes persisted to the dataset.")
log = self._log_training_cmd(Path(path / datafile_name).relative_to(Path.cwd()).as_posix())
return log
def log_status(self):
# TODO: Pretty print log status
LOGGER.info("\n|-----------------------------------------------|")
LOGGER.info(f"\t Number of images: {len(self.table.to_arrow())}")
LOGGER.info("|------------------------------------------------|")
def sql(self, query: str):
"""
Executes a SQL query on the dataset table.
Args:
query (str): SQL query to execute.
"""
if self.table is None:
LOGGER.info("No table found. Please provide a dataset to work on.")
return
table = self.table.to_arrow() # noqa
result = duckdb.sql(query).to_df()
return result
def dash(self, exps=None, analysis=False):
"""
Launches a dashboard to visualize the dataset.
"""
config = {}
Path(TEMP_CONFIG_PATH).parent.mkdir(exist_ok=True, parents=True)
with open(TEMP_CONFIG_PATH, "w+") as file:
config_exp = [self.config]
if exps:
for exp in exps:
config_exp.append(exp.config)
config["exps"] = config_exp
config["analysis"] = analysis
json.dump(config, file)
launch()
@property
def config(self):
return {"project": self.project, "model": self.model, "device": self.device, "data": self.data}
def _log_training_cmd(self, data_path):
        success_log = (
            f'{colorstr("LanceDB: ")}New dataset created successfully! Run the following command to train a model:'
        )
train_cmd = f"yolo train model={self.model} data={data_path} epochs=10"
success_log = success_log + "\n" + train_cmd
LOGGER.info(success_log)
return train_cmd
def _connect(self):
db = lancedb.connect(self.project)
return db
def _create_table(self, name, data=None, mode="overwrite"):
db = lancedb.connect(self.project)
table = db.create_table(name, data=data, mode=mode)
return table
def _open_table(self, name):
db = lancedb.connect(self.project)
table = db.open_table(name) if name in db.table_names() else None
if table is None:
            raise ValueError(f'{colorstr("LanceDB: ")}Table not found.')
return table
def _drop_table(self, name):
db = lancedb.connect(self.project)
if name in db.table_names():
db.drop_table(name)
return True
return False
def _copy_table_to_project(self, table_path):
if not table_path.endswith(".lance"):
raise ValueError(f"{colorstr('LanceDB: ')} Table must be a .lance file")
LOGGER.info(f"Copying table from {table_path}")
path = Path(table_path).parent
name = Path(table_path).stem # lancedb doesn't need .lance extension
db = lancedb.connect(path)
table = db.open_table(name)
return self._create_table(self.table_name, data=table.to_arrow(), mode="overwrite")
def _image_encode(self, img):
image = Image.open(img)
n_channels = np.array(image).ndim
if n_channels == 2:
image = image.convert(mode="RGB")
img_tensor = self.transform(image)
trans_img = img_tensor.unsqueeze(0)
return trans_img
def _embedding_func(self, imgs):
embeddings = []
for img in tqdm(imgs):
encod_img = self._image_encode(img)
embeddings.append(self.predictor(encod_img).squeeze().cpu().detach().numpy())
return embeddings
def _setup_predictor(self, model_arch, device=""):
if model_arch in self.model_arch_supported:
load_model = getattr(models, model_arch)
model = load_model(pretrained=True)
predictor = torch.nn.Sequential(*list(model.children())[:-1])
return predictor
else:
LOGGER.error(f"Supported for {model_arch} is not added yet")
sys.exit(1)
def create_index(self):
# TODO: create index
pass
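# Illustrative end-to-end flow (an added sketch; names are placeholders):
#     exp = Explorer("coco128.yaml", model="resnet18")
#     exp.build_embeddings()
#     exp.plot_similarity_index(sim_thres=0.9, top_k=0.01)
#     exp.remove_imgs([1, 2, 3])      # staged in a temporary table
#     exp.persist("coco128_cleaned")  # writes an updated dataset yaml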
| [
"lancedb.embeddings.with_embeddings",
"lancedb.connect"
] | [((1044, 1064), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (1054, 1064), False, 'import cv2\n'), ((1214, 1249), 'numpy.frombuffer', 'np.frombuffer', (['img_encoded', 'np.byte'], {}), '(img_encoded, np.byte)\n', (1227, 1249), True, 'import numpy as np\n'), ((1260, 1300), 'cv2.imdecode', 'cv2.imdecode', (['nparr', 'cv2.IMREAD_ANYCOLOR'], {}), '(nparr, cv2.IMREAD_ANYCOLOR)\n', (1272, 1300), False, 'import cv2\n'), ((1075, 1089), 'pathlib.Path', 'Path', (['img_path'], {}), '(img_path)\n', (1079, 1089), False, 'from pathlib import Path\n'), ((3544, 3622), 'yoloexplorer.dataset.Dataset', 'Dataset', ([], {'img_path': 'trainset', 'data': 'self.dataset_info', 'augment': '(False)', 'cache': '(False)'}), '(img_path=trainset, data=self.dataset_info, augment=False, cache=False)\n', (3551, 3622), False, 'from yoloexplorer.dataset import get_dataset_info, Dataset\n'), ((4267, 4284), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4278, 4284), False, 'from collections import defaultdict\n'), ((6116, 6135), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(2)'}), '(n_components=2)\n', (6119, 6135), False, 'from sklearn.decomposition import PCA\n'), ((6270, 6317), 'matplotlib.pyplot.scatter', 'plt.scatter', (['embeddings[:, 0]', 'embeddings[:, 1]'], {}), '(embeddings[:, 0], embeddings[:, 1])\n', (6281, 6317), True, 'import matplotlib.pyplot as plt\n'), ((6326, 6336), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6334, 6336), True, 'import matplotlib.pyplot as plt\n'), ((9593, 9629), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'rows', 'ncols': 'cols'}), '(nrows=rows, ncols=cols)\n', (9605, 9629), True, 'import matplotlib.pyplot as plt\n'), ((9881, 9891), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9889, 9891), True, 'import matplotlib.pyplot as plt\n'), ((13468, 13485), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""idx"""'], {}), "('idx')\n", (13478, 13485), True, 'import matplotlib.pyplot as plt\n'), ((13494, 13524), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""similarity count"""'], {}), "('similarity count')\n", (13504, 13524), True, 'import matplotlib.pyplot as plt\n'), ((13533, 13543), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13541, 13543), True, 'import matplotlib.pyplot as plt\n'), ((14841, 14887), 'pandas.concat', 'pd.concat', (['[table_df, data]'], {'ignore_index': '(True)'}), '([table_df, data], ignore_index=True)\n', (14850, 14887), True, 'import pandas as pd\n'), ((15716, 15763), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""Dataset reset to original state."""'], {}), "('Dataset reset to original state.')\n", (15727, 15763), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((16230, 16281), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""Persisting changes to the dataset..."""'], {}), "('Persisting changes to the dataset...')\n", (16241, 16281), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((17779, 17827), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""Changes persisted to the dataset."""'], {}), "('Changes persisted to the dataset.')\n", (17790, 17827), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((18023, 18092), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""\n|-----------------------------------------------|"""'], {}), '("""\n|-----------------------------------------------|""")\n', (18034, 18092), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((18172, 18237), 'ultralytics.utils.LOGGER.info', 
'LOGGER.info', (['"""|------------------------------------------------|"""'], {}), "('|------------------------------------------------|')\n", (18183, 18237), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((19192, 19200), 'yoloexplorer.frontend.launch', 'launch', ([], {}), '()\n', (19198, 19200), False, 'from yoloexplorer.frontend import launch\n'), ((19680, 19704), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['success_log'], {}), '(success_log)\n', (19691, 19704), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((19769, 19798), 'lancedb.connect', 'lancedb.connect', (['self.project'], {}), '(self.project)\n', (19784, 19798), False, 'import lancedb\n'), ((19896, 19925), 'lancedb.connect', 'lancedb.connect', (['self.project'], {}), '(self.project)\n', (19911, 19925), False, 'import lancedb\n'), ((20055, 20084), 'lancedb.connect', 'lancedb.connect', (['self.project'], {}), '(self.project)\n', (20070, 20084), False, 'import lancedb\n'), ((20327, 20356), 'lancedb.connect', 'lancedb.connect', (['self.project'], {}), '(self.project)\n', (20342, 20356), False, 'import lancedb\n'), ((20663, 20710), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['f"""Copying table from {table_path}"""'], {}), "(f'Copying table from {table_path}')\n", (20674, 20710), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((20841, 20862), 'lancedb.connect', 'lancedb.connect', (['path'], {}), '(path)\n', (20856, 20862), False, 'import lancedb\n'), ((21042, 21057), 'PIL.Image.open', 'Image.open', (['img'], {}), '(img)\n', (21052, 21057), False, 'from PIL import Image\n'), ((21368, 21378), 'tqdm.tqdm', 'tqdm', (['imgs'], {}), '(imgs)\n', (21372, 21378), False, 'from tqdm import tqdm\n'), ((2346, 2356), 'pathlib.Path', 'Path', (['data'], {}), '(data)\n', (2350, 2356), False, 'from pathlib import Path\n'), ((2773, 2800), 'yoloexplorer.dataset.get_dataset_info', 'get_dataset_info', (['self.data'], {}), '(self.data)\n', (2789, 2800), False, 'from yoloexplorer.dataset import get_dataset_info, Dataset\n'), ((3792, 3909), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""LanceDB embedding space already exists. Attempting to reuse it. Use force=True to overwrite."""'], {}), "(\n 'LanceDB embedding space already exists. Attempting to reuse it. Use force=True to overwrite.'\n )\n", (3803, 3909), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((5996, 6082), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['"""No embedding space found. Please build the embedding space first."""'], {}), "(\n 'No embedding space found. Please build the embedding space first.')\n", (6008, 6082), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((6779, 6865), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['"""No embedding space found. Please build the embedding space first."""'], {}), "(\n 'No embedding space found. 
Please build the embedding space first.')\n", (6791, 6865), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((8007, 8034), 'numpy.mean', 'np.mean', (['embeddings'], {'axis': '(0)'}), '(embeddings, axis=0)\n', (8014, 8034), True, 'import numpy as np\n'), ((8928, 8951), 'cv2.imread', 'cv2.imread', (["row['path']"], {}), "(row['path'])\n", (8938, 8951), False, 'import cv2\n'), ((8970, 9006), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (8982, 9006), False, 'import cv2\n'), ((9343, 9374), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['"""No images found"""'], {}), "('No images found')\n", (9355, 9374), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((10534, 10620), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['"""No embedding space found. Please build the embedding space first."""'], {}), "(\n 'No embedding space found. Please build the embedding space first.')\n", (10546, 10620), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((10676, 10747), 'ultralytics.utils.LOGGER.warning', 'LOGGER.warning', (['"""top_k should be between 0 and 1. Setting top_k to 1.0"""'], {}), "('top_k should be between 0 and 1. Setting top_k to 1.0')\n", (10690, 10747), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((10808, 10879), 'ultralytics.utils.LOGGER.warning', 'LOGGER.warning', (['"""top_k should be between 0 and 1. Setting top_k to 0.0"""'], {}), "('top_k should be between 0 and 1. Setting top_k to 0.0')\n", (10822, 10879), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((11856, 11902), 'pyarrow.FixedSizeListArray.from_arrays', 'pa.FixedSizeListArray.from_arrays', (['values', 'dim'], {}), '(values, dim)\n', (11889, 11902), True, 'import pyarrow as pa\n'), ((12287, 12297), 'tqdm.tqdm', 'tqdm', (['embs'], {}), '(embs)\n', (12291, 12297), False, 'from tqdm import tqdm\n'), ((12680, 12704), 'numpy.sort', 'np.sort', (['self._sim_index'], {}), '(self._sim_index)\n', (12687, 12704), True, 'import numpy as np\n'), ((13390, 13404), 'numpy.sort', 'np.sort', (['index'], {}), '(index)\n', (13397, 13404), True, 'import numpy as np\n'), ((15313, 15359), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""No changes made to the dataset."""'], {}), "('No changes made to the dataset.')\n", (15324, 15359), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((16155, 16201), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""No changes made to the dataset."""'], {}), "('No changes made to the dataset.')\n", (16166, 16201), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((18450, 18517), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""No table found. Please provide a dataset to work on."""'], {}), "('No table found. 
Please provide a dataset to work on.')\n", (18461, 18517), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((19159, 19182), 'json.dump', 'json.dump', (['config', 'file'], {}), '(config, file)\n', (19168, 19182), False, 'import json\n'), ((20726, 20742), 'pathlib.Path', 'Path', (['table_path'], {}), '(table_path)\n', (20730, 20742), False, 'from pathlib import Path\n'), ((20765, 20781), 'pathlib.Path', 'Path', (['table_path'], {}), '(table_path)\n', (20769, 20781), False, 'from pathlib import Path\n'), ((21079, 21094), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (21087, 21094), True, 'import numpy as np\n'), ((21884, 21944), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['f"""Supported for {model_arch} is not added yet"""'], {}), "(f'Supported for {model_arch} is not added yet')\n", (21896, 21944), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((21957, 21968), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (21965, 21968), False, 'import sys\n'), ((1115, 1137), 'cv2.imencode', 'cv2.imencode', (['ext', 'img'], {}), '(ext, img)\n', (1127, 1137), False, 'import cv2\n'), ((2877, 2906), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224, 224)'], {}), '((224, 224))\n', (2894, 2906), False, 'from torchvision import datasets, transforms\n'), ((2924, 2945), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2943, 2945), False, 'from torchvision import datasets, transforms\n'), ((4142, 4254), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""Table length does not match the number of images in the dataset. Building embeddings..."""'], {}), "(\n 'Table length does not match the number of images in the dataset. Building embeddings...'\n )\n", (4153, 4254), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((5262, 5286), 'pandas.DataFrame', 'pd.DataFrame', (['table_data'], {}), '(table_data)\n', (5274, 5286), True, 'import pandas as pd\n'), ((5308, 5380), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['self._embedding_func', 'df', '"""path"""'], {'batch_size': 'batch_size'}), "(self._embedding_func, df, 'path', batch_size=batch_size)\n", (5323, 5380), False, 'from lancedb.embeddings import with_embeddings\n'), ((5648, 5665), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5659, 5665), False, 'from collections import defaultdict\n'), ((9052, 9066), 'ultralytics.utils.plotting.Annotator', 'Annotator', (['img'], {}), '(img)\n', (9061, 9066), False, 'from ultralytics.utils.plotting import Annotator, colors\n'), ((10986, 11065), 'ultralytics.utils.LOGGER.warning', 'LOGGER.warning', (['"""sim_thres should be between 0 and 1. Setting sim_thres to 1.0"""'], {}), "('sim_thres should be between 0 and 1. Setting sim_thres to 1.0')\n", (11000, 11065), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((11146, 11225), 'ultralytics.utils.LOGGER.warning', 'LOGGER.warning', (['"""sim_thres should be between 0 and 1. Setting sim_thres to 0.0"""'], {}), "('sim_thres should be between 0 and 1. 
Setting sim_thres to 0.0')\n", (11160, 11225), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((16481, 16491), 'pathlib.Path', 'Path', (['name'], {}), '(name)\n', (16485, 16491), False, 'from pathlib import Path\n'), ((18601, 18618), 'duckdb.sql', 'duckdb.sql', (['query'], {}), '(query)\n', (18611, 18618), False, 'import duckdb\n'), ((19426, 19447), 'ultralytics.utils.colorstr', 'colorstr', (['"""LanceDB: """'], {}), "('LanceDB: ')\n", (19434, 19447), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((5690, 5710), 'ultralytics.utils.colorstr', 'colorstr', (['"""LanceDB:"""'], {}), "('LanceDB:')\n", (5698, 5710), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((11817, 11829), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (11827, 11829), True, 'import pyarrow as pa\n'), ((18805, 18827), 'pathlib.Path', 'Path', (['TEMP_CONFIG_PATH'], {}), '(TEMP_CONFIG_PATH)\n', (18809, 18827), False, 'from pathlib import Path\n'), ((7473, 7580), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['"""img should be index from the table(int), path of an image (str or Path), or bytes"""'], {}), "(\n 'img should be index from the table(int), path of an image (str or Path), or bytes'\n )\n", (7485, 7580), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((17904, 17914), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (17912, 17914), False, 'from pathlib import Path\n'), ((20217, 20238), 'ultralytics.utils.colorstr', 'colorstr', (['"""LanceDB: """'], {}), "('LanceDB: ')\n", (20225, 20238), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((20601, 20622), 'ultralytics.utils.colorstr', 'colorstr', (['"""LanceDB: """'], {}), "('LanceDB: ')\n", (20609, 20622), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((7381, 7396), 'numpy.array', 'np.array', (['array'], {}), '(array)\n', (7389, 7396), True, 'import numpy as np\n'), ((9205, 9222), 'ultralytics.utils.plotting.colors', 'colors', (['cls', '(True)'], {}), '(cls, True)\n', (9211, 9222), False, 'from ultralytics.utils.plotting import Annotator, colors\n'), ((17865, 17891), 'pathlib.Path', 'Path', (['(path / datafile_name)'], {}), '(path / datafile_name)\n', (17869, 17891), False, 'from pathlib import Path\n')] |
"""
Run this script to benchmark the serial search performance of FTS and vector search
"""
import argparse
import random
from functools import lru_cache
from pathlib import Path
from typing import Any
from codetiming import Timer
from config import Settings
from rich import progress
from schemas.wine import SearchResult
from sentence_transformers import SentenceTransformer
import lancedb
from lancedb.table import Table
# Custom types
JsonBlob = dict[str, Any]
@lru_cache()
def get_settings():
# Use lru_cache to avoid loading .env file for every request
return Settings()
def get_query_terms(filename: str) -> list[str]:
assert filename.endswith(".txt")
query_terms_file = Path("./benchmark_queries") / filename
with open(query_terms_file, "r") as f:
queries = f.readlines()
assert queries
result = [query.strip() for query in queries]
return result
def fts_search(table: Table, query: str) -> list[SearchResult] | None:
search_result = (
table.search(query, vector_column_name="description")
.select(["id", "title", "description", "country", "variety", "price", "points"])
.limit(10)
).to_pydantic(SearchResult)
if not search_result:
return None
return search_result
def vector_search(model, table: Table, query: str) -> list[SearchResult] | None:
query_vector = model.encode(query.lower())
search_result = (
table.search(query_vector)
.metric("cosine")
.nprobes(20)
.select(["id", "title", "description", "country", "variety", "price", "points"])
.limit(10)
).to_pydantic(SearchResult)
if not search_result:
return None
return search_result
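# Minimal usage sketch (assumes the "wines" table and sentence-transformer
# model set up in the __main__ block below):
#   hits = vector_search(MODEL, tbl, "earthy chilean merlot")
#   if hits:
#       print(hits[0].title, hits[0].points)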
def main():
    if args.search == "fts":
        # mirrors the http://localhost:8000/fts_search endpoint
        queries = get_query_terms("keyword_terms.txt")
    else:
        # mirrors the http://localhost:8000/vector_search endpoint
        queries = get_query_terms("vector_terms.txt")
random_choice_queries = [random.choice(queries) for _ in range(LIMIT)]
# Run the search directly on the lancedb table
with Timer(name="Serial search", text="Finished search in {:.4f} sec"):
# Add rich progress bar
with progress.Progress(
"[progress.description]{task.description}",
progress.BarColumn(),
"[progress.percentage]{task.percentage:>3.0f}%",
progress.TimeElapsedColumn(),
) as prog:
overall_progress_task = prog.add_task(
f"Performing {args.search} search", total=len(random_choice_queries)
)
for query in random_choice_queries:
if args.search == "fts":
_ = fts_search(tbl, query)
else:
_ = vector_search(MODEL, tbl, query)
prog.update(overall_progress_task, advance=1)
if __name__ == "__main__":
# fmt: off
parser = argparse.ArgumentParser()
parser.add_argument("--seed", type=int, default=37, help="Seed for random number generator")
parser.add_argument("--limit", "-l", type=int, default=10, help="Number of search terms to randomly generate")
parser.add_argument("--search", type=str, default="fts", help="Specify whether to do FTS or vector search")
args = parser.parse_args()
# fmt: on
LIMIT = args.limit
    SEED = args.seed
    random.seed(SEED)  # seed the RNG so the sampled queries are reproducible
# Assert that the search type is only one of "fts" or "vector"
assert args.search in ["fts", "vector"], "Please specify a valid search type: 'fts' or 'vector'"
# Assumes that the table in the DB has already been created
DB_NAME = "./winemag"
TABLE = "wines"
db = lancedb.connect(DB_NAME)
tbl = db.open_table(TABLE)
# Load a sentence transformer model for semantic similarity from a specified checkpoint
model_id = get_settings().embedding_model_checkpoint
assert model_id, "Invalid embedding model checkpoint specified in .env file"
MODEL = SentenceTransformer(model_id)
main()
| [
"lancedb.connect"
] | [((471, 482), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (480, 482), False, 'from functools import lru_cache\n'), ((579, 589), 'config.Settings', 'Settings', ([], {}), '()\n', (587, 589), False, 'from config import Settings\n'), ((2943, 2968), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2966, 2968), False, 'import argparse\n'), ((3672, 3696), 'lancedb.connect', 'lancedb.connect', (['DB_NAME'], {}), '(DB_NAME)\n', (3687, 3696), False, 'import lancedb\n'), ((3971, 4000), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['model_id'], {}), '(model_id)\n', (3990, 4000), False, 'from sentence_transformers import SentenceTransformer\n'), ((701, 728), 'pathlib.Path', 'Path', (['"""./benchmark_queries"""'], {}), "('./benchmark_queries')\n", (705, 728), False, 'from pathlib import Path\n'), ((2009, 2031), 'random.choice', 'random.choice', (['queries'], {}), '(queries)\n', (2022, 2031), False, 'import random\n'), ((2116, 2181), 'codetiming.Timer', 'Timer', ([], {'name': '"""Serial search"""', 'text': '"""Finished search in {:.4f} sec"""'}), "(name='Serial search', text='Finished search in {:.4f} sec')\n", (2121, 2181), False, 'from codetiming import Timer\n'), ((2315, 2335), 'rich.progress.BarColumn', 'progress.BarColumn', ([], {}), '()\n', (2333, 2335), False, 'from rich import progress\n'), ((2410, 2438), 'rich.progress.TimeElapsedColumn', 'progress.TimeElapsedColumn', ([], {}), '()\n', (2436, 2438), False, 'from rich import progress\n')] |
from neumai.Shared.NeumSinkInfo import NeumSinkInfo
from neumai.Shared.NeumVector import NeumVector
from neumai.Shared.NeumSearch import NeumSearchResult
from neumai.Shared.Exceptions import (
LanceDBInsertionException,
LanceDBIndexInfoException,
LanceDBIndexCreationException,
LanceDBQueryException
)
from neumai.SinkConnectors.SinkConnector import SinkConnector
from typing import List, Optional
from neumai.SinkConnectors.filter_utils import FilterCondition
from pydantic import Field
import lancedb
from lancedb import DBConnection
class LanceDBSink(SinkConnector):
"""
LanceDB sink
A sink connector for LanceDB, designed to facilitate data output into a
LanceDB storage system. For details about LanceDB, refer to
https://github.com/lancedb/lancedb.
LanceDB supports flat search as well as ANN search.
For indexing, read here - https://lancedb.github.io/lancedb/ann_indexes/#creating-an-ivf_pq-index
Attributes:
-----------
uri: str
URI for LanceDB database.
api_key: str
If presented, connect to LanceDB cloud.
Otherwise, connect to a database on file system or cloud storage.
region: str
Region for use of LanceDB cloud.
table_name: str
Name of LanceDB table to use
create_index: bool
LanceDB offers flat search as well as ANN search. If set to True,
a vector index would be created for searching instead of a
brute-force knn search.
    metric: str
        The distance metric to use. Defaults to 'cosine'; 'L2' (euclidean) and
        'dot' distance are also supported. Needs to be set if create_index is True.
    num_partitions: int
        The number of partitions of the index. Needs to be set if create_index
        is True, and should be tuned to the size of the data.
    num_sub_vectors: int
        The number of sub-vectors (M) that will be created during
        Product Quantization (PQ). A D-dimensional vector is divided into
        M sub-vectors of size D/M, each represented by a single PQ code.
accelerator: str
The accelerator to use for the index creation process. Supports GPU and MPS.
Example usage:
ldb = LanceDBSink(uri="data/test_ldb_sink", table_name="demo_ldb_table")
ldb.store(neum_vectors)
ldb.search(query)
"""
uri: str = Field(..., description="URI for LanceDB database")
api_key: Optional[str] = Field(default=None, description="API key for LanceDB cloud")
region: Optional[str] = Field(default=None, description="Region for use of LanceDB cloud")
table_name: str = Field(..., description="Name of LanceDB table to use")
create_index: bool = Field(default=False, description="Boolean to create index or use flat search")
metric: str = Field(default="cosine", description="The distance metric to use in the index")
num_partitions: int = Field(default=256, description="The number of partitions of the index")
num_sub_vectors: int = Field(default=96, description="The number of sub-vectors (M) that will be created during Product Quantization (PQ)")
accelerator: str = Field(default=None, description="Specify to cuda or mps (on Apple Silicon) to enable GPU training.")
# Check API reference for more details
# - https://lancedb.github.io/lancedb/python/python/#lancedb.connect
# db: DBConnection = lancedb.connect(uri=uri, api_key=api_key, region=region)
@property
def sink_name(self) -> str:
return "LanceDBSink"
@property
def required_properties(self) -> List[str]:
return ['uri', 'api_key', 'table_name']
@property
def optional_properties(self) -> List[str]:
return []
def validation(self) -> bool:
"""config_validation connector setup"""
db = lancedb.connect(uri=self.uri, api_key=self.api_key, region=self.region)
return True
def _get_db_connection(self) -> DBConnection:
return lancedb.connect(uri=self.uri, api_key=self.api_key, region=self.region)
def store(self, vectors_to_store: List[NeumVector]) -> int:
db = self._get_db_connection()
table_name = self.table_name
data = []
for vec in vectors_to_store:
dic = {
'id': vec.id,
'vector': vec.vector,
}
for k,v in vec.metadata.items():
dic[k] = v
data.append(dic)
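        # NOTE: mode="overwrite" below replaces any existing table with this
        # name, so each store() call rewrites the table rather than appending.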
tbl = db.create_table(table_name, data=data, mode="overwrite")
if tbl:
return len(tbl.to_pandas())
raise LanceDBInsertionException("LanceDB storing failed. Try later")
def search(self, vector: List[float], number_of_results: int, filters: List[FilterCondition] = []) -> List[NeumSearchResult]:
db = self._get_db_connection()
tbl = db.open_table(self.table_name)
if self.create_index:
# For more details, refer to docs
# - https://lancedb.github.io/lancedb/python/python/#lancedb.table.Table.create_index
try:
tbl.create_index(
metric=self.metric,
num_partitions=self.num_partitions,
num_sub_vectors=self.num_sub_vectors,
accelerator=self.accelerator,
replace=True)
except Exception as e:
raise LanceDBIndexCreationException(f"LanceDB index creation failed. \nException - {e}")
try:
search_results = tbl.search(query=vector)
for filter in filters:
search_results = search_results.where(f"{filter.field} {filter.operator.value} {filter.value}")
search_results = search_results.limit(number_of_results).to_pandas()
except Exception as e:
raise LanceDBQueryException(f"Failed to query LanceDB. Exception - {e}")
matches = []
cols = search_results.columns
for i in range(len(search_results)):
_id = search_results.iloc[i]['id']
_vec = list(search_results.iloc[i]['vector'])
matches.append(
NeumSearchResult(
id=_id,
vector=_vec,
metadata={k:search_results.iloc[i][k] for k in cols if k not in ['id', 'vector', '_distance']},
score=1-search_results.iloc[i]['_distance']
)
)
return matches
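    # Illustrative call (the "year" filter field is a hypothetical placeholder;
    # the actual FilterCondition schema lives in neumai.SinkConnectors.filter_utils):
    #   matches = ldb.search(vector=query_vec, number_of_results=5,
    #                        filters=[FilterCondition(field="year", operator=op, value=2020)])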
def get_representative_vector(self) -> list:
db = self._get_db_connection()
tbl = db.open_table(self.table_name)
return list(tbl.to_pandas()['vector'].mean())
def info(self) -> NeumSinkInfo:
try:
db = self._get_db_connection()
tbl = db.open_table(self.table_name)
return(NeumSinkInfo(number_vectors_stored=len(tbl)))
except Exception as e:
raise LanceDBIndexInfoException(f"Failed to get information from LanceDB. Exception - {e}")
def delete_vectors_with_file_id(self, file_id: str) -> bool:
db = self._get_db_connection()
table_name = self.table_name
tbl = db.open_table(table_name)
try:
tbl.delete(where=f"id = '{file_id}'")
        except Exception as e:
            raise Exception(f"LanceDB deletion by file id failed. Exception - {e}")
return True | [
"lancedb.connect"
] | [((2397, 2447), 'pydantic.Field', 'Field', (['...'], {'description': '"""URI for LanceDB database"""'}), "(..., description='URI for LanceDB database')\n", (2402, 2447), False, 'from pydantic import Field\n'), ((2477, 2537), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""API key for LanceDB cloud"""'}), "(default=None, description='API key for LanceDB cloud')\n", (2482, 2537), False, 'from pydantic import Field\n'), ((2566, 2632), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Region for use of LanceDB cloud"""'}), "(default=None, description='Region for use of LanceDB cloud')\n", (2571, 2632), False, 'from pydantic import Field\n'), ((2655, 2709), 'pydantic.Field', 'Field', (['...'], {'description': '"""Name of LanceDB table to use"""'}), "(..., description='Name of LanceDB table to use')\n", (2660, 2709), False, 'from pydantic import Field\n'), ((2735, 2813), 'pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Boolean to create index or use flat search"""'}), "(default=False, description='Boolean to create index or use flat search')\n", (2740, 2813), False, 'from pydantic import Field\n'), ((2832, 2910), 'pydantic.Field', 'Field', ([], {'default': '"""cosine"""', 'description': '"""The distance metric to use in the index"""'}), "(default='cosine', description='The distance metric to use in the index')\n", (2837, 2910), False, 'from pydantic import Field\n'), ((2937, 3008), 'pydantic.Field', 'Field', ([], {'default': '(256)', 'description': '"""The number of partitions of the index"""'}), "(default=256, description='The number of partitions of the index')\n", (2942, 3008), False, 'from pydantic import Field\n'), ((3036, 3162), 'pydantic.Field', 'Field', ([], {'default': '(96)', 'description': '"""The number of sub-vectors (M) that will be created during Product Quantization (PQ)"""'}), "(default=96, description=\n 'The number of sub-vectors (M) that will be created during Product Quantization (PQ)'\n )\n", (3041, 3162), False, 'from pydantic import Field\n'), ((3176, 3281), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Specify to cuda or mps (on Apple Silicon) to enable GPU training."""'}), "(default=None, description=\n 'Specify to cuda or mps (on Apple Silicon) to enable GPU training.')\n", (3181, 3281), False, 'from pydantic import Field\n'), ((3852, 3923), 'lancedb.connect', 'lancedb.connect', ([], {'uri': 'self.uri', 'api_key': 'self.api_key', 'region': 'self.region'}), '(uri=self.uri, api_key=self.api_key, region=self.region)\n', (3867, 3923), False, 'import lancedb\n'), ((4015, 4086), 'lancedb.connect', 'lancedb.connect', ([], {'uri': 'self.uri', 'api_key': 'self.api_key', 'region': 'self.region'}), '(uri=self.uri, api_key=self.api_key, region=self.region)\n', (4030, 4086), False, 'import lancedb\n'), ((4629, 4691), 'neumai.Shared.Exceptions.LanceDBInsertionException', 'LanceDBInsertionException', (['"""LanceDB storing failed. Try later"""'], {}), "('LanceDB storing failed. Try later')\n", (4654, 4691), False, 'from neumai.Shared.Exceptions import LanceDBInsertionException, LanceDBIndexInfoException, LanceDBIndexCreationException, LanceDBQueryException\n'), ((5864, 5930), 'neumai.Shared.Exceptions.LanceDBQueryException', 'LanceDBQueryException', (['f"""Failed to query LanceDB. Exception - {e}"""'], {}), "(f'Failed to query LanceDB. Exception - {e}')\n", (5885, 5930), False, 'from neumai.Shared.Exceptions import LanceDBInsertionException, LanceDBIndexInfoException, LanceDBIndexCreationException, LanceDBQueryException\n'), ((6186, 6376), 'neumai.Shared.NeumSearch.NeumSearchResult', 'NeumSearchResult', ([], {'id': '_id', 'vector': '_vec', 'metadata': "{k: search_results.iloc[i][k] for k in cols if k not in ['id', 'vector',\n '_distance']}", 'score': "(1 - search_results.iloc[i]['_distance'])"}), "(id=_id, vector=_vec, metadata={k: search_results.iloc[i][k\n ] for k in cols if k not in ['id', 'vector', '_distance']}, score=1 -\n search_results.iloc[i]['_distance'])\n", (6202, 6376), False, 'from neumai.Shared.NeumSearch import NeumSearchResult\n'), ((6958, 7048), 'neumai.Shared.Exceptions.LanceDBIndexInfoException', 'LanceDBIndexInfoException', (['f"""Failed to get information from LanceDB. Exception - {e}"""'], {}), "(\n f'Failed to get information from LanceDB. Exception - {e}')\n", (6983, 7048), False, 'from neumai.Shared.Exceptions import LanceDBInsertionException, LanceDBIndexInfoException, LanceDBIndexCreationException, LanceDBQueryException\n'), ((5435, 5525), 'neumai.Shared.Exceptions.LanceDBIndexCreationException', 'LanceDBIndexCreationException', (['f"""LanceDB index creation failed. \nException - {e}"""'], {}), '(\n    f"""LanceDB index creation failed. \nException - {e}""")\n', (5464, 5525), False, 'from neumai.Shared.Exceptions import LanceDBInsertionException, LanceDBIndexInfoException, LanceDBIndexCreationException, LanceDBQueryException\n')]
from FlagEmbedding import LLMEmbedder, FlagReranker
import lancedb
import re
import pandas as pd
import random
from datasets import load_dataset
import torch
import gc
from lancedb.embeddings import with_embeddings
embed_model = LLMEmbedder(
"BAAI/llm-embedder", use_fp16=False
) # Load model (automatically use GPUs)
reranker_model = FlagReranker(
"BAAI/bge-reranker-base", use_fp16=True
) # use_fp16 speeds up computation with a slight performance degradation
task = "qa" # Encode for a specific task (qa, icl, chat, lrlm, tool, convsearch)
# get embedding using LLM embedder
def embed_documents(batch):
"""
Function to embed the whole text data
"""
return embed_model.encode_keys(batch, task=task) # Encode data or 'keys'
def search(table, query, top_k=10):
"""
Search a query from the table
"""
query_vector = embed_model.encode_queries(
query, task=task
) # Encode the QUERY (it is done differently than the 'key')
search_results = table.search(query_vector).limit(top_k)
return search_results
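# Sketch of the retrieve-then-rerank flow wired up in main() below:
#   candidates = search(table, query, top_k=10).to_pandas()
#   reranked = rerank(query, candidates)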
def rerank(query, search_results):
search_results["old_similarity_rank"] = search_results.index + 1 # Old ranks
torch.cuda.empty_cache()
gc.collect()
search_results["new_scores"] = reranker_model.compute_score(
[[query, chunk] for chunk in search_results["text"]]
) # Re compute ranks
return search_results.sort_values(by="new_scores", ascending=False).reset_index(
drop=True
)
def main():
queries = load_dataset("BeIR/scidocs", "queries")["queries"].to_pandas()
docs = (
load_dataset("BeIR/scidocs", "corpus")["corpus"]
.to_pandas()
.dropna(subset="text")
.sample(10000)
) # just random samples for faster embed demo
# create Database using LanceDB Cloud
uri = "db://your-project-slug"
api_key = "sk_..."
db = lancedb.connect(uri, api_key=api_key, region="us-east-1")
table_name = "doc_embed"
    try:
        # Embed the text chunks and store them in the DB
        data = with_embeddings(
            embed_documents, docs, column="text", show_progress=True, batch_size=128
        )
        table = db.create_table(table_name, data=data)  # create table
    except Exception:
        table = db.open_table(table_name)  # table already exists; open it
query = random.choice(queries["text"])
print("QUERY:-> ", query)
# get top_k search results
search_results = (
search(table, "what is mitochondria?", top_k=10)
.to_pandas()
.dropna(subset="text")
.reset_index(drop=True)
)
print("SEARCH RESULTS:-> ", search_results)
# Rerank search results using Reranker from BGE Reranker
print("QUERY:-> ", query)
search_results_reranked = rerank(query, search_results)
print("SEARCH RESULTS RERANKED:-> ", search_results_reranked)
if __name__ == "__main__":
main()
| [
"lancedb.embeddings.with_embeddings",
"lancedb.connect"
] | [((233, 281), 'FlagEmbedding.LLMEmbedder', 'LLMEmbedder', (['"""BAAI/llm-embedder"""'], {'use_fp16': '(False)'}), "('BAAI/llm-embedder', use_fp16=False)\n", (244, 281), False, 'from FlagEmbedding import LLMEmbedder, FlagReranker\n'), ((344, 397), 'FlagEmbedding.FlagReranker', 'FlagReranker', (['"""BAAI/bge-reranker-base"""'], {'use_fp16': '(True)'}), "('BAAI/bge-reranker-base', use_fp16=True)\n", (356, 397), False, 'from FlagEmbedding import LLMEmbedder, FlagReranker\n'), ((1196, 1220), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (1218, 1220), False, 'import torch\n'), ((1225, 1237), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1235, 1237), False, 'import gc\n'), ((1897, 1954), 'lancedb.connect', 'lancedb.connect', (['uri'], {'api_key': 'api_key', 'region': '"""us-east-1"""'}), "(uri, api_key=api_key, region='us-east-1')\n", (1912, 1954), False, 'import lancedb\n'), ((2072, 2165), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['embed_documents', 'docs'], {'column': '"""text"""', 'show_progress': '(True)', 'batch_size': '(128)'}), "(embed_documents, docs, column='text', show_progress=True,\n batch_size=128)\n", (2087, 2165), False, 'from lancedb.embeddings import with_embeddings\n'), ((2340, 2370), 'random.choice', 'random.choice', (["queries['text']"], {}), "(queries['text'])\n", (2353, 2370), False, 'import random\n'), ((1528, 1567), 'datasets.load_dataset', 'load_dataset', (['"""BeIR/scidocs"""', '"""queries"""'], {}), "('BeIR/scidocs', 'queries')\n", (1540, 1567), False, 'from datasets import load_dataset\n'), ((1612, 1650), 'datasets.load_dataset', 'load_dataset', (['"""BeIR/scidocs"""', '"""corpus"""'], {}), "('BeIR/scidocs', 'corpus')\n", (1624, 1650), False, 'from datasets import load_dataset\n')] |
import os
import urllib.request
import shutil
import html2text
import predictionguard as pg
from langchain import PromptTemplate, FewShotPromptTemplate
from langchain.text_splitter import CharacterTextSplitter
from sentence_transformers import SentenceTransformer
import numpy as np
import lancedb
from lancedb.embeddings import with_embeddings
import pandas as pd
import json
os.environ['PREDICTIONGUARD_TOKEN'] = "q1VuOjnffJ3NO2oFN8Q9m8vghYc84ld13jaqdF7E"
# get the ruleset from a local file
fp = urllib.request.urlopen("file:///home/ubuntu/insuranceagent.html")
mybytes = fp.read()
html = mybytes.decode("utf8")
fp.close()
# and convert it to text
h = html2text.HTML2Text()
h.ignore_links = True
text = h.handle(html)
text = text.split("Introduction")[1]
# Chunk the text into smaller pieces for injection into LLM prompts.
text_splitter = CharacterTextSplitter(chunk_size=700, chunk_overlap=50)
docs = text_splitter.split_text(text)
docs = [x.replace('#', '-') for x in docs]
# Now we need to embed these documents and put them into a "vector store" or
# "vector db" that we will use for semantic search and retrieval.
# Embeddings setup
name="all-MiniLM-L12-v2"
model = SentenceTransformer(name)
def embed_batch(batch):
return [model.encode(sentence) for sentence in batch]
def embed(sentence):
return model.encode(sentence)
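# with_embeddings() below runs embed_batch over the dataframe's "text" column
# and returns the data with an added "vector" column ready for the table.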
# LanceDB setup
if os.path.exists(".lancedb"):
shutil.rmtree(".lancedb")
os.mkdir(".lancedb")
uri = ".lancedb"
db = lancedb.connect(uri)
# Create a dataframe with the chunk ids and chunks
metadata = []
for i in range(len(docs)):
metadata.append([i,docs[i]])
doc_df = pd.DataFrame(metadata, columns=["chunk", "text"])
# Embed the documents
data = with_embeddings(embed_batch, doc_df)
# Create the DB table; create_table already inserts the records, so adding
# them again afterwards would duplicate every row.
db.create_table("linux", data=data)
table = db.open_table("linux")
# Now let's augment our Q&A prompt with this external knowledge on-the-fly!!!
template = """### Instruction:
Read the below input context and respond with a short answer to the given question. Use only the information in the below input to answer the question. If you cannot answer the question, respond with "Sorry, I can't find an answer, but you might try looking in the following resource."
### Input:
Context: {context}
Question: {question}
### Response:
"""
qa_prompt = PromptTemplate(
input_variables=["context", "question"],
template=template,
)
#define the pre-prompt in order to give the LLM a little bit of expertise
pre_prompt="You are an expert insurance agent. You are getting information about a property. The information is a mixture of the state of the house and the homeowner's complaints. The state of the house will be just a few words describing the condition (for example, water damage). You will analyze the input and produce exactly three insights. These insights should constitute maintenance and protection recommendations for homeowners tailored to their home's condition. All the insights are at most 20 words long. Generate the insights in this form: Insight 1: (text), then on a new line, Insight 2: (text), then on a new line, Insight 3: (text). Only generate the insights and nothing else. Keep a professional tone. Do not quote anyone. Do not add unrelated information. Do not add any code. Here is the home's condition: "
def rag_answer(message):
# Search the for relevant context
results = table.search(embed(message)).limit(10).to_pandas()
results.sort_values(by=['_distance'], inplace=True, ascending=True)
doc_use = results['text'].values[0]
# Augment the prompt with the context
prompt = qa_prompt.format(context=doc_use, question=message)
# Get a response
result = pg.Completion.create(
model="Nous-Hermes-Llama2-13B",
prompt=prompt
)
return result['choices'][0]['text']
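# Example (hypothetical condition string):
#   print(rag_answer(pre_prompt + "Water damage in the basement and cracked walls."))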
with open('vision_output.json','r') as json_file:
data=json.load(json_file)
visionoutput=data['vision_output']
with open('data.json','r') as json_file:
data=json.load(json_file)
ownercomplaint=data['text']
house_condition=visionoutput+". "+ownercomplaint
#house_condition="Water damage. The gas lines don't work. The kitchen is spotless. The building is in good condition and the walls do not have any cracks in them. There is a termite infestation in the basement."
response=rag_answer(pre_prompt+house_condition)
#response = rag_answer("A house has been destroyed by a tornado and also has been set on fire. The water doesn't work but the gas lines are fine. The area the house is in is notorious for crime. It is built in an earthquake prone zone. There are cracks in the walls and it is quite old.")
print('')
print("3 insights that we've generated based on your report are:\n", response)
with open('insights.json', 'w') as json_file:
json.dump(response,json_file)
with open('stats_output.json','r') as json_file:
data=json.load(json_file)
predicted_claim=str(data['stats'])
#predicted_claim=0.5 #input from statistical model
full_report_pre_prompt="You are an expert insurance agent. You have been given a list of personalized insights about a home that has been surveyed, along with a probability that the homeowner files a claim in the next 3 to 6 months. Based on this, give the property a rating from 1 to 5, where 5 means that the property is healthy, and also explain why the rating was given in not more than 180 words, based on the input insights. A rating of 1 means that the property is not healthy at all. In this scenario, a healthy property is one that has mostly positive or neutral insights and a low probability of having a claim filed. An unhealthy property is one that has mostly negative insights and a high probability of having a claim filed. Remember that even if the homeowner has a high chance of filing a claim, the property may have positive insights and therefore you should give it a higher score. The rating should be at the beginning of your response. Ensure that you do not have any incomplete sentences. Do not quote anyone. Do not quote any insights verbatim. Keep the tone professional. You are permitted to expand upon the insights but do not stray. Ensure that you complete each sentence. Keep the report to only one continuous paragraph. The insights are: "
#full_report_temp_prompt=full_report_pre_prompt+response
full_report_final_prompt=full_report_pre_prompt+response+". The probability of filing a claim is: "+str(predicted_claim)
full_report=rag_answer(full_report_final_prompt)
#full_report_temp_2=rag_answer(full_report_final_prompt)
#full_report_second_prompt="You are an insurance agent that was given an incomplete report. You have psychic powers and can complete missing reports, with perfect extrapolation. Complete the given incomplete report: "
#full_report=rag_answer(full_report_second_prompt+full_report_temp_2)
print("The full report is: ")
print(full_report)
with open('fullreport.json','w') as json_file:
json.dump(full_report,json_file)
| [
"lancedb.embeddings.with_embeddings",
"lancedb.connect"
] | [((657, 678), 'html2text.HTML2Text', 'html2text.HTML2Text', ([], {}), '()\n', (676, 678), False, 'import html2text\n'), ((847, 902), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(700)', 'chunk_overlap': '(50)'}), '(chunk_size=700, chunk_overlap=50)\n', (868, 902), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((1183, 1208), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['name'], {}), '(name)\n', (1202, 1208), False, 'from sentence_transformers import SentenceTransformer\n'), ((1368, 1394), 'os.path.exists', 'os.path.exists', (['""".lancedb"""'], {}), "('.lancedb')\n", (1382, 1394), False, 'import os\n'), ((1427, 1447), 'os.mkdir', 'os.mkdir', (['""".lancedb"""'], {}), "('.lancedb')\n", (1435, 1447), False, 'import os\n'), ((1470, 1490), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (1485, 1490), False, 'import lancedb\n'), ((1626, 1675), 'pandas.DataFrame', 'pd.DataFrame', (['metadata'], {'columns': "['chunk', 'text']"}), "(metadata, columns=['chunk', 'text'])\n", (1638, 1675), True, 'import pandas as pd\n'), ((1706, 1742), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['embed_batch', 'doc_df'], {}), '(embed_batch, doc_df)\n', (1721, 1742), False, 'from lancedb.embeddings import with_embeddings\n'), ((2355, 2429), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['context', 'question']", 'template': 'template'}), "(input_variables=['context', 'question'], template=template)\n", (2369, 2429), False, 'from langchain import PromptTemplate, FewShotPromptTemplate\n'), ((1400, 1425), 'shutil.rmtree', 'shutil.rmtree', (['""".lancedb"""'], {}), "('.lancedb')\n", (1413, 1425), False, 'import shutil\n'), ((3719, 3786), 'predictionguard.Completion.create', 'pg.Completion.create', ([], {'model': '"""Nous-Hermes-Llama2-13B"""', 'prompt': 'prompt'}), "(model='Nous-Hermes-Llama2-13B', prompt=prompt)\n", (3739, 3786), True, 'import predictionguard as pg\n'), ((3902, 3922), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (3911, 3922), False, 'import json\n'), ((4010, 4030), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (4019, 4030), False, 'import json\n'), ((4800, 4830), 'json.dump', 'json.dump', (['response', 'json_file'], {}), '(response, json_file)\n', (4809, 4830), False, 'import json\n'), ((4889, 4909), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (4898, 4909), False, 'import json\n'), ((6933, 6966), 'json.dump', 'json.dump', (['full_report', 'json_file'], {}), '(full_report, json_file)\n', (6942, 6966), False, 'import json\n')] |
import json
import logging
from typing import Any, Dict, Generator, List, Optional, Sequence, Set, Tuple, Type
import lancedb
import pandas as pd
from dotenv import load_dotenv
from lancedb.pydantic import LanceModel, Vector
from lancedb.query import LanceVectorQueryBuilder
from pydantic import BaseModel, ValidationError, create_model
from src.embedding_models.base import (
EmbeddingModel,
EmbeddingModelsConfig,
)
from src.embedding_models.models import OpenAIEmbeddingsConfig
from src.types import Document, EmbeddingFunction
from src.utils.configuration import settings
from src.utils.pydantic_utils import (
clean_schema,
dataframe_to_document_model,
dataframe_to_documents,
extract_fields,
flatten_pydantic_instance,
flatten_pydantic_model,
nested_dict_from_flat,
)
from src.db.base import VectorStore, VectorStoreConfig
logger = logging.getLogger(__name__)
class LanceDBConfig(VectorStoreConfig):
collection_name: str | None = "temp"
storage_path: str = ".lancedb/data"
embedding: EmbeddingModelsConfig = OpenAIEmbeddingsConfig()
distance: str = "cosine"
document_class: Type[Document] = Document
flatten: bool = False # flatten Document class into LanceSchema ?
filter_fields: List[str] = [] # fields usable in filter
filter: str | None = None # filter condition for lexical/semantic search
class LanceDB(VectorStore):
def __init__(self, config: LanceDBConfig = LanceDBConfig()):
super().__init__(config)
self.config: LanceDBConfig = config
emb_model = EmbeddingModel.create(config.embedding)
self.embedding_fn: EmbeddingFunction = emb_model.embedding_fn()
self.embedding_dim = emb_model.embedding_dims
self.host = None
self.port = None
self.is_from_dataframe = False # were docs ingested from a dataframe?
self.df_metadata_columns: List[str] = [] # metadata columns from dataframe
self._setup_schemas(config.document_class)
load_dotenv()
try:
self.client = lancedb.connect(
uri=config.storage_path,
)
except Exception as e:
new_storage_path = config.storage_path + ".new"
logger.warning(
f"""
Error connecting to local LanceDB at {config.storage_path}:
{e}
Switching to {new_storage_path}
"""
)
self.client = lancedb.connect(
uri=new_storage_path,
)
# Note: Only create collection if a non-null collection name is provided.
# This is useful to delay creation of vecdb until we have a suitable
# collection name (e.g. we could get it from the url or folder path).
if config.collection_name is not None:
self.create_collection(
config.collection_name, replace=config.replace_collection
)
def _setup_schemas(self, doc_cls: Type[Document] | None) -> None:
doc_cls = doc_cls or self.config.document_class
self.unflattened_schema = self._create_lance_schema(doc_cls)
self.schema = (
self._create_flat_lance_schema(doc_cls)
if self.config.flatten
else self.unflattened_schema
)
def clear_empty_collections(self) -> int:
coll_names = self.list_collections()
n_deletes = 0
for name in coll_names:
nr = self.client.open_table(name).head(1).shape[0]
if nr == 0:
n_deletes += 1
self.client.drop_table(name)
return n_deletes
def clear_all_collections(self, really: bool = False, prefix: str = "") -> int:
"""Clear all collections with the given prefix."""
if not really:
logger.warning("Not deleting all collections, set really=True to confirm")
return 0
coll_names = [
c for c in self.list_collections(empty=True) if c.startswith(prefix)
]
if len(coll_names) == 0:
logger.warning(f"No collections found with prefix {prefix}")
return 0
n_empty_deletes = 0
n_non_empty_deletes = 0
for name in coll_names:
nr = self.client.open_table(name).head(1).shape[0]
n_empty_deletes += nr == 0
n_non_empty_deletes += nr > 0
self.client.drop_table(name)
logger.warning(
f"""
Deleted {n_empty_deletes} empty collections and
{n_non_empty_deletes} non-empty collections.
"""
)
return n_empty_deletes + n_non_empty_deletes
def list_collections(self, empty: bool = False) -> List[str]:
"""
Returns:
List of collection names that have at least one vector.
Args:
empty (bool, optional): Whether to include empty collections.
"""
colls = self.client.table_names()
if len(colls) == 0:
return []
if empty: # include empty tbls
return colls # type: ignore
counts = [self.client.open_table(coll).head(1).shape[0] for coll in colls]
return [coll for coll, count in zip(colls, counts) if count > 0]
def _create_lance_schema(self, doc_cls: Type[Document]) -> Type[BaseModel]:
"""
Create a subclass of LanceModel with fields:
- id (str)
- Vector field that has dims equal to
the embedding dimension of the embedding model, and a data field of type
DocClass.
- other fields from doc_cls
Args:
doc_cls (Type[Document]): A Pydantic model which should be a subclass of
Document, to be used as the type for the data field.
Returns:
Type[BaseModel]: A new Pydantic model subclassing from LanceModel.
Raises:
ValueError: If `n` is not a non-negative integer or if `DocClass` is not a
subclass of Document.
"""
if not issubclass(doc_cls, Document):
raise ValueError("DocClass must be a subclass of Document")
n = self.embedding_dim
# Prepare fields for the new model
fields = {"id": (str, ...), "vector": (Vector(n), ...)}
# Add both statically and dynamically defined fields from doc_cls
for field_name, field in doc_cls.model_fields.items():
fields[field_name] = (field.annotation, field.default)
# Create the new model with dynamic fields
NewModel = create_model(
"NewModel", __base__=LanceModel, **fields
) # type: ignore
return NewModel # type: ignore
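    # Sketch of the generated model for a Document subclass with a single
    # `content: str` field and a 1536-dim embedding model (sizes are illustrative):
    #   class NewModel(LanceModel):
    #       id: str
    #       vector: Vector(1536)
    #       content: str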
def _create_flat_lance_schema(self, doc_cls: Type[Document]) -> Type[BaseModel]:
"""
Flat version of the lance_schema, as nested Pydantic schemas are not yet
supported by LanceDB.
"""
lance_model = self._create_lance_schema(doc_cls)
FlatModel = flatten_pydantic_model(lance_model, base_model=LanceModel)
return FlatModel
def create_collection(self, collection_name: str, replace: bool = False) -> None:
"""
Create a collection with the given name, optionally replacing an existing
collection if `replace` is True.
Args:
collection_name (str): Name of the collection to create.
replace (bool): Whether to replace an existing collection
with the same name. Defaults to False.
"""
self.config.collection_name = collection_name
collections = self.list_collections()
if collection_name in collections:
coll = self.client.open_table(collection_name)
if coll.head().shape[0] > 0:
logger.warning(f"Non-empty Collection {collection_name} already exists")
if not replace:
logger.warning("Not replacing collection")
return
else:
logger.warning("Recreating fresh collection")
self.client.create_table(
collection_name, schema=self.schema, mode="overwrite", on_bad_vectors="drop"
)
tbl = self.client.open_table(self.config.collection_name)
# We assume "content" is available as top-level field
if "content" in tbl.schema.names:
tbl.create_fts_index("content", replace=True)
if settings.debug:
level = logger.getEffectiveLevel()
logger.setLevel(logging.INFO)
logger.setLevel(level)
def add_documents(self, documents: Sequence[Document]) -> None:
super().maybe_add_ids(documents)
colls = self.list_collections(empty=True)
if len(documents) == 0:
return
embedding_vecs = self.embedding_fn([doc.content for doc in documents])
coll_name = self.config.collection_name
if coll_name is None:
raise ValueError("No collection name set, cannot ingest docs")
if (
coll_name not in colls
or self.client.open_table(coll_name).head(1).shape[0] == 0
):
# collection either doesn't exist or is empty, so replace it,
# possibly with a new schema
doc_cls = type(documents[0])
self.config.document_class = doc_cls
self._setup_schemas(doc_cls)
self.create_collection(coll_name, replace=True)
ids = [str(d.id()) for d in documents]
# don't insert all at once, batch in chunks of b,
# else we get an API error
b = self.config.batch_size
def make_batches() -> Generator[List[BaseModel], None, None]:
for i in range(0, len(ids), b):
batch = [
self.unflattened_schema(
id=ids[i],
vector=embedding_vecs[i],
**doc.model_dump(),
)
for i, doc in enumerate(documents[i : i + b])
]
if self.config.flatten:
batch = [
flatten_pydantic_instance(instance) # type: ignore
for instance in batch
]
yield batch
tbl = self.client.open_table(self.config.collection_name)
try:
tbl.add(make_batches())
if "content" in tbl.schema.names:
tbl.create_fts_index("content", replace=True)
except Exception as e:
logger.error(
f"""
Error adding documents to LanceDB: {e}
POSSIBLE REMEDY: Delete the LancdDB storage directory
{self.config.storage_path} and try again.
"""
)
def add_dataframe(
self,
df: pd.DataFrame,
content: str = "content",
metadata: List[str] = [],
) -> None:
"""
Add a dataframe to the collection.
Args:
df (pd.DataFrame): A dataframe
content (str): The name of the column in the dataframe that contains the
text content to be embedded using the embedding model.
metadata (List[str]): A list of column names in the dataframe that contain
metadata to be stored in the database. Defaults to [].
"""
self.is_from_dataframe = True
actual_metadata = metadata.copy()
self.df_metadata_columns = actual_metadata # could be updated below
# get content column
content_values = df[content].values.tolist()
if "vector" not in df.columns:
embedding_vecs = self.embedding_fn(content_values)
df["vector"] = embedding_vecs
if content != "content":
# rename content column to "content", leave existing column intact
df = df.rename(columns={content: "content"}, inplace=False)
if "id" not in df.columns:
docs = dataframe_to_documents(df, content="content", metadata=metadata)
ids = [str(d.id()) for d in docs]
df["id"] = ids
if "id" not in actual_metadata:
actual_metadata += ["id"]
colls = self.list_collections(empty=True)
coll_name = self.config.collection_name
if (
coll_name not in colls
or self.client.open_table(coll_name).head(1).shape[0] == 0
):
# collection either doesn't exist or is empty, so replace it
# and set new schema from df
self.client.create_table(
self.config.collection_name,
data=df,
mode="overwrite",
on_bad_vectors="drop",
)
doc_cls = dataframe_to_document_model(
df,
content=content,
metadata=actual_metadata,
exclude=["vector"],
)
self.config.document_class = doc_cls # type: ignore
self._setup_schemas(doc_cls) # type: ignore
tbl = self.client.open_table(self.config.collection_name)
# We assume "content" is available as top-level field
if "content" in tbl.schema.names:
tbl.create_fts_index("content", replace=True)
else:
# collection exists and is not empty, so append to it
tbl = self.client.open_table(self.config.collection_name)
tbl.add(df)
if "content" in tbl.schema.names:
tbl.create_fts_index("content", replace=True)
def delete_collection(self, collection_name: str) -> None:
self.client.drop_table(collection_name)
def _lance_result_to_docs(self, result: LanceVectorQueryBuilder) -> List[Document]:
if self.is_from_dataframe:
df = result.to_pandas()
return dataframe_to_documents(
df,
content="content",
metadata=self.df_metadata_columns,
doc_cls=self.config.document_class,
)
else:
records = result.to_arrow().to_pylist()
return self._records_to_docs(records)
def _records_to_docs(self, records: List[Dict[str, Any]]) -> List[Document]:
if self.config.flatten:
docs = [
self.unflattened_schema(**nested_dict_from_flat(rec)) for rec in records
]
else:
try:
docs = [self.schema(**rec) for rec in records]
except ValidationError as e:
raise ValueError(
f"""
Error validating LanceDB result: {e}
HINT: This could happen when you're re-using an
existing LanceDB store with a different schema.
Try deleting your local lancedb storage at `{self.config.storage_path}`
re-ingesting your documents and/or replacing the collections.
"""
)
doc_cls = self.config.document_class
doc_cls_field_names = doc_cls.model_fields.keys()
return [
doc_cls(
**{
field_name: getattr(doc, field_name)
for field_name in doc_cls_field_names
}
)
for doc in docs
]
def get_all_documents(self, where: str = "") -> List[Document]:
if self.config.collection_name is None:
raise ValueError("No collection name set, cannot retrieve docs")
tbl = self.client.open_table(self.config.collection_name)
pre_result = tbl.search(None).where(where or None)
return self._lance_result_to_docs(pre_result)
def get_documents_by_ids(self, ids: List[str]) -> List[Document]:
if self.config.collection_name is None:
raise ValueError("No collection name set, cannot retrieve docs")
_ids = [str(id) for id in ids]
tbl = self.client.open_table(self.config.collection_name)
docs = [
self._lance_result_to_docs(tbl.search().where(f"id == '{_id}'"))
for _id in _ids
]
return docs
def similar_texts_with_scores(
self,
text: str,
k: int = 1,
where: Optional[str] = None,
) -> List[Tuple[Document, float]]:
embedding = self.embedding_fn([text])[0]
tbl = self.client.open_table(self.config.collection_name)
result = (
tbl.search(embedding).metric(self.config.distance).where(where).limit(k)
)
docs = self._lance_result_to_docs(result)
# note _distance is 1 - cosine
if self.is_from_dataframe:
scores = [
1 - rec["_distance"] for rec in result.to_pandas().to_dict("records")
]
else:
scores = [1 - rec["_distance"] for rec in result.to_arrow().to_pylist()]
if len(docs) == 0:
logger.warning(f"No matches found for {text}")
return []
if settings.debug:
logger.info(f"Found {len(docs)} matches, max score: {max(scores)}")
doc_score_pairs = list(zip(docs, scores))
self.show_if_debug(doc_score_pairs)
return doc_score_pairs
def get_fts_chunks(
self,
query: str,
k: int = 5,
where: Optional[str] = None,
) -> List[Tuple[Document, float]]:
"""
Uses LanceDB FTS (Full Text Search).
"""
# Clean up query: replace all newlines with spaces in query,
# force special search keywords to lower case, remove quotes,
# so it's not interpreted as code syntax
query_clean = (
query.replace("\n", " ")
.replace("AND", "and")
.replace("OR", "or")
.replace("NOT", "not")
.replace("'", "")
.replace('"', "")
)
tbl = self.client.open_table(self.config.collection_name)
tbl.create_fts_index(field_names="content", replace=True)
result = tbl.search(query_clean).where(where).limit(k).with_row_id(True)
docs = self._lance_result_to_docs(result)
scores = [r["score"] for r in result.to_list()]
return list(zip(docs, scores))
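    # Example (sketch; "points" is a hypothetical filterable column):
    #   chunks = store.get_fts_chunks("merlot and chile", k=3, where="points > 85")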
def _get_clean_vecdb_schema(self) -> str:
"""Get a cleaned schema of the vector-db, to pass to the LLM
as part of instructions on how to generate a SQL filter."""
if len(self.config.filter_fields) == 0:
filterable_fields = (
self.client.open_table(self.config.collection_name)
.search()
.limit(1)
.to_pandas(flatten=True)
.columns.tolist()
)
# drop id, vector, metadata.id, metadata.window_ids, metadata.is_chunk
for fields in [
"id",
"vector",
"metadata.id",
"metadata.window_ids",
"metadata.is_chunk",
]:
if fields in filterable_fields:
filterable_fields.remove(fields)
logger.warning(
f"""
No filter_fields set in config, so using these fields as filterable fields:
{filterable_fields}
"""
)
self.config.filter_fields = filterable_fields
if self.is_from_dataframe:
return self.is_from_dataframe
schema_dict = clean_schema(
self.schema,
excludes=["id", "vector"],
)
# intersect config.filter_fields with schema_dict.keys() in case
# there are extraneous fields in config.filter_fields
filter_fields_set = set(
self.config.filter_fields or schema_dict.keys()
).intersection(schema_dict.keys())
# remove 'content' from filter_fields_set, even if it's not in filter_fields_set
filter_fields_set.discard("content")
# possible values of filterable fields
filter_field_values = self.get_field_values(list(filter_fields_set))
# add field values to schema_dict as another field `values` for each field
for field, values in filter_field_values.items():
if field in schema_dict:
schema_dict[field]["values"] = values
# if self.config.filter_fields is set, restrict to these:
if len(self.config.filter_fields) > 0:
schema_dict = {
k: v for k, v in schema_dict.items() if k in self.config.filter_fields
}
schema = json.dumps(schema_dict, indent=2)
schema += f"""
NOTE when creating a filter for a query,
ONLY the following fields are allowed:
{",".join(self.config.filter_fields)}
"""
return schema
def get_field_values(self, fields: list[str]) -> Dict[str, str]:
"""Get string-listing of possible values of each filterable field,
e.g.
{
"genre": "crime, drama, mystery, ... (10 more)",
"certificate": "R, PG-13, PG, R",
}
"""
field_values: Dict[str, Set[str]] = {}
# make empty set for each field
for f in fields:
field_values[f] = set()
# get all documents and accumulate possible values of each field until 10
docs = self.get_all_documents()
for d in docs:
# extract fields from d
doc_field_vals = extract_fields(d, fields)
for field, val in doc_field_vals.items():
field_values[field].add(val)
# For each field make a string showing list of possible values,
# truncate to 20 values, and if there are more, indicate how many
# more there are, e.g. Genre: crime, drama, mystery, ... (20 more)
field_values_list = {}
for f in fields:
vals = list(field_values[f])
n = len(vals)
remaining = n - 20
vals = vals[:20]
if n > 20:
vals.append(f"(...{remaining} more)")
# make a string of the values, ensure they are strings
field_values_list[f] = ", ".join(str(v) for v in vals)
return field_values_list
| [
"lancedb.pydantic.Vector",
"lancedb.connect"
] | [((877, 904), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (894, 904), False, 'import logging\n'), ((1067, 1091), 'src.embedding_models.models.OpenAIEmbeddingsConfig', 'OpenAIEmbeddingsConfig', ([], {}), '()\n', (1089, 1091), False, 'from src.embedding_models.models import OpenAIEmbeddingsConfig\n'), ((1569, 1608), 'src.embedding_models.base.EmbeddingModel.create', 'EmbeddingModel.create', (['config.embedding'], {}), '(config.embedding)\n', (1590, 1608), False, 'from src.embedding_models.base import EmbeddingModel, EmbeddingModelsConfig\n'), ((2008, 2021), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (2019, 2021), False, 'from dotenv import load_dotenv\n'), ((6584, 6639), 'pydantic.create_model', 'create_model', (['"""NewModel"""'], {'__base__': 'LanceModel'}), "('NewModel', __base__=LanceModel, **fields)\n", (6596, 6639), False, 'from pydantic import BaseModel, ValidationError, create_model\n'), ((7016, 7074), 'src.utils.pydantic_utils.flatten_pydantic_model', 'flatten_pydantic_model', (['lance_model'], {'base_model': 'LanceModel'}), '(lance_model, base_model=LanceModel)\n', (7038, 7074), False, 'from src.utils.pydantic_utils import clean_schema, dataframe_to_document_model, dataframe_to_documents, extract_fields, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((19539, 19591), 'src.utils.pydantic_utils.clean_schema', 'clean_schema', (['self.schema'], {'excludes': "['id', 'vector']"}), "(self.schema, excludes=['id', 'vector'])\n", (19551, 19591), False, 'from src.utils.pydantic_utils import clean_schema, dataframe_to_document_model, dataframe_to_documents, extract_fields, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((20650, 20683), 'json.dumps', 'json.dumps', (['schema_dict'], {'indent': '(2)'}), '(schema_dict, indent=2)\n', (20660, 20683), False, 'import json\n'), ((2062, 2102), 'lancedb.connect', 'lancedb.connect', ([], {'uri': 'config.storage_path'}), '(uri=config.storage_path)\n', (2077, 2102), False, 'import lancedb\n'), ((12047, 12111), 'src.utils.pydantic_utils.dataframe_to_documents', 'dataframe_to_documents', (['df'], {'content': '"""content"""', 'metadata': 'metadata'}), "(df, content='content', metadata=metadata)\n", (12069, 12111), False, 'from src.utils.pydantic_utils import clean_schema, dataframe_to_document_model, dataframe_to_documents, extract_fields, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((12824, 12922), 'src.utils.pydantic_utils.dataframe_to_document_model', 'dataframe_to_document_model', (['df'], {'content': 'content', 'metadata': 'actual_metadata', 'exclude': "['vector']"}), "(df, content=content, metadata=actual_metadata,\n exclude=['vector'])\n", (12851, 12922), False, 'from src.utils.pydantic_utils import clean_schema, dataframe_to_document_model, dataframe_to_documents, extract_fields, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((13937, 14058), 'src.utils.pydantic_utils.dataframe_to_documents', 'dataframe_to_documents', (['df'], {'content': '"""content"""', 'metadata': 'self.df_metadata_columns', 'doc_cls': 'self.config.document_class'}), "(df, content='content', metadata=self.\n df_metadata_columns, doc_cls=self.config.document_class)\n", (13959, 14058), False, 'from src.utils.pydantic_utils import clean_schema, dataframe_to_document_model, dataframe_to_documents, extract_fields, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((21541, 21566), 'src.utils.pydantic_utils.extract_fields', 'extract_fields', (['d', 'fields'], {}), '(d, fields)\n', (21555, 21566), False, 'from src.utils.pydantic_utils import clean_schema, dataframe_to_document_model, dataframe_to_documents, extract_fields, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((2478, 2515), 'lancedb.connect', 'lancedb.connect', ([], {'uri': 'new_storage_path'}), '(uri=new_storage_path)\n', (2493, 2515), False, 'import lancedb\n'), ((6291, 6300), 'lancedb.pydantic.Vector', 'Vector', (['n'], {}), '(n)\n', (6297, 6300), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((10171, 10206), 'src.utils.pydantic_utils.flatten_pydantic_instance', 'flatten_pydantic_instance', (['instance'], {}), '(instance)\n', (10196, 10206), False, 'from src.utils.pydantic_utils import clean_schema, dataframe_to_document_model, dataframe_to_documents, extract_fields, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((14426, 14452), 'src.utils.pydantic_utils.nested_dict_from_flat', 'nested_dict_from_flat', (['rec'], {}), '(rec)\n', (14447, 14452), False, 'from src.utils.pydantic_utils import clean_schema, dataframe_to_document_model, dataframe_to_documents, extract_fields, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n')]
from datasets import load_dataset
import os
import lancedb
import getpass
import time
import argparse
from tqdm.auto import tqdm
from lancedb.embeddings import EmbeddingFunctionRegistry
from lancedb.pydantic import LanceModel, Vector
def main(query=None):
if "COHERE_API_KEY" not in os.environ:
os.environ["COHERE_API_KEY"] = getpass.getpass("Enter your Cohere API key: ")
    en = load_dataset(
"wikipedia",
"20220301.en",
streaming=True,
)
fr = load_dataset("wikipedia", "20220301.fr", streaming=True)
datasets = {"english": iter(en["train"]), "french": iter(fr["train"])}
registry = EmbeddingFunctionRegistry().get_instance()
cohere = registry.get(
"cohere"
).create() # uses multi-lingual model by default (768 dim)
class Schema(LanceModel):
vector: Vector(cohere.ndims()) = cohere.VectorField()
text: str = cohere.SourceField()
url: str
title: str
id: str
lang: str
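    # Because `text` is the SourceField and `vector` the VectorField, LanceDB
    # invokes the Cohere embedding function automatically on add() and search(),
    # which is why the rows built below carry no precomputed vectors.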
db = lancedb.connect("~/lancedb")
tbl = (
db.create_table("wikipedia-cohere", schema=Schema, mode="overwrite")
if "wikipedia-cohere" not in db
else db.open_table("wikipedia-cohere")
)
# let's use cohere embeddings. Use can also set it to openai version of the table
batch_size = 1000
num_records = 10000
data = []
for i in tqdm(range(0, num_records, batch_size)):
for lang, dataset in datasets.items():
batch = [next(dataset) for _ in range(batch_size)]
data.extend(
{
"text": x["text"],
"title": x["title"],
"url": x["url"],
"lang": lang,
"id": f"{lang}-{x['id']}",
}
for x in batch
)
# add in batches to avoid token limit
tbl.add(data)
data = []
print("Added batch. Sleeping for 20 seconds to avoid rate limit")
time.sleep(20) # wait for 20 seconds to avoid rate limit
if not query:
it = iter(fr["train"])
for i in range(5):
next(it)
query = next(it)
rs = tbl.search(query["text"]).limit(3).to_list()
print("Query: ", query["text"])
print("Results: ", rs)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--query", type=str, default="", help="Query to search")
args = parser.parse_args()
main(query=args.query)
| [
"lancedb.embeddings.EmbeddingFunctionRegistry",
"lancedb.connect"
] | [((407, 463), 'datasets.load_dataset', 'load_dataset', (['"""wikipedia"""', '"""20220301.en"""'], {'streaming': '(True)'}), "('wikipedia', '20220301.en', streaming=True)\n", (419, 463), False, 'from datasets import load_dataset\n'), ((504, 560), 'datasets.load_dataset', 'load_dataset', (['"""wikipedia"""', '"""20220301.fr"""'], {'streaming': '(True)'}), "('wikipedia', '20220301.fr', streaming=True)\n", (516, 560), False, 'from datasets import load_dataset\n'), ((1018, 1046), 'lancedb.connect', 'lancedb.connect', (['"""~/lancedb"""'], {}), "('~/lancedb')\n", (1033, 1046), False, 'import lancedb\n'), ((2458, 2483), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2481, 2483), False, 'import argparse\n'), ((340, 386), 'getpass.getpass', 'getpass.getpass', (['"""Enter your Cohere API key: """'], {}), "('Enter your Cohere API key: ')\n", (355, 386), False, 'import getpass\n'), ((2117, 2131), 'time.sleep', 'time.sleep', (['(20)'], {}), '(20)\n', (2127, 2131), False, 'import time\n'), ((653, 680), 'lancedb.embeddings.EmbeddingFunctionRegistry', 'EmbeddingFunctionRegistry', ([], {}), '()\n', (678, 680), False, 'from lancedb.embeddings import EmbeddingFunctionRegistry\n')] |
from typing import Optional
from pydantic import BaseModel, ConfigDict, Field, model_validator
from lancedb.pydantic import LanceModel, Vector
class Wine(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
validate_assignment=True,
extra="allow",
str_strip_whitespace=True,
json_schema_extra={
"example": {
"id": 45100,
"points": 85,
"title": "Balduzzi 2012 Reserva Merlot (Maule Valley)",
"description": "Ripe in color and aromas, this chunky wine delivers heavy baked-berry and raisin aromas in front of a jammy, extracted palate. Raisin and cooked berry flavors finish plump, with earthy notes.",
"price": 10.0,
"variety": "Merlot",
"winery": "Balduzzi",
"vineyard": "Reserva",
"country": "Chile",
"province": "Maule Valley",
"region_1": "null",
"region_2": "null",
"taster_name": "Michael Schachner",
"taster_twitter_handle": "@wineschach",
}
},
)
id: int
points: int
title: str
description: Optional[str]
price: Optional[float]
variety: Optional[str]
winery: Optional[str]
vineyard: Optional[str] = Field(..., alias="designation")
country: Optional[str]
province: Optional[str]
region_1: Optional[str]
region_2: Optional[str]
taster_name: Optional[str]
taster_twitter_handle: Optional[str]
@model_validator(mode="before")
def _fill_country_unknowns(cls, values):
"Fill in missing country values with 'Unknown', as we always want this field to be queryable"
country = values.get("country")
if not country:
values["country"] = "Unknown"
return values
@model_validator(mode="before")
def _add_to_vectorize_fields(cls, values):
"Add a field to_vectorize that will be used to create sentence embeddings"
variety = values.get("variety", "")
title = values.get("title", "")
description = values.get("description", "")
to_vectorize = list(filter(None, [variety, title, description]))
values["to_vectorize"] = " ".join(to_vectorize).strip()
return values
class LanceModelWine(BaseModel):
"""
Pydantic model for LanceDB, with a vector field added for sentence embeddings
"""
id: int
points: int
title: str
description: Optional[str]
price: Optional[float]
variety: Optional[str]
winery: Optional[str]
vineyard: Optional[str] = Field(..., alias="designation")
country: Optional[str]
province: Optional[str]
region_1: Optional[str]
region_2: Optional[str]
taster_name: Optional[str]
taster_twitter_handle: Optional[str]
to_vectorize: str
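    # 384-dimensional sentence embedding; must match the output size of the model used to fill it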
vector: Vector(384)
class SearchResult(LanceModel):
"Model to return search results"
model_config = ConfigDict(
extra="ignore",
json_schema_extra={
"example": {
"id": 374,
"title": "Borgo Conventi 2002 I Fiori del Borgo Sauvignon Blanc (Collio)",
"description": "Crisp, green, grassy wine with fresh acidity and herbeceous character. It is very New World with its tropical flavors and open, forward fruit.",
"country": "Italy",
"variety": "Sauvignon Blanc",
"price": 15,
"points": 88,
}
},
)
id: int
title: str
description: Optional[str]
country: Optional[str]
variety: Optional[str]
price: Optional[float]
points: Optional[int]
| [
"lancedb.pydantic.Vector"
] | [((189, 894), 'pydantic.ConfigDict', 'ConfigDict', ([], {'populate_by_name': '(True)', 'validate_assignment': '(True)', 'extra': '"""allow"""', 'str_strip_whitespace': '(True)', 'json_schema_extra': "{'example': {'id': 45100, 'points': 85, 'title':\n 'Balduzzi 2012 Reserva Merlot (Maule Valley)', 'description':\n 'Ripe in color and aromas, this chunky wine delivers heavy baked-berry and raisin aromas in front of a jammy, extracted palate. Raisin and cooked berry flavors finish plump, with earthy notes.'\n , 'price': 10.0, 'variety': 'Merlot', 'winery': 'Balduzzi', 'vineyard':\n 'Reserva', 'country': 'Chile', 'province': 'Maule Valley', 'region_1':\n 'null', 'region_2': 'null', 'taster_name': 'Michael Schachner',\n 'taster_twitter_handle': '@wineschach'}}"}), "(populate_by_name=True, validate_assignment=True, extra='allow',\n str_strip_whitespace=True, json_schema_extra={'example': {'id': 45100,\n 'points': 85, 'title': 'Balduzzi 2012 Reserva Merlot (Maule Valley)',\n 'description':\n 'Ripe in color and aromas, this chunky wine delivers heavy baked-berry and raisin aromas in front of a jammy, extracted palate. Raisin and cooked berry flavors finish plump, with earthy notes.'\n , 'price': 10.0, 'variety': 'Merlot', 'winery': 'Balduzzi', 'vineyard':\n 'Reserva', 'country': 'Chile', 'province': 'Maule Valley', 'region_1':\n 'null', 'region_2': 'null', 'taster_name': 'Michael Schachner',\n 'taster_twitter_handle': '@wineschach'}})\n", (199, 894), False, 'from pydantic import BaseModel, ConfigDict, Field, model_validator\n'), ((1355, 1386), 'pydantic.Field', 'Field', (['...'], {'alias': '"""designation"""'}), "(..., alias='designation')\n", (1360, 1386), False, 'from pydantic import BaseModel, ConfigDict, Field, model_validator\n'), ((1576, 1606), 'pydantic.model_validator', 'model_validator', ([], {'mode': '"""before"""'}), "(mode='before')\n", (1591, 1606), False, 'from pydantic import BaseModel, ConfigDict, Field, model_validator\n'), ((1888, 1918), 'pydantic.model_validator', 'model_validator', ([], {'mode': '"""before"""'}), "(mode='before')\n", (1903, 1918), False, 'from pydantic import BaseModel, ConfigDict, Field, model_validator\n'), ((2662, 2693), 'pydantic.Field', 'Field', (['...'], {'alias': '"""designation"""'}), "(..., alias='designation')\n", (2667, 2693), False, 'from pydantic import BaseModel, ConfigDict, Field, model_validator\n'), ((2911, 2922), 'lancedb.pydantic.Vector', 'Vector', (['(384)'], {}), '(384)\n', (2917, 2922), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((3014, 3422), 'pydantic.ConfigDict', 'ConfigDict', ([], {'extra': '"""ignore"""', 'json_schema_extra': "{'example': {'id': 374, 'title':\n 'Borgo Conventi 2002 I Fiori del Borgo Sauvignon Blanc (Collio)',\n 'description':\n 'Crisp, green, grassy wine with fresh acidity and herbeceous character. It is very New World with its tropical flavors and open, forward fruit.'\n , 'country': 'Italy', 'variety': 'Sauvignon Blanc', 'price': 15,\n 'points': 88}}"}), "(extra='ignore', json_schema_extra={'example': {'id': 374,\n 'title':\n 'Borgo Conventi 2002 I Fiori del Borgo Sauvignon Blanc (Collio)',\n 'description':\n 'Crisp, green, grassy wine with fresh acidity and herbeceous character. It is very New World with its tropical flavors and open, forward fruit.'\n , 'country': 'Italy', 'variety': 'Sauvignon Blanc', 'price': 15,\n 'points': 88}})\n", (3024, 3422), False, 'from pydantic import BaseModel, ConfigDict, Field, model_validator\n')] |
from typing import Any
from lancedb.embeddings import EmbeddingFunctionRegistry
def register_model(model_name: str) -> Any:
"""
Register a model with the given name using LanceDB's EmbeddingFunctionRegistry.
Args:
model_name (str): The name of the model to register.
Returns:
model: The registered model instance.
Usage:
>>> model = register_model("open-clip")
"""
registry = EmbeddingFunctionRegistry.get_instance()
model = registry.get(model_name).create()
return model
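# The returned embedding function exposes ndims(), SourceField() and VectorField(),
# so it can be used directly when declaring a LanceDB table schema.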
| [
"lancedb.embeddings.EmbeddingFunctionRegistry.get_instance"
] | [((430, 470), 'lancedb.embeddings.EmbeddingFunctionRegistry.get_instance', 'EmbeddingFunctionRegistry.get_instance', ([], {}), '()\n', (468, 470), False, 'from lancedb.embeddings import EmbeddingFunctionRegistry\n')] |
#!/usr/bin/env python
import os
import lancedb
from lancedb.embeddings import with_embeddings
import openai
import pandas as pd
from pydantic import BaseModel, Field
import requests
from aifunctools.openai_funcs import complete_with_functions
openai.api_key = os.getenv("OPENAI_API_KEY")
MODEL = "gpt-3.5-turbo-16k-0613"
db = lancedb.connect(".lancedb")
def embed_func(c):
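    # text-embedding-ada-002 returns one 1536-dimensional embedding per input string (legacy pre-v1 openai client)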
rs = openai.Embedding.create(input=c, engine="text-embedding-ada-002")
return [record["embedding"] for record in rs["data"]]
def to_lancedb_table(db, memes):
df = pd.DataFrame([m.model_dump() for m in memes])
data = with_embeddings(embed_func, df, column="name")
if "memes" in db.table_names():
tbl = db.open_table("memes")
tbl.add(data, mode="overwrite")
else:
tbl = db.create_table("memes", data)
return tbl
class Meme(BaseModel):
id: str = Field(description="The meme id")
name: str = Field(description="The meme name")
url: str = Field(description="The meme url")
width: int = Field(description="The meme image width")
height: int = Field(description="The meme image height")
box_count: int = Field(description="The number of text boxes in the meme")
def get_memes():
"""
Get a list of memes from the meme api
"""
resp = requests.get("https://api.imgflip.com/get_memes")
return [Meme(**m) for m in resp.json()["data"]["memes"]]
def search_memes(query: str):
"""
Get the most popular memes from imgflip and do a semantic search based on the user query
:param query: str, the search string
"""
memes = get_memes()
tbl = to_lancedb_table(db, memes)
df = tbl.search(embed_func(query)[0]).limit(1).to_df()
return Meme(**df.to_dict(orient="records")[0]).model_dump()
if __name__ == "__main__":
question = "Please find me the image link for that popular meme with Fry from Futurama"
print(complete_with_functions(question, search_memes)["choices"][0]["message"]["content"])
| [
"lancedb.embeddings.with_embeddings",
"lancedb.connect"
] | [((263, 290), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (272, 290), False, 'import os\n'), ((331, 358), 'lancedb.connect', 'lancedb.connect', (['""".lancedb"""'], {}), "('.lancedb')\n", (346, 358), False, 'import lancedb\n'), ((389, 454), 'openai.Embedding.create', 'openai.Embedding.create', ([], {'input': 'c', 'engine': '"""text-embedding-ada-002"""'}), "(input=c, engine='text-embedding-ada-002')\n", (412, 454), False, 'import openai\n'), ((614, 660), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['embed_func', 'df'], {'column': '"""name"""'}), "(embed_func, df, column='name')\n", (629, 660), False, 'from lancedb.embeddings import with_embeddings\n'), ((883, 915), 'pydantic.Field', 'Field', ([], {'description': '"""The meme id"""'}), "(description='The meme id')\n", (888, 915), False, 'from pydantic import BaseModel, Field\n'), ((932, 966), 'pydantic.Field', 'Field', ([], {'description': '"""The meme name"""'}), "(description='The meme name')\n", (937, 966), False, 'from pydantic import BaseModel, Field\n'), ((982, 1015), 'pydantic.Field', 'Field', ([], {'description': '"""The meme url"""'}), "(description='The meme url')\n", (987, 1015), False, 'from pydantic import BaseModel, Field\n'), ((1033, 1074), 'pydantic.Field', 'Field', ([], {'description': '"""The meme image width"""'}), "(description='The meme image width')\n", (1038, 1074), False, 'from pydantic import BaseModel, Field\n'), ((1093, 1135), 'pydantic.Field', 'Field', ([], {'description': '"""The meme image height"""'}), "(description='The meme image height')\n", (1098, 1135), False, 'from pydantic import BaseModel, Field\n'), ((1157, 1214), 'pydantic.Field', 'Field', ([], {'description': '"""The number of text boxes in the meme"""'}), "(description='The number of text boxes in the meme')\n", (1162, 1214), False, 'from pydantic import BaseModel, Field\n'), ((1303, 1352), 'requests.get', 'requests.get', (['"""https://api.imgflip.com/get_memes"""'], {}), "('https://api.imgflip.com/get_memes')\n", (1315, 1352), False, 'import requests\n'), ((1913, 1960), 'aifunctools.openai_funcs.complete_with_functions', 'complete_with_functions', (['question', 'search_memes'], {}), '(question, search_memes)\n', (1936, 1960), False, 'from aifunctools.openai_funcs import complete_with_functions\n')] |
import lancedb
import uuid
from abc import abstractmethod
from datetime import datetime
from tqdm import tqdm
from typing import Optional, List, Iterator, Dict
from memgpt.config import AgentConfig, MemGPTConfig
from memgpt.connectors.storage import StorageConnector, TableType
from memgpt.constants import MEMGPT_DIR
from memgpt.utils import printd
from memgpt.data_types import Record, Message, Passage, Source
from lancedb.pydantic import Vector, LanceModel
""" Initial implementation - not complete """
def get_db_model(table_name: str, table_type: TableType):
config = MemGPTConfig.load()
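    # the configured embedding_dim determines the width of each model's Vector column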
if table_type == TableType.ARCHIVAL_MEMORY or table_type == TableType.PASSAGES:
# create schema for archival memory
class PassageModel(LanceModel):
"""Defines data model for storing Passages (consisting of text, embedding)"""
id: uuid.UUID
user_id: str
text: str
doc_id: str
agent_id: str
data_source: str
embedding: Vector(config.embedding_dim)
metadata_: Dict
def __repr__(self):
return f"<Passage(passage_id='{self.id}', text='{self.text}', embedding='{self.embedding})>"
def to_record(self):
return Passage(
text=self.text,
embedding=self.embedding,
doc_id=self.doc_id,
user_id=self.user_id,
id=self.id,
data_source=self.data_source,
agent_id=self.agent_id,
metadata=self.metadata_,
)
return PassageModel
elif table_type == TableType.RECALL_MEMORY:
class MessageModel(LanceModel):
"""Defines data model for storing Message objects"""
            # Assuming message_id is the primary key
id: uuid.UUID
user_id: str
agent_id: str
# openai info
role: str
text: str
model: str
user: str
# function info
function_name: str
function_args: str
function_response: str
            embedding: Vector(config.embedding_dim)
            # Timestamp for when the message was created
            created_at: datetime
def __repr__(self):
return f"<Message(message_id='{self.id}', text='{self.text}', embedding='{self.embedding})>"
def to_record(self):
return Message(
user_id=self.user_id,
agent_id=self.agent_id,
role=self.role,
user=self.user,
text=self.text,
model=self.model,
function_name=self.function_name,
function_args=self.function_args,
function_response=self.function_response,
embedding=self.embedding,
created_at=self.created_at,
id=self.id,
)
"""Create database model for table_name"""
return MessageModel
elif table_type == TableType.DATA_SOURCES:
class SourceModel(LanceModel):
"""Defines data model for storing Passages (consisting of text, embedding)"""
# Assuming passage_id is the primary key
id: uuid.UUID
user_id: str
name: str
created_at: datetime
def __repr__(self):
return f"<Source(passage_id='{self.id}', name='{self.name}')>"
def to_record(self):
return Source(id=self.id, user_id=self.user_id, name=self.name, created_at=self.created_at)
"""Create database model for table_name"""
return SourceModel
else:
raise ValueError(f"Table type {table_type} not implemented")
class LanceDBConnector(StorageConnector):
"""Storage via LanceDB"""
# TODO: this should probably eventually be moved into a parent DB class
def __init__(self, name: Optional[str] = None, agent_config: Optional[AgentConfig] = None):
# TODO
pass
def generate_where_filter(self, filters: Dict) -> str:
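        # Render filters as a SQL-style predicate, e.g. {"user_id": 1} -> "user_id=1";
        # string values would additionally need quoting for a real query engine.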
where_filters = []
for key, value in filters.items():
where_filters.append(f"{key}={value}")
        return " AND ".join(where_filters)
@abstractmethod
def get_all_paginated(self, filters: Optional[Dict] = {}, page_size: Optional[int] = 1000) -> Iterator[List[Record]]:
# TODO
pass
@abstractmethod
def get_all(self, filters: Optional[Dict] = {}, limit=10) -> List[Record]:
# TODO
pass
@abstractmethod
def get(self, id: str) -> Optional[Record]:
# TODO
pass
@abstractmethod
def size(self, filters: Optional[Dict] = {}) -> int:
# TODO
pass
@abstractmethod
def insert(self, record: Record):
# TODO
pass
@abstractmethod
def insert_many(self, records: List[Record], show_progress=False):
# TODO
pass
@abstractmethod
def query(self, query: str, query_vec: List[float], top_k: int = 10, filters: Optional[Dict] = {}) -> List[Record]:
# TODO
pass
@abstractmethod
def query_date(self, start_date, end_date):
# TODO
pass
@abstractmethod
def query_text(self, query):
# TODO
pass
@abstractmethod
def delete_table(self):
# TODO
pass
@abstractmethod
def delete(self, filters: Optional[Dict] = {}):
# TODO
pass
@abstractmethod
def save(self):
# TODO
pass
| [
"lancedb.pydantic.Vector"
] | [((622, 641), 'memgpt.config.MemGPTConfig.load', 'MemGPTConfig.load', ([], {}), '()\n', (639, 641), False, 'from memgpt.config import AgentConfig, MemGPTConfig\n'), ((1077, 1105), 'lancedb.pydantic.Vector', 'Vector', (['config.embedding_dim'], {}), '(config.embedding_dim)\n', (1083, 1105), False, 'from lancedb.pydantic import Vector, LanceModel\n'), ((1333, 1523), 'memgpt.data_types.Passage', 'Passage', ([], {'text': 'self.text', 'embedding': 'self.embedding', 'doc_id': 'self.doc_id', 'user_id': 'self.user_id', 'id': 'self.id', 'data_source': 'self.data_source', 'agent_id': 'self.agent_id', 'metadata': 'self.metadata_'}), '(text=self.text, embedding=self.embedding, doc_id=self.doc_id,\n user_id=self.user_id, id=self.id, data_source=self.data_source,\n agent_id=self.agent_id, metadata=self.metadata_)\n', (1340, 1523), False, 'from memgpt.data_types import Record, Message, Passage, Source\n'), ((2335, 2363), 'lancedb.pydantic.Vector', 'Vector', (['config.embedding_dim'], {}), '(config.embedding_dim)\n', (2341, 2363), False, 'from lancedb.pydantic import Vector, LanceModel\n'), ((2674, 2989), 'memgpt.data_types.Message', 'Message', ([], {'user_id': 'self.user_id', 'agent_id': 'self.agent_id', 'role': 'self.role', 'user': 'self.user', 'text': 'self.text', 'model': 'self.model', 'function_name': 'self.function_name', 'function_args': 'self.function_args', 'function_response': 'self.function_response', 'embedding': 'self.embedding', 'created_at': 'self.created_at', 'id': 'self.id'}), '(user_id=self.user_id, agent_id=self.agent_id, role=self.role, user=\n self.user, text=self.text, model=self.model, function_name=self.\n function_name, function_args=self.function_args, function_response=self\n .function_response, embedding=self.embedding, created_at=self.\n created_at, id=self.id)\n', (2681, 2989), False, 'from memgpt.data_types import Record, Message, Passage, Source\n'), ((3815, 3904), 'memgpt.data_types.Source', 'Source', ([], {'id': 'self.id', 'user_id': 'self.user_id', 'name': 'self.name', 'created_at': 'self.created_at'}), '(id=self.id, user_id=self.user_id, name=self.name, created_at=self.\n created_at)\n', (3821, 3904), False, 'from memgpt.data_types import Record, Message, Passage, Source\n')] |
""" Install lancedb with instructor embedding support
Copy the command below into your terminal, and install the additional dependencies from the requirements.txt file:
pip install git+https://github.com/lancedb/lancedb.git@main#subdirectory=python
"""
import lancedb
from lancedb.pydantic import LanceModel, Vector
from lancedb.embeddings import get_registry
instructor = (
get_registry()
.get("instructor")
.create(
        source_instruction="represent the document for retrieval",
query_instruction="represent the document for most similar definition",
)
)
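# Instructor models condition their embeddings on a task instruction:
# source_instruction is applied to documents at ingest time and
# query_instruction to queries at search time.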
class Schema(LanceModel):
vector: Vector(instructor.ndims()) = instructor.VectorField()
text: str = instructor.SourceField()
# Creating LanceDB table
db = lancedb.connect("~/.lancedb")
tbl = db.create_table("intruct-multitask", schema=Schema, mode="overwrite")
data_f1 = [
{
"text": "Aspirin is a widely-used over-the-counter medication known for its anti-inflammatory and analgesic properties. It is commonly used to relieve pain, reduce fever, and alleviate minor aches and pains."
},
{
"text": "Amoxicillin is an antibiotic medication commonly prescribed to treat various bacterial infections, such as respiratory, ear, throat, and urinary tract infections. It belongs to the penicillin class of antibiotics and works by inhibiting bacterial cell wall synthesis."
},
{
"text": "Atorvastatin is a lipid-lowering medication used to manage high cholesterol levels and reduce the risk of cardiovascular events. It belongs to the statin class of drugs and works by inhibiting an enzyme involved in cholesterol production in the liver."
},
{
"text": "The Theory of Relativity is a fundamental physics theory developed by Albert Einstein, consisting of the special theory of relativity and the general theory of relativity. It revolutionized our understanding of space, time, and gravity."
},
{
"text": "Photosynthesis is a vital biological process by which green plants, algae, and some bacteria convert light energy into chemical energy in the form of glucose, using carbon dioxide and water."
},
{
"text": "The Big Bang Theory is the prevailing cosmological model that describes the origin of the universe. It suggests that the universe began as a singularity and has been expanding for billions of years."
},
{
"text": "Compound Interest is the addition of interest to the principal sum of a loan or investment, resulting in the interest on interest effect over time."
},
{
"text": "Stock Market is a financial marketplace where buyers and sellers trade ownership in companies, typically in the form of stocks or shares."
},
{
"text": "Inflation is the rate at which the general level of prices for goods and services is rising and subsequently purchasing power is falling."
},
{
"text": "Diversification is an investment strategy that involves spreading your investments across different asset classes to reduce risk."
},
{
"text": "Liquidity refers to how easily an asset can be converted into cash without a significant loss of value. It's a key consideration in financial management."
},
{
"text": "401(k) is a retirement savings plan offered by employers, allowing employees to save and invest a portion of their paycheck before taxes."
},
{
"text": "Ballet is a classical dance form that originated in the Italian Renaissance courts of the 15th century and later developed into a highly technical art."
},
{
"text": "Rock and Roll is a genre of popular music that originated and evolved in the United States during the late 1940s and early 1950s, characterized by a strong rhythm and amplified instruments."
},
{
"text": "Cuisine is a style or method of cooking, especially as characteristic of a particular country, region, or establishment."
},
{"text": "Renaissance was a cultural, artistic, and intellectual movement that"},
{
"text": "Neutrino is subatomic particles with very little mass and no electric charge. They are produced in various nuclear reactions, including those in the Sun, and play a significant role in astrophysics and particle physics."
},
{
"text": "Higgs Boson is a subatomic particle that gives mass to other elementary particles. Its discovery was a significant achievement in particle physics."
},
{
"text": "Quantum Entanglement is a quantum physics phenomenon where two or more particles become connected in such a way that the state of one particle is dependent on the state of the other(s), even when they are separated by large distances."
},
{
"text": "Genome Sequencing is the process of determining the complete DNA sequence of an organism's genome. It has numerous applications in genetics, biology, and medicine."
},
]
tbl.add(data_f1)
# The embedding function registered on the table embeds the query string automatically,
# so there is no need to embed the query manually
query = "amoxicillin"
result = tbl.search(query).limit(1).to_pandas()
# printing the output
print(result)
#########################################################################################################################
################# SAME INPUT DATA WITH DIFFERENT INSTRUCTION PAIR #######################################################
#########################################################################################################################
# uncomment the below code to check for different instruction pair on the same data
"""instructor = get_registry().get("instructor").create(
source_instruction="represent the captions",
query_instruction="represent the captions for retrieving duplicate captions"
)
class Schema(LanceModel):
vector: Vector(instructor.ndims()) = instructor.VectorField()
text: str = instructor.SourceField()
db = lancedb.connect("~/.lancedb")
tbl = db.create_table("intruct-multitask", schema=Schema, mode="overwrite")
data_f2 = [
{"text": "Aspirin is a widely-used over-the-counter medication known for its anti-inflammatory and analgesic properties. It is commonly used to relieve pain, reduce fever, and alleviate minor aches and pains."},
{"text": "Amoxicillin is an antibiotic medication commonly prescribed to treat various bacterial infections, such as respiratory, ear, throat, and urinary tract infections. It belongs to the penicillin class of antibiotics and works by inhibiting bacterial cell wall synthesis."},
{"text": "Atorvastatin is a lipid-lowering medication used to manage high cholesterol levels and reduce the risk of cardiovascular events. It belongs to the statin class of drugs and works by inhibiting an enzyme involved in cholesterol production in the liver."},
{"text": "The Theory of Relativity is a fundamental physics theory developed by Albert Einstein, consisting of the special theory of relativity and the general theory of relativity. It revolutionized our understanding of space, time, and gravity."},
{"text": "Photosynthesis is a vital biological process by which green plants, algae, and some bacteria convert light energy into chemical energy in the form of glucose, using carbon dioxide and water."},
{"text": "The Big Bang Theory is the prevailing cosmological model that describes the origin of the universe. It suggests that the universe began as a singularity and has been expanding for billions of years."},
{"text": "Compound Interest is the addition of interest to the principal sum of a loan or investment, resulting in the interest on interest effect over time."},
{"text": "Stock Market is a financial marketplace where buyers and sellers trade ownership in companies, typically in the form of stocks or shares."},
{"text": "Inflation is the rate at which the general level of prices for goods and services is rising and subsequently purchasing power is falling."},
{"text": "Diversification is an investment strategy that involves spreading your investments across different asset classes to reduce risk."},
{"text": "Liquidity refers to how easily an asset can be converted into cash without a significant loss of value. It's a key consideration in financial management."},
{"text": "401(k) is a retirement savings plan offered by employers, allowing employees to save and invest a portion of their paycheck before taxes."},
{"text": "Ballet is a classical dance form that originated in the Italian Renaissance courts of the 15th century and later developed into a highly technical art."},
{"text": "Rock and Roll is a genre of popular music that originated and evolved in the United States during the late 1940s and early 1950s, characterized by a strong rhythm and amplified instruments."},
{"text": "Cuisine is a style or method of cooking, especially as characteristic of a particular country, region, or establishment."},
{"text": "Renaissance was a cultural, artistic, and intellectual movement that"},
{"text": "Neutrino is subatomic particles with very little mass and no electric charge. They are produced in various nuclear reactions, including those in the Sun, and play a significant role in astrophysics and particle physics."},
{"text": "Higgs Boson is a subatomic particle that gives mass to other elementary particles. Its discovery was a significant achievement in particle physics."},
{"text": "Quantum Entanglement is a quantum physics phenomenon where two or more particles become connected in such a way that the state of one particle is dependent on the state of the other(s), even when they are separated by large distances."},
{"text": "Genome Sequencing is the process of determining the complete DNA sequence of an organism's genome. It has numerous applications in genetics, biology, and medicine."},
]
tbl.add(data_f2)
#same query, but for the differently embedded data
query = "amoxicillin"
result = tbl.search(query).limit(1).to_pandas()
#showing the result
print(result)
"""
| [
"lancedb.connect",
"lancedb.embeddings.get_registry"
] | [((818, 847), 'lancedb.connect', 'lancedb.connect', (['"""~/.lancedb"""'], {}), "('~/.lancedb')\n", (833, 847), False, 'import lancedb\n'), ((445, 459), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (457, 459), False, 'from lancedb.embeddings import get_registry\n')] |
from pathlib import Path
from langchain.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import LanceDB
import lancedb
from knowledge_graph.configuration.config import cfg
def check_if_embedding_exists(text: str):
db = lancedb.connect(cfg.db_path)
tbl_text = db.open_table("knowledge_graph_text")
df = tbl_text.search(text).to_pandas(flatten=True)
print(df.text)
    return text in df.text.values.astype(str)
async def create_embeddings_text(text: str):
db = lancedb.connect(cfg.db_path)
table_text = db.create_table(
name=f"knowledge_graph_text",
data=[
{
"vector": cfg.emb_func.embed_query("Placeholder"),
"text": "Placeholder",
"id": "1",
}
],
mode="overwrite",
)
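    # the placeholder row only pins down the table schema (text, id and vector
    # dimensionality); the real chunks are added below via LanceDB.from_texts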
text_splitter = CharacterTextSplitter(chunk_size=cfg.chunk_size, chunk_overlap=0)
documents = text_splitter.split_text(text)
db_text = LanceDB.from_texts(documents, cfg.emb_func, connection=table_text)
return db_text
async def create_embeddings_summary(summary_path: Path):
db = lancedb.connect(cfg.db_path)
table_summary = db.create_table(
name=f"knowledge_graph_summary",
data=[
{
"vector": cfg.emb_func.embed_query("Placeholder"),
"text": "Placeholder",
"id": "1",
}
],
mode="overwrite",
)
loader = TextLoader(summary_path.as_posix())
docs_summary = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=cfg.chunk_size, chunk_overlap=0)
doc = text_splitter.split_documents(docs_summary)
db_summary = LanceDB.from_documents(doc, cfg.emb_func, connection=table_summary)
return db_summary
async def similarity_search(query: str):
db = lancedb.connect(cfg.db_path)
tbl_text = db.open_table("knowledge_graph_text")
tbl_summary = db.open_table("knowledge_graph_summary")
vectorstore_text = LanceDB(tbl_text, cfg.emb_func)
result_text = vectorstore_text.similarity_search(query)
ans_text = result_text[0].page_content
vectorstore_summary = LanceDB(tbl_summary, cfg.emb_func)
result_summary = vectorstore_summary.similarity_search(query)
ans_summary = result_summary[0].page_content
return ans_text, ans_summary
if __name__ == "__main__":
input_val = """Animals are the most adorable and loving creatures existing on Earth. They might not be able to speak, but they can understand. They have a unique mode of interaction which is beyond human understanding. There are two types of animals: domestic and wild animals.
Domestic Animals | Domestic animals such as dogs, cows, cats, donkeys, mules and elephants are the ones which are used for the purpose of domestication. Wild animals refer to animals that are not normally domesticated and generally live in forests. They are important for their economic, survival, beauty, and scientific value.
Wild Animals | Wild animals provide various useful substances and animal products such as honey, leather, ivory, tusk, etc. They are of cultural asset and aesthetic value to humankind. Human life largely depends on wild animals for elementary requirements like the medicines we consume and the clothes we wear daily.
Nature and wildlife are largely associated with humans for several reasons, such as emotional and social issues. The balanced functioning of the biosphere depends on endless interactions among microorganisms, plants and animals. This has led to countless efforts by humans for the conservation of animals and to protect them from extinction. Animals have occupied a special place of preservation and veneration in various cultures worldwide."""
print(check_if_embedding_exists(input_val))
#path = Path(r"C:\tmp\graph_desc\graph_desc_310150f8-a4a8-4ba9-b1c7-07bc5b4944d1.txt")
#db = create_embeddings_summary(path)
#print(db)
| [
"lancedb.connect"
] | [((410, 438), 'lancedb.connect', 'lancedb.connect', (['cfg.db_path'], {}), '(cfg.db_path)\n', (425, 438), False, 'import lancedb\n'), ((715, 743), 'lancedb.connect', 'lancedb.connect', (['cfg.db_path'], {}), '(cfg.db_path)\n', (730, 743), False, 'import lancedb\n'), ((1060, 1125), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': 'cfg.chunk_size', 'chunk_overlap': '(0)'}), '(chunk_size=cfg.chunk_size, chunk_overlap=0)\n', (1081, 1125), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((1187, 1253), 'langchain.vectorstores.LanceDB.from_texts', 'LanceDB.from_texts', (['documents', 'cfg.emb_func'], {'connection': 'table_text'}), '(documents, cfg.emb_func, connection=table_text)\n', (1205, 1253), False, 'from langchain.vectorstores import LanceDB\n'), ((1345, 1373), 'lancedb.connect', 'lancedb.connect', (['cfg.db_path'], {}), '(cfg.db_path)\n', (1360, 1373), False, 'import lancedb\n'), ((1778, 1843), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': 'cfg.chunk_size', 'chunk_overlap': '(0)'}), '(chunk_size=cfg.chunk_size, chunk_overlap=0)\n', (1799, 1843), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((1915, 1982), 'langchain.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', (['doc', 'cfg.emb_func'], {'connection': 'table_summary'}), '(doc, cfg.emb_func, connection=table_summary)\n', (1937, 1982), False, 'from langchain.vectorstores import LanceDB\n'), ((2059, 2087), 'lancedb.connect', 'lancedb.connect', (['cfg.db_path'], {}), '(cfg.db_path)\n', (2074, 2087), False, 'import lancedb\n'), ((2224, 2255), 'langchain.vectorstores.LanceDB', 'LanceDB', (['tbl_text', 'cfg.emb_func'], {}), '(tbl_text, cfg.emb_func)\n', (2231, 2255), False, 'from langchain.vectorstores import LanceDB\n'), ((2387, 2421), 'langchain.vectorstores.LanceDB', 'LanceDB', (['tbl_summary', 'cfg.emb_func'], {}), '(tbl_summary, cfg.emb_func)\n', (2394, 2421), False, 'from langchain.vectorstores import LanceDB\n'), ((871, 910), 'knowledge_graph.configuration.config.cfg.emb_func.embed_query', 'cfg.emb_func.embed_query', (['"""Placeholder"""'], {}), "('Placeholder')\n", (895, 910), False, 'from knowledge_graph.configuration.config import cfg\n'), ((1511, 1550), 'knowledge_graph.configuration.config.cfg.emb_func.embed_query', 'cfg.emb_func.embed_query', (['"""Placeholder"""'], {}), "('Placeholder')\n", (1535, 1550), False, 'from knowledge_graph.configuration.config import cfg\n')] |
from glob import glob
from os.path import basename
from pathlib import Path
import chromadb
import lancedb
import pandas as pd
import torch
from chromadb.utils import embedding_functions
from lancedb.embeddings import EmbeddingFunctionRegistry
from lancedb.pydantic import LanceModel, Vector
from loguru import logger
from rich import print
from rich.progress import track
MODEL_NAME = "all-distilroberta-v1"
DB_PATH = "db/lancedb-test"
TABLE_NAME = COLLECTION_NAME = "test"
registry = EmbeddingFunctionRegistry.get_instance()
func = registry.get("sentence-transformers").create(
name="all-distilroberta-v1", device="cuda" if torch.cuda.is_available() else "cpu"
)
class Document(LanceModel):
document: str = func.SourceField()
embedding: Vector(func.ndims()) = func.VectorField()
source: str
def get_collection() -> chromadb.Collection:
chroma_client = chromadb.PersistentClient(DB_PATH)
try:
collection = chroma_client.get_collection(name=COLLECTION_NAME)
except Exception as e:
logger.exception(e)
logger.warning("Indexing documents...")
collection = chroma_client.create_collection(name=COLLECTION_NAME)
csvs = glob("crawled/*.csv")
sentence_transformer_ef = (
embedding_functions.SentenceTransformerEmbeddingFunction(
model_name=MODEL_NAME
)
)
data = []
for csv in track(csvs):
df = pd.read_csv(csv)
if len(df) == 0:
continue
urls, documents = df["URL"].tolist(), df["Section Content"].tolist()
embeddings = sentence_transformer_ef(documents)
assert len(urls) == len(documents) == len(embeddings)
base = basename(urls[0])
collection.add(
embeddings=embeddings,
documents=documents,
metadatas=[{"source": url} for url in urls],
ids=[f"{base}_{i}" for i in range(len(documents))],
)
return collection
def get_table():
    db = lancedb.connect(DB_PATH)
table = db.open_table(TABLE_NAME)
return table
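# Example usage (hypothetical; assumes the "test" table was already populated
# with Document rows, so string queries are embedded automatically):
# table = get_table()
# hits = table.search("how do I install this?").limit(3).to_pandas()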
| [
"lancedb.connect",
"lancedb.embeddings.EmbeddingFunctionRegistry.get_instance"
] | [((489, 529), 'lancedb.embeddings.EmbeddingFunctionRegistry.get_instance', 'EmbeddingFunctionRegistry.get_instance', ([], {}), '()\n', (527, 529), False, 'from lancedb.embeddings import EmbeddingFunctionRegistry\n'), ((881, 915), 'chromadb.PersistentClient', 'chromadb.PersistentClient', (['DB_PATH'], {}), '(DB_PATH)\n', (906, 915), False, 'import chromadb\n'), ((2082, 2102), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (2097, 2102), False, 'import lancedb\n'), ((633, 658), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (656, 658), False, 'import torch\n'), ((1033, 1052), 'loguru.logger.exception', 'logger.exception', (['e'], {}), '(e)\n', (1049, 1052), False, 'from loguru import logger\n'), ((1061, 1100), 'loguru.logger.warning', 'logger.warning', (['"""Indexing documents..."""'], {}), "('Indexing documents...')\n", (1075, 1100), False, 'from loguru import logger\n'), ((1191, 1212), 'glob.glob', 'glob', (['"""crawled/*.csv"""'], {}), "('crawled/*.csv')\n", (1195, 1212), False, 'from glob import glob\n'), ((1261, 1340), 'chromadb.utils.embedding_functions.SentenceTransformerEmbeddingFunction', 'embedding_functions.SentenceTransformerEmbeddingFunction', ([], {'model_name': 'MODEL_NAME'}), '(model_name=MODEL_NAME)\n', (1317, 1340), False, 'from chromadb.utils import embedding_functions\n'), ((1418, 1429), 'rich.progress.track', 'track', (['csvs'], {}), '(csvs)\n', (1423, 1429), False, 'from rich.progress import track\n'), ((1448, 1464), 'pandas.read_csv', 'pd.read_csv', (['csv'], {}), '(csv)\n', (1459, 1464), True, 'import pandas as pd\n'), ((1745, 1762), 'os.path.basename', 'basename', (['urls[0]'], {}), '(urls[0])\n', (1753, 1762), False, 'from os.path import basename\n')] |
import getpass
from typing import List
import cv2
import numpy as np
import pandas as pd
from ultralytics.data.augment import LetterBox
from ultralytics.utils import LOGGER as logger
from ultralytics.utils import SETTINGS
from ultralytics.utils.checks import check_requirements
from ultralytics.utils.ops import xyxy2xywh
from ultralytics.utils.plotting import plot_images
def get_table_schema(vector_size):
from lancedb.pydantic import LanceModel, Vector
class Schema(LanceModel):
im_file: str
labels: List[str]
cls: List[int]
bboxes: List[List[float]]
masks: List[List[List[int]]]
keypoints: List[List[List[float]]]
vector: Vector(vector_size)
return Schema
def get_sim_index_schema():
from lancedb.pydantic import LanceModel
class Schema(LanceModel):
idx: int
im_file: str
count: int
sim_im_files: List[str]
return Schema
def sanitize_batch(batch, dataset_info):
batch['cls'] = batch['cls'].flatten().int().tolist()
box_cls_pair = sorted(zip(batch['bboxes'].tolist(), batch['cls']), key=lambda x: x[1])
batch['bboxes'] = [box for box, _ in box_cls_pair]
batch['cls'] = [cls for _, cls in box_cls_pair]
batch['labels'] = [dataset_info['names'][i] for i in batch['cls']]
batch['masks'] = batch['masks'].tolist() if 'masks' in batch else [[[]]]
batch['keypoints'] = batch['keypoints'].tolist() if 'keypoints' in batch else [[[]]]
return batch
def plot_query_result(similar_set, plot_labels=True):
"""
Plot images from the similar set.
Args:
similar_set (list): Pyarrow or pandas object containing the similar data points
plot_labels (bool): Whether to plot labels or not
"""
similar_set = similar_set.to_dict(
orient='list') if isinstance(similar_set, pd.DataFrame) else similar_set.to_pydict()
empty_masks = [[[]]]
empty_boxes = [[]]
images = similar_set.get('im_file', [])
bboxes = similar_set.get('bboxes', []) if similar_set.get('bboxes') is not empty_boxes else []
masks = similar_set.get('masks') if similar_set.get('masks')[0] != empty_masks else []
kpts = similar_set.get('keypoints') if similar_set.get('keypoints')[0] != empty_masks else []
cls = similar_set.get('cls', [])
plot_size = 640
imgs, batch_idx, plot_boxes, plot_masks, plot_kpts = [], [], [], [], []
for i, imf in enumerate(images):
im = cv2.imread(imf)
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
h, w = im.shape[:2]
r = min(plot_size / h, plot_size / w)
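        # letterbox scale factor; boxes and keypoints are rescaled by the same ratio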
imgs.append(LetterBox(plot_size, center=False)(image=im).transpose(2, 0, 1))
if plot_labels:
if len(bboxes) > i and len(bboxes[i]) > 0:
box = np.array(bboxes[i], dtype=np.float32)
box[:, [0, 2]] *= r
box[:, [1, 3]] *= r
plot_boxes.append(box)
if len(masks) > i and len(masks[i]) > 0:
mask = np.array(masks[i], dtype=np.uint8)[0]
plot_masks.append(LetterBox(plot_size, center=False)(image=mask))
if len(kpts) > i and kpts[i] is not None:
kpt = np.array(kpts[i], dtype=np.float32)
kpt[:, :, :2] *= r
plot_kpts.append(kpt)
batch_idx.append(np.ones(len(np.array(bboxes[i], dtype=np.float32))) * i)
imgs = np.stack(imgs, axis=0)
masks = np.stack(plot_masks, axis=0) if len(plot_masks) > 0 else np.zeros(0, dtype=np.uint8)
kpts = np.concatenate(plot_kpts, axis=0) if len(plot_kpts) > 0 else np.zeros((0, 51), dtype=np.float32)
boxes = xyxy2xywh(np.concatenate(plot_boxes, axis=0)) if len(plot_boxes) > 0 else np.zeros(0, dtype=np.float32)
batch_idx = np.concatenate(batch_idx, axis=0)
cls = np.concatenate([np.array(c, dtype=np.int32) for c in cls], axis=0)
return plot_images(imgs,
batch_idx,
cls,
bboxes=boxes,
masks=masks,
kpts=kpts,
max_subplots=len(images),
save=False,
threaded=False)
def prompt_sql_query(query):
check_requirements('openai>=1.6.1')
from openai import OpenAI
if not SETTINGS['openai_api_key']:
logger.warning('OpenAI API key not found in settings. Please enter your API key below.')
openai_api_key = getpass.getpass('OpenAI API key: ')
SETTINGS.update({'openai_api_key': openai_api_key})
openai = OpenAI(api_key=SETTINGS['openai_api_key'])
messages = [
{
'role':
'system',
'content':
'''
You are a helpful data scientist proficient in SQL. You need to output exactly one SQL query based on
the following schema and a user request. You only need to output the format with fixed selection
statement that selects everything from "'table'", like `SELECT * from 'table'`
Schema:
im_file: string not null
labels: list<item: string> not null
child 0, item: string
cls: list<item: int64> not null
child 0, item: int64
bboxes: list<item: list<item: double>> not null
child 0, item: list<item: double>
child 0, item: double
masks: list<item: list<item: list<item: int64>>> not null
child 0, item: list<item: list<item: int64>>
child 0, item: list<item: int64>
child 0, item: int64
keypoints: list<item: list<item: list<item: double>>> not null
child 0, item: list<item: list<item: double>>
child 0, item: list<item: double>
child 0, item: double
vector: fixed_size_list<item: float>[256] not null
child 0, item: float
Some details about the schema:
- the "labels" column contains the string values like 'person' and 'dog' for the respective objects
in each image
- the "cls" column contains the integer values on these classes that map them the labels
Example of a correct query:
request - Get all data points that contain 2 or more people and at least one dog
correct query-
SELECT * FROM 'table' WHERE ARRAY_LENGTH(cls) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'person')) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'dog')) >= 1;
'''},
{
'role': 'user',
'content': f'{query}'}, ]
response = openai.chat.completions.create(model='gpt-3.5-turbo', messages=messages)
return response.choices[0].message.content
| [
"lancedb.pydantic.Vector"
] | [((3411, 3433), 'numpy.stack', 'np.stack', (['imgs'], {'axis': '(0)'}), '(imgs, axis=0)\n', (3419, 3433), True, 'import numpy as np\n'), ((3771, 3804), 'numpy.concatenate', 'np.concatenate', (['batch_idx'], {'axis': '(0)'}), '(batch_idx, axis=0)\n', (3785, 3804), True, 'import numpy as np\n'), ((4239, 4274), 'ultralytics.utils.checks.check_requirements', 'check_requirements', (['"""openai>=1.6.1"""'], {}), "('openai>=1.6.1')\n", (4257, 4274), False, 'from ultralytics.utils.checks import check_requirements\n'), ((4576, 4618), 'openai.OpenAI', 'OpenAI', ([], {'api_key': "SETTINGS['openai_api_key']"}), "(api_key=SETTINGS['openai_api_key'])\n", (4582, 4618), False, 'from openai import OpenAI\n'), ((695, 714), 'lancedb.pydantic.Vector', 'Vector', (['vector_size'], {}), '(vector_size)\n', (701, 714), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((2463, 2478), 'cv2.imread', 'cv2.imread', (['imf'], {}), '(imf)\n', (2473, 2478), False, 'import cv2\n'), ((2492, 2527), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (2504, 2527), False, 'import cv2\n'), ((3446, 3474), 'numpy.stack', 'np.stack', (['plot_masks'], {'axis': '(0)'}), '(plot_masks, axis=0)\n', (3454, 3474), True, 'import numpy as np\n'), ((3503, 3530), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.uint8'}), '(0, dtype=np.uint8)\n', (3511, 3530), True, 'import numpy as np\n'), ((3542, 3575), 'numpy.concatenate', 'np.concatenate', (['plot_kpts'], {'axis': '(0)'}), '(plot_kpts, axis=0)\n', (3556, 3575), True, 'import numpy as np\n'), ((3603, 3638), 'numpy.zeros', 'np.zeros', (['(0, 51)'], {'dtype': 'np.float32'}), '((0, 51), dtype=np.float32)\n', (3611, 3638), True, 'import numpy as np\n'), ((3725, 3754), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.float32'}), '(0, dtype=np.float32)\n', (3733, 3754), True, 'import numpy as np\n'), ((4353, 4446), 'ultralytics.utils.LOGGER.warning', 'logger.warning', (['"""OpenAI API key not found in settings. Please enter your API key below."""'], {}), "(\n    'OpenAI API key not found in settings. Please enter your API key below.')\n", (4367, 4446), True, 'from ultralytics.utils import LOGGER as logger\n'), ((4467, 4502), 'getpass.getpass', 'getpass.getpass', (['"""OpenAI API key: """'], {}), "('OpenAI API key: ')\n", (4482, 4502), False, 'import getpass\n'), ((4511, 4562), 'ultralytics.utils.SETTINGS.update', 'SETTINGS.update', (["{'openai_api_key': openai_api_key}"], {}), "({'openai_api_key': openai_api_key})\n", (4526, 4562), False, 'from ultralytics.utils import SETTINGS\n'), ((3661, 3695), 'numpy.concatenate', 'np.concatenate', (['plot_boxes'], {'axis': '(0)'}), '(plot_boxes, axis=0)\n', (3675, 3695), True, 'import numpy as np\n'), ((3831, 3858), 'numpy.array', 'np.array', (['c'], {'dtype': 'np.int32'}), '(c, dtype=np.int32)\n', (3839, 3858), True, 'import numpy as np\n'), ((2788, 2825), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (2796, 2825), True, 'import numpy as np\n'), ((3209, 3244), 'numpy.array', 'np.array', (['kpts[i]'], {'dtype': 'np.float32'}), '(kpts[i], dtype=np.float32)\n', (3217, 3244), True, 'import numpy as np\n'), ((3013, 3047), 'numpy.array', 'np.array', (['masks[i]'], {'dtype': 'np.uint8'}), '(masks[i], dtype=np.uint8)\n', (3021, 3047), True, 'import numpy as np\n'), ((2622, 2656), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (2631, 2656), False, 'from ultralytics.data.augment import LetterBox\n'), ((3085, 3119), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (3094, 3119), False, 'from ultralytics.data.augment import LetterBox\n'), ((3355, 3392), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3363, 3392), True, 'import numpy as np\n')]
# Ultralytics YOLO 🚀, AGPL-3.0 license
import getpass
from typing import List
import cv2
import numpy as np
import pandas as pd
from ultralytics.data.augment import LetterBox
from ultralytics.utils import LOGGER as logger
from ultralytics.utils import SETTINGS
from ultralytics.utils.checks import check_requirements
from ultralytics.utils.ops import xyxy2xywh
from ultralytics.utils.plotting import plot_images
def get_table_schema(vector_size):
"""Extracts and returns the schema of a database table."""
from lancedb.pydantic import LanceModel, Vector
class Schema(LanceModel):
im_file: str
labels: List[str]
cls: List[int]
bboxes: List[List[float]]
masks: List[List[List[int]]]
keypoints: List[List[List[float]]]
vector: Vector(vector_size)
return Schema
def get_sim_index_schema():
"""Returns a LanceModel schema for a database table with specified vector size."""
from lancedb.pydantic import LanceModel
class Schema(LanceModel):
idx: int
im_file: str
count: int
sim_im_files: List[str]
return Schema
def sanitize_batch(batch, dataset_info):
"""Sanitizes input batch for inference, ensuring correct format and dimensions."""
batch["cls"] = batch["cls"].flatten().int().tolist()
box_cls_pair = sorted(zip(batch["bboxes"].tolist(), batch["cls"]), key=lambda x: x[1])
batch["bboxes"] = [box for box, _ in box_cls_pair]
batch["cls"] = [cls for _, cls in box_cls_pair]
batch["labels"] = [dataset_info["names"][i] for i in batch["cls"]]
batch["masks"] = batch["masks"].tolist() if "masks" in batch else [[[]]]
batch["keypoints"] = batch["keypoints"].tolist() if "keypoints" in batch else [[[]]]
return batch
def plot_query_result(similar_set, plot_labels=True):
"""
Plot images from the similar set.
Args:
similar_set (list): Pyarrow or pandas object containing the similar data points
plot_labels (bool): Whether to plot labels or not
"""
similar_set = (
similar_set.to_dict(orient="list") if isinstance(similar_set, pd.DataFrame) else similar_set.to_pydict()
)
empty_masks = [[[]]]
empty_boxes = [[]]
images = similar_set.get("im_file", [])
bboxes = similar_set.get("bboxes", []) if similar_set.get("bboxes") is not empty_boxes else []
masks = similar_set.get("masks") if similar_set.get("masks")[0] != empty_masks else []
kpts = similar_set.get("keypoints") if similar_set.get("keypoints")[0] != empty_masks else []
cls = similar_set.get("cls", [])
plot_size = 640
imgs, batch_idx, plot_boxes, plot_masks, plot_kpts = [], [], [], [], []
for i, imf in enumerate(images):
im = cv2.imread(imf)
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
h, w = im.shape[:2]
r = min(plot_size / h, plot_size / w)
imgs.append(LetterBox(plot_size, center=False)(image=im).transpose(2, 0, 1))
if plot_labels:
if len(bboxes) > i and len(bboxes[i]) > 0:
box = np.array(bboxes[i], dtype=np.float32)
box[:, [0, 2]] *= r
box[:, [1, 3]] *= r
plot_boxes.append(box)
if len(masks) > i and len(masks[i]) > 0:
mask = np.array(masks[i], dtype=np.uint8)[0]
plot_masks.append(LetterBox(plot_size, center=False)(image=mask))
if len(kpts) > i and kpts[i] is not None:
kpt = np.array(kpts[i], dtype=np.float32)
kpt[:, :, :2] *= r
plot_kpts.append(kpt)
batch_idx.append(np.ones(len(np.array(bboxes[i], dtype=np.float32))) * i)
imgs = np.stack(imgs, axis=0)
masks = np.stack(plot_masks, axis=0) if len(plot_masks) > 0 else np.zeros(0, dtype=np.uint8)
kpts = np.concatenate(plot_kpts, axis=0) if len(plot_kpts) > 0 else np.zeros((0, 51), dtype=np.float32)
boxes = xyxy2xywh(np.concatenate(plot_boxes, axis=0)) if len(plot_boxes) > 0 else np.zeros(0, dtype=np.float32)
batch_idx = np.concatenate(batch_idx, axis=0)
cls = np.concatenate([np.array(c, dtype=np.int32) for c in cls], axis=0)
return plot_images(
imgs, batch_idx, cls, bboxes=boxes, masks=masks, kpts=kpts, max_subplots=len(images), save=False, threaded=False
)
def prompt_sql_query(query):
"""Plots images with optional labels from a similar data set."""
check_requirements("openai>=1.6.1")
from openai import OpenAI
if not SETTINGS["openai_api_key"]:
logger.warning("OpenAI API key not found in settings. Please enter your API key below.")
openai_api_key = getpass.getpass("OpenAI API key: ")
SETTINGS.update({"openai_api_key": openai_api_key})
openai = OpenAI(api_key=SETTINGS["openai_api_key"])
messages = [
{
"role": "system",
"content": """
You are a helpful data scientist proficient in SQL. You need to output exactly one SQL query based on
the following schema and a user request. You only need to output the format with fixed selection
statement that selects everything from "'table'", like `SELECT * from 'table'`
Schema:
im_file: string not null
labels: list<item: string> not null
child 0, item: string
cls: list<item: int64> not null
child 0, item: int64
bboxes: list<item: list<item: double>> not null
child 0, item: list<item: double>
child 0, item: double
masks: list<item: list<item: list<item: int64>>> not null
child 0, item: list<item: list<item: int64>>
child 0, item: list<item: int64>
child 0, item: int64
keypoints: list<item: list<item: list<item: double>>> not null
child 0, item: list<item: list<item: double>>
child 0, item: list<item: double>
child 0, item: double
vector: fixed_size_list<item: float>[256] not null
child 0, item: float
Some details about the schema:
- the "labels" column contains the string values like 'person' and 'dog' for the respective objects
in each image
- the "cls" column contains the integer values on these classes that map them the labels
Example of a correct query:
request - Get all data points that contain 2 or more people and at least one dog
correct query-
SELECT * FROM 'table' WHERE ARRAY_LENGTH(cls) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'person')) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'dog')) >= 1;
""",
},
{"role": "user", "content": f"{query}"},
]
response = openai.chat.completions.create(model="gpt-3.5-turbo", messages=messages)
return response.choices[0].message.content
| [
"lancedb.pydantic.Vector"
] | [((3694, 3716), 'numpy.stack', 'np.stack', (['imgs'], {'axis': '(0)'}), '(imgs, axis=0)\n', (3702, 3716), True, 'import numpy as np\n'), ((4054, 4087), 'numpy.concatenate', 'np.concatenate', (['batch_idx'], {'axis': '(0)'}), '(batch_idx, axis=0)\n', (4068, 4087), True, 'import numpy as np\n'), ((4421, 4456), 'ultralytics.utils.checks.check_requirements', 'check_requirements', (['"""openai>=1.6.1"""'], {}), "('openai>=1.6.1')\n", (4439, 4456), False, 'from ultralytics.utils.checks import check_requirements\n'), ((4758, 4800), 'openai.OpenAI', 'OpenAI', ([], {'api_key': "SETTINGS['openai_api_key']"}), "(api_key=SETTINGS['openai_api_key'])\n", (4764, 4800), False, 'from openai import OpenAI\n'), ((798, 817), 'lancedb.pydantic.Vector', 'Vector', (['vector_size'], {}), '(vector_size)\n', (804, 817), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((2746, 2761), 'cv2.imread', 'cv2.imread', (['imf'], {}), '(imf)\n', (2756, 2761), False, 'import cv2\n'), ((2775, 2810), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (2787, 2810), False, 'import cv2\n'), ((3729, 3757), 'numpy.stack', 'np.stack', (['plot_masks'], {'axis': '(0)'}), '(plot_masks, axis=0)\n', (3737, 3757), True, 'import numpy as np\n'), ((3786, 3813), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.uint8'}), '(0, dtype=np.uint8)\n', (3794, 3813), True, 'import numpy as np\n'), ((3825, 3858), 'numpy.concatenate', 'np.concatenate', (['plot_kpts'], {'axis': '(0)'}), '(plot_kpts, axis=0)\n', (3839, 3858), True, 'import numpy as np\n'), ((3886, 3921), 'numpy.zeros', 'np.zeros', (['(0, 51)'], {'dtype': 'np.float32'}), '((0, 51), dtype=np.float32)\n', (3894, 3921), True, 'import numpy as np\n'), ((4008, 4037), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.float32'}), '(0, dtype=np.float32)\n', (4016, 4037), True, 'import numpy as np\n'), ((4535, 4628), 'ultralytics.utils.LOGGER.warning', 'logger.warning', (['"""OpenAI API key not found in settings. Please enter your API key below."""'], {}), "(\n    'OpenAI API key not found in settings. Please enter your API key below.')\n", (4549, 4628), True, 'from ultralytics.utils import LOGGER as logger\n'), ((4649, 4684), 'getpass.getpass', 'getpass.getpass', (['"""OpenAI API key: """'], {}), "('OpenAI API key: ')\n", (4664, 4684), False, 'import getpass\n'), ((4693, 4744), 'ultralytics.utils.SETTINGS.update', 'SETTINGS.update', (["{'openai_api_key': openai_api_key}"], {}), "({'openai_api_key': openai_api_key})\n", (4708, 4744), False, 'from ultralytics.utils import SETTINGS\n'), ((3944, 3978), 'numpy.concatenate', 'np.concatenate', (['plot_boxes'], {'axis': '(0)'}), '(plot_boxes, axis=0)\n', (3958, 3978), True, 'import numpy as np\n'), ((4114, 4141), 'numpy.array', 'np.array', (['c'], {'dtype': 'np.int32'}), '(c, dtype=np.int32)\n', (4122, 4141), True, 'import numpy as np\n'), ((3071, 3108), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3079, 3108), True, 'import numpy as np\n'), ((3492, 3527), 'numpy.array', 'np.array', (['kpts[i]'], {'dtype': 'np.float32'}), '(kpts[i], dtype=np.float32)\n', (3500, 3527), True, 'import numpy as np\n'), ((3296, 3330), 'numpy.array', 'np.array', (['masks[i]'], {'dtype': 'np.uint8'}), '(masks[i], dtype=np.uint8)\n', (3304, 3330), True, 'import numpy as np\n'), ((2905, 2939), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (2914, 2939), False, 'from ultralytics.data.augment import LetterBox\n'), ((3368, 3402), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (3377, 3402), False, 'from ultralytics.data.augment import LetterBox\n'), ((3638, 3675), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3646, 3675), True, 'import numpy as np\n')]
from typing import Optional
from lancedb.pydantic import Vector
from pydantic import BaseModel, ConfigDict, Field, model_validator
class Wine(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
validate_assignment=True,
extra="allow",
str_strip_whitespace=True,
json_schema_extra={
"example": {
"id": 45100,
"points": 85,
"title": "Balduzzi 2012 Reserva Merlot (Maule Valley)",
"description": "Ripe in color and aromas, this chunky wine delivers heavy baked-berry and raisin aromas in front of a jammy, extracted palate. Raisin and cooked berry flavors finish plump, with earthy notes.",
"price": 10.0,
"variety": "Merlot",
"winery": "Balduzzi",
"vineyard": "Reserva",
"country": "Chile",
"province": "Maule Valley",
"region_1": "null",
"region_2": "null",
"taster_name": "Michael Schachner",
"taster_twitter_handle": "@wineschach",
}
},
)
id: int
points: int
title: str
description: Optional[str]
price: Optional[float]
variety: Optional[str]
winery: Optional[str]
vineyard: Optional[str] = Field(..., alias="designation")
country: Optional[str]
province: Optional[str]
region_1: Optional[str]
region_2: Optional[str]
taster_name: Optional[str]
taster_twitter_handle: Optional[str]
@model_validator(mode="before")
def _fill_country_unknowns(cls, values):
"Fill in missing country values with 'Unknown', as we always want this field to be queryable"
country = values.get("country")
if not country:
values["country"] = "Unknown"
return values
@model_validator(mode="before")
def _add_to_vectorize_fields(cls, values):
"Add a field to_vectorize that will be used to create sentence embeddings"
variety = values.get("variety", "")
title = values.get("title", "")
description = values.get("description", "")
to_vectorize = list(filter(None, [variety, title, description]))
values["to_vectorize"] = " ".join(to_vectorize).strip()
return values
class LanceModelWine(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
validate_assignment=True,
extra="allow",
str_strip_whitespace=True,
json_schema_extra={
"example": {
"id": 45100,
"points": 85,
"title": "Balduzzi 2012 Reserva Merlot (Maule Valley)",
"description": "Ripe in color and aromas, this chunky wine delivers heavy baked-berry and raisin aromas in front of a jammy, extracted palate. Raisin and cooked berry flavors finish plump, with earthy notes.",
"price": 10.0,
"variety": "Merlot",
"winery": "Balduzzi",
"vineyard": "Reserva",
"country": "Chile",
"province": "Maule Valley",
"region_1": "null",
"region_2": "null",
"taster_name": "Michael Schachner",
"taster_twitter_handle": "@wineschach",
}
},
)
id: int
points: int
title: str
description: Optional[str]
price: Optional[float]
variety: Optional[str]
winery: Optional[str]
vineyard: Optional[str] = Field(..., alias="designation")
country: Optional[str]
province: Optional[str]
region_1: Optional[str]
region_2: Optional[str]
taster_name: Optional[str]
taster_twitter_handle: Optional[str]
to_vectorize: str
vector: Vector(384)
| [
"lancedb.pydantic.Vector"
] | [((176, 881), 'pydantic.ConfigDict', 'ConfigDict', ([], {'populate_by_name': '(True)', 'validate_assignment': '(True)', 'extra': '"""allow"""', 'str_strip_whitespace': '(True)', 'json_schema_extra': "{'example': {'id': 45100, 'points': 85, 'title':\n 'Balduzzi 2012 Reserva Merlot (Maule Valley)', 'description':\n 'Ripe in color and aromas, this chunky wine delivers heavy baked-berry and raisin aromas in front of a jammy, extracted palate. Raisin and cooked berry flavors finish plump, with earthy notes.'\n , 'price': 10.0, 'variety': 'Merlot', 'winery': 'Balduzzi', 'vineyard':\n 'Reserva', 'country': 'Chile', 'province': 'Maule Valley', 'region_1':\n 'null', 'region_2': 'null', 'taster_name': 'Michael Schachner',\n 'taster_twitter_handle': '@wineschach'}}"}), "(populate_by_name=True, validate_assignment=True, extra='allow',\n str_strip_whitespace=True, json_schema_extra={'example': {'id': 45100,\n 'points': 85, 'title': 'Balduzzi 2012 Reserva Merlot (Maule Valley)',\n 'description':\n 'Ripe in color and aromas, this chunky wine delivers heavy baked-berry and raisin aromas in front of a jammy, extracted palate. Raisin and cooked berry flavors finish plump, with earthy notes.'\n , 'price': 10.0, 'variety': 'Merlot', 'winery': 'Balduzzi', 'vineyard':\n 'Reserva', 'country': 'Chile', 'province': 'Maule Valley', 'region_1':\n 'null', 'region_2': 'null', 'taster_name': 'Michael Schachner',\n 'taster_twitter_handle': '@wineschach'}})\n", (186, 881), False, 'from pydantic import BaseModel, ConfigDict, Field, model_validator\n'), ((1342, 1373), 'pydantic.Field', 'Field', (['...'], {'alias': '"""designation"""'}), "(..., alias='designation')\n", (1347, 1373), False, 'from pydantic import BaseModel, ConfigDict, Field, model_validator\n'), ((1563, 1593), 'pydantic.model_validator', 'model_validator', ([], {'mode': '"""before"""'}), "(mode='before')\n", (1578, 1593), False, 'from pydantic import BaseModel, ConfigDict, Field, model_validator\n'), ((1875, 1905), 'pydantic.model_validator', 'model_validator', ([], {'mode': '"""before"""'}), "(mode='before')\n", (1890, 1905), False, 'from pydantic import BaseModel, ConfigDict, Field, model_validator\n'), ((2385, 3090), 'pydantic.ConfigDict', 'ConfigDict', ([], {'populate_by_name': '(True)', 'validate_assignment': '(True)', 'extra': '"""allow"""', 'str_strip_whitespace': '(True)', 'json_schema_extra': "{'example': {'id': 45100, 'points': 85, 'title':\n 'Balduzzi 2012 Reserva Merlot (Maule Valley)', 'description':\n 'Ripe in color and aromas, this chunky wine delivers heavy baked-berry and raisin aromas in front of a jammy, extracted palate. Raisin and cooked berry flavors finish plump, with earthy notes.'\n , 'price': 10.0, 'variety': 'Merlot', 'winery': 'Balduzzi', 'vineyard':\n 'Reserva', 'country': 'Chile', 'province': 'Maule Valley', 'region_1':\n 'null', 'region_2': 'null', 'taster_name': 'Michael Schachner',\n 'taster_twitter_handle': '@wineschach'}}"}), "(populate_by_name=True, validate_assignment=True, extra='allow',\n str_strip_whitespace=True, json_schema_extra={'example': {'id': 45100,\n 'points': 85, 'title': 'Balduzzi 2012 Reserva Merlot (Maule Valley)',\n 'description':\n 'Ripe in color and aromas, this chunky wine delivers heavy baked-berry and raisin aromas in front of a jammy, extracted palate. Raisin and cooked berry flavors finish plump, with earthy notes.'\n , 'price': 10.0, 'variety': 'Merlot', 'winery': 'Balduzzi', 'vineyard':\n 'Reserva', 'country': 'Chile', 'province': 'Maule Valley', 'region_1':\n 'null', 'region_2': 'null', 'taster_name': 'Michael Schachner',\n 'taster_twitter_handle': '@wineschach'}})\n", (2395, 3090), False, 'from pydantic import BaseModel, ConfigDict, Field, model_validator\n'), ((3551, 3582), 'pydantic.Field', 'Field', (['...'], {'alias': '"""designation"""'}), "(..., alias='designation')\n", (3556, 3582), False, 'from pydantic import BaseModel, ConfigDict, Field, model_validator\n'), ((3800, 3811), 'lancedb.pydantic.Vector', 'Vector', (['(384)'], {}), '(384)\n', (3806, 3811), False, 'from lancedb.pydantic import Vector\n')]
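# Illustrative usage of the two "before" validators defined on Wine above: a
# minimal sketch, not part of the original record, with hypothetical field
# values. _fill_country_unknowns rewrites a missing country to "Unknown", and
# _add_to_vectorize_fields builds the extra "to_vectorize" field, which is
# retained because the model config sets extra="allow".
wine = Wine(
    id=1,
    points=90,
    title="Example Winery 2019 Pinot Noir",
    description="Bright cherry fruit.",
    price=25.0,
    variety="Pinot Noir",
    winery="Example Winery",
    designation=None,
    country=None,  # falsy, so the validator replaces it with "Unknown"
    province=None,
    region_1=None,
    region_2=None,
    taster_name=None,
    taster_twitter_handle=None,
)
assert wine.country == "Unknown"
assert wine.to_vectorize == "Pinot Noir Example Winery 2019 Pinot Noir Bright cherry fruit."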
# Ultralytics YOLO 🚀, AGPL-3.0 license
import getpass
from typing import List
import cv2
import numpy as np
import pandas as pd
from engine.data.augment import LetterBox
from engine.utils import LOGGER as logger
from engine.utils import SETTINGS
from engine.utils.checks import check_requirements
from engine.utils.ops import xyxy2xywh
from engine.utils.plotting import plot_images
def get_table_schema(vector_size):
"""Extracts and returns the schema of a database table."""
from lancedb.pydantic import LanceModel, Vector
class Schema(LanceModel):
im_file: str
labels: List[str]
cls: List[int]
bboxes: List[List[float]]
masks: List[List[List[int]]]
keypoints: List[List[List[float]]]
vector: Vector(vector_size)
return Schema
def get_sim_index_schema():
"""Returns a LanceModel schema for a database table with specified vector size."""
from lancedb.pydantic import LanceModel
class Schema(LanceModel):
idx: int
im_file: str
count: int
sim_im_files: List[str]
return Schema
def sanitize_batch(batch, dataset_info):
"""Sanitizes input batch for inference, ensuring correct format and dimensions."""
batch["cls"] = batch["cls"].flatten().int().tolist()
box_cls_pair = sorted(zip(batch["bboxes"].tolist(), batch["cls"]), key=lambda x: x[1])
batch["bboxes"] = [box for box, _ in box_cls_pair]
batch["cls"] = [cls for _, cls in box_cls_pair]
batch["labels"] = [dataset_info["names"][i] for i in batch["cls"]]
batch["masks"] = batch["masks"].tolist() if "masks" in batch else [[[]]]
batch["keypoints"] = batch["keypoints"].tolist() if "keypoints" in batch else [[[]]]
return batch
def plot_query_result(similar_set, plot_labels=True):
"""
Plot images from the similar set.
Args:
similar_set (list): Pyarrow or pandas object containing the similar data points
plot_labels (bool): Whether to plot labels or not
"""
similar_set = (
similar_set.to_dict(orient="list") if isinstance(similar_set, pd.DataFrame) else similar_set.to_pydict()
)
empty_masks = [[[]]]
empty_boxes = [[]]
images = similar_set.get("im_file", [])
bboxes = similar_set.get("bboxes", []) if similar_set.get("bboxes") is not empty_boxes else []
masks = similar_set.get("masks") if similar_set.get("masks")[0] != empty_masks else []
kpts = similar_set.get("keypoints") if similar_set.get("keypoints")[0] != empty_masks else []
cls = similar_set.get("cls", [])
plot_size = 640
imgs, batch_idx, plot_boxes, plot_masks, plot_kpts = [], [], [], [], []
for i, imf in enumerate(images):
im = cv2.imread(imf)
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
h, w = im.shape[:2]
r = min(plot_size / h, plot_size / w)
imgs.append(LetterBox(plot_size, center=False)(image=im).transpose(2, 0, 1))
if plot_labels:
if len(bboxes) > i and len(bboxes[i]) > 0:
box = np.array(bboxes[i], dtype=np.float32)
box[:, [0, 2]] *= r
box[:, [1, 3]] *= r
plot_boxes.append(box)
if len(masks) > i and len(masks[i]) > 0:
mask = np.array(masks[i], dtype=np.uint8)[0]
plot_masks.append(LetterBox(plot_size, center=False)(image=mask))
if len(kpts) > i and kpts[i] is not None:
kpt = np.array(kpts[i], dtype=np.float32)
kpt[:, :, :2] *= r
plot_kpts.append(kpt)
batch_idx.append(np.ones(len(np.array(bboxes[i], dtype=np.float32))) * i)
imgs = np.stack(imgs, axis=0)
masks = np.stack(plot_masks, axis=0) if plot_masks else np.zeros(0, dtype=np.uint8)
kpts = np.concatenate(plot_kpts, axis=0) if plot_kpts else np.zeros((0, 51), dtype=np.float32)
boxes = xyxy2xywh(np.concatenate(plot_boxes, axis=0)) if plot_boxes else np.zeros(0, dtype=np.float32)
batch_idx = np.concatenate(batch_idx, axis=0)
cls = np.concatenate([np.array(c, dtype=np.int32) for c in cls], axis=0)
return plot_images(
imgs, batch_idx, cls, bboxes=boxes, masks=masks, kpts=kpts, max_subplots=len(images), save=False, threaded=False
)
def prompt_sql_query(query):
"""Plots images with optional labels from a similar data set."""
check_requirements("openai>=1.6.1")
from openai import OpenAI
if not SETTINGS["openai_api_key"]:
logger.warning("OpenAI API key not found in settings. Please enter your API key below.")
openai_api_key = getpass.getpass("OpenAI API key: ")
SETTINGS.update({"openai_api_key": openai_api_key})
openai = OpenAI(api_key=SETTINGS["openai_api_key"])
messages = [
{
"role": "system",
"content": """
You are a helpful data scientist proficient in SQL. You need to output exactly one SQL query based on
the following schema and a user request. You only need to output the format with fixed selection
statement that selects everything from "'table'", like `SELECT * from 'table'`
Schema:
im_file: string not null
labels: list<item: string> not null
child 0, item: string
cls: list<item: int64> not null
child 0, item: int64
bboxes: list<item: list<item: double>> not null
child 0, item: list<item: double>
child 0, item: double
masks: list<item: list<item: list<item: int64>>> not null
child 0, item: list<item: list<item: int64>>
child 0, item: list<item: int64>
child 0, item: int64
keypoints: list<item: list<item: list<item: double>>> not null
child 0, item: list<item: list<item: double>>
child 0, item: list<item: double>
child 0, item: double
vector: fixed_size_list<item: float>[256] not null
child 0, item: float
Some details about the schema:
- the "labels" column contains the string values like 'person' and 'dog' for the respective objects
in each image
- the "cls" column contains the integer values on these classes that map them the labels
Example of a correct query:
request - Get all data points that contain 2 or more people and at least one dog
correct query-
SELECT * FROM 'table' WHERE ARRAY_LENGTH(cls) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'person')) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'dog')) >= 1;
""",
},
{"role": "user", "content": f"{query}"},
]
response = openai.chat.completions.create(model="gpt-3.5-turbo", messages=messages)
return response.choices[0].message.content
| [
"lancedb.pydantic.Vector"
] | [((3664, 3686), 'numpy.stack', 'np.stack', (['imgs'], {'axis': '(0)'}), '(imgs, axis=0)\n', (3672, 3686), True, 'import numpy as np\n'), ((3997, 4030), 'numpy.concatenate', 'np.concatenate', (['batch_idx'], {'axis': '(0)'}), '(batch_idx, axis=0)\n', (4011, 4030), True, 'import numpy as np\n'), ((4364, 4399), 'engine.utils.checks.check_requirements', 'check_requirements', (['"""openai>=1.6.1"""'], {}), "('openai>=1.6.1')\n", (4382, 4399), False, 'from engine.utils.checks import check_requirements\n'), ((4701, 4743), 'openai.OpenAI', 'OpenAI', ([], {'api_key': "SETTINGS['openai_api_key']"}), "(api_key=SETTINGS['openai_api_key'])\n", (4707, 4743), False, 'from openai import OpenAI\n'), ((768, 787), 'lancedb.pydantic.Vector', 'Vector', (['vector_size'], {}), '(vector_size)\n', (774, 787), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((2716, 2731), 'cv2.imread', 'cv2.imread', (['imf'], {}), '(imf)\n', (2726, 2731), False, 'import cv2\n'), ((2745, 2780), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (2757, 2780), False, 'import cv2\n'), ((3699, 3727), 'numpy.stack', 'np.stack', (['plot_masks'], {'axis': '(0)'}), '(plot_masks, axis=0)\n', (3707, 3727), True, 'import numpy as np\n'), ((3747, 3774), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.uint8'}), '(0, dtype=np.uint8)\n', (3755, 3774), True, 'import numpy as np\n'), ((3786, 3819), 'numpy.concatenate', 'np.concatenate', (['plot_kpts'], {'axis': '(0)'}), '(plot_kpts, axis=0)\n', (3800, 3819), True, 'import numpy as np\n'), ((3838, 3873), 'numpy.zeros', 'np.zeros', (['(0, 51)'], {'dtype': 'np.float32'}), '((0, 51), dtype=np.float32)\n', (3846, 3873), True, 'import numpy as np\n'), ((3951, 3980), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.float32'}), '(0, dtype=np.float32)\n', (3959, 3980), True, 'import numpy as np\n'), ((4478, 4571), 'engine.utils.LOGGER.warning', 'logger.warning', (['"""OpenAI API key not found in settings. Please enter your API key below."""'], {}), "(\n 'OpenAI API key not found in settings. Please enter your API key below.')\n", (4492, 4571), True, 'from engine.utils import LOGGER as logger\n'), ((4592, 4627), 'getpass.getpass', 'getpass.getpass', (['"""OpenAI API key: """'], {}), "('OpenAI API key: ')\n", (4607, 4627), False, 'import getpass\n'), ((4636, 4687), 'engine.utils.SETTINGS.update', 'SETTINGS.update', (["{'openai_api_key': openai_api_key}"], {}), "({'openai_api_key': openai_api_key})\n", (4651, 4687), False, 'from engine.utils import SETTINGS\n'), ((3896, 3930), 'numpy.concatenate', 'np.concatenate', (['plot_boxes'], {'axis': '(0)'}), '(plot_boxes, axis=0)\n', (3910, 3930), True, 'import numpy as np\n'), ((4057, 4084), 'numpy.array', 'np.array', (['c'], {'dtype': 'np.int32'}), '(c, dtype=np.int32)\n', (4065, 4084), True, 'import numpy as np\n'), ((3041, 3078), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3049, 3078), True, 'import numpy as np\n'), ((3462, 3497), 'numpy.array', 'np.array', (['kpts[i]'], {'dtype': 'np.float32'}), '(kpts[i], dtype=np.float32)\n', (3470, 3497), True, 'import numpy as np\n'), ((3266, 3300), 'numpy.array', 'np.array', (['masks[i]'], {'dtype': 'np.uint8'}), '(masks[i], dtype=np.uint8)\n', (3274, 3300), True, 'import numpy as np\n'), ((2875, 2909), 'engine.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (2884, 2909), False, 'from engine.data.augment import LetterBox\n'), ((3338, 3372), 'engine.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (3347, 3372), False, 'from engine.data.augment import LetterBox\n'), ((3608, 3645), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3616, 3645), True, 'import numpy as np\n')]
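# A quick check of sanitize_batch above on a tiny fake batch: a minimal sketch,
# not part of the original record. Real callers pass torch tensors, which is
# why the function can call .flatten().int().tolist(); the values below are
# made up.
import torch

batch = {
    "cls": torch.tensor([[1], [0]]),
    "bboxes": torch.tensor([[0.1, 0.1, 0.4, 0.4], [0.2, 0.2, 0.5, 0.5]]),
}
dataset_info = {"names": {0: "person", 1: "dog"}}
out = sanitize_batch(batch, dataset_info)
assert out["cls"] == [0, 1]  # boxes and classes are re-sorted by class id
assert out["labels"] == ["person", "dog"]  # class ids mapped through names
assert out["masks"] == [[[]]] and out["keypoints"] == [[[]]]  # defaults when absent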
import json
import lancedb
import pytest
from lancedb.utils.events import _Events
@pytest.fixture(autouse=True)
def request_log_path(tmp_path):
return tmp_path / "request.json"
def mock_register_event(name: str, **kwargs):
if _Events._instance is None:
_Events._instance = _Events()
_Events._instance.enabled = True
_Events._instance.rate_limit = 0
_Events._instance(name, **kwargs)
def test_event_reporting(monkeypatch, request_log_path, tmp_path) -> None:
def mock_request(**kwargs):
json_data = kwargs.get("json", {})
with open(request_log_path, "w") as f:
json.dump(json_data, f)
monkeypatch.setattr(
lancedb.table, "register_event", mock_register_event
) # Force enable registering events and strip exception handling
monkeypatch.setattr(lancedb.utils.events, "threaded_request", mock_request)
db = lancedb.connect(tmp_path)
db.create_table(
"test",
data=[
{"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
{"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
],
mode="overwrite",
)
assert request_log_path.exists() # test if event was registered
with open(request_log_path, "r") as f:
json_data = json.load(f)
    # TODO: don't hardcode these here. Instead create a module level json schema in
# lancedb.utils.events for better evolvability
batch_keys = ["api_key", "distinct_id", "batch"]
event_keys = ["event", "properties", "timestamp", "distinct_id"]
property_keys = ["cli", "install", "platforms", "version", "session_id"]
assert all([key in json_data for key in batch_keys])
assert all([key in json_data["batch"][0] for key in event_keys])
assert all([key in json_data["batch"][0]["properties"] for key in property_keys])
# cleanup & reset
monkeypatch.undo()
_Events._instance = None
| [
"lancedb.utils.events._Events._instance",
"lancedb.connect",
"lancedb.utils.events._Events"
] | [((86, 114), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (100, 114), False, 'import pytest\n'), ((383, 416), 'lancedb.utils.events._Events._instance', '_Events._instance', (['name'], {}), '(name, **kwargs)\n', (400, 416), False, 'from lancedb.utils.events import _Events\n'), ((899, 924), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (914, 924), False, 'import lancedb\n'), ((294, 303), 'lancedb.utils.events._Events', '_Events', ([], {}), '()\n', (301, 303), False, 'from lancedb.utils.events import _Events\n'), ((1287, 1299), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1296, 1299), False, 'import json\n'), ((628, 651), 'json.dump', 'json.dump', (['json_data', 'f'], {}), '(json_data, f)\n', (637, 651), False, 'import json\n')] |
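# A generic, self-contained sketch of the interception pattern used in the test
# above, not part of the original record; run it with pytest.
# monkeypatch.setattr swaps an outbound call for a recording stub for the
# duration of a single test and restores the original afterwards. The
# "telemetry" namespace and "threaded_request" name are illustrative stand-ins.
import types

telemetry = types.SimpleNamespace()

def _real_request(**kwargs):  # stand-in for a network call tests must not hit
    raise RuntimeError("no network in tests")

telemetry.threaded_request = _real_request

def test_capture(monkeypatch):
    captured = {}
    monkeypatch.setattr(telemetry, "threaded_request", lambda **kw: captured.update(kw))
    telemetry.threaded_request(json={"event": "ping"})  # hits the stub, not the original
    assert captured["json"] == {"event": "ping"}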
# Ultralytics YOLO 🚀, AGPL-3.0 license
import getpass
from typing import List
import cv2
import numpy as np
import pandas as pd
from ultralytics.data.augment import LetterBox
from ultralytics.utils import LOGGER as logger
from ultralytics.utils import SETTINGS
from ultralytics.utils.checks import check_requirements
from ultralytics.utils.ops import xyxy2xywh
from ultralytics.utils.plotting import plot_images
def get_table_schema(vector_size):
"""Extracts and returns the schema of a database table."""
from lancedb.pydantic import LanceModel, Vector
class Schema(LanceModel):
im_file: str
labels: List[str]
cls: List[int]
bboxes: List[List[float]]
masks: List[List[List[int]]]
keypoints: List[List[List[float]]]
vector: Vector(vector_size)
return Schema
def get_sim_index_schema():
"""Returns a LanceModel schema for a database table with specified vector size."""
from lancedb.pydantic import LanceModel
class Schema(LanceModel):
idx: int
im_file: str
count: int
sim_im_files: List[str]
return Schema
def sanitize_batch(batch, dataset_info):
"""Sanitizes input batch for inference, ensuring correct format and dimensions."""
batch["cls"] = batch["cls"].flatten().int().tolist()
box_cls_pair = sorted(zip(batch["bboxes"].tolist(), batch["cls"]), key=lambda x: x[1])
batch["bboxes"] = [box for box, _ in box_cls_pair]
batch["cls"] = [cls for _, cls in box_cls_pair]
batch["labels"] = [dataset_info["names"][i] for i in batch["cls"]]
batch["masks"] = batch["masks"].tolist() if "masks" in batch else [[[]]]
batch["keypoints"] = batch["keypoints"].tolist() if "keypoints" in batch else [[[]]]
return batch
def plot_query_result(similar_set, plot_labels=True):
"""
Plot images from the similar set.
Args:
similar_set (list): Pyarrow or pandas object containing the similar data points
plot_labels (bool): Whether to plot labels or not
"""
similar_set = (
similar_set.to_dict(orient="list") if isinstance(similar_set, pd.DataFrame) else similar_set.to_pydict()
)
empty_masks = [[[]]]
empty_boxes = [[]]
images = similar_set.get("im_file", [])
bboxes = similar_set.get("bboxes", []) if similar_set.get("bboxes") is not empty_boxes else []
masks = similar_set.get("masks") if similar_set.get("masks")[0] != empty_masks else []
kpts = similar_set.get("keypoints") if similar_set.get("keypoints")[0] != empty_masks else []
cls = similar_set.get("cls", [])
plot_size = 640
imgs, batch_idx, plot_boxes, plot_masks, plot_kpts = [], [], [], [], []
for i, imf in enumerate(images):
im = cv2.imread(imf)
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
h, w = im.shape[:2]
r = min(plot_size / h, plot_size / w)
imgs.append(LetterBox(plot_size, center=False)(image=im).transpose(2, 0, 1))
if plot_labels:
if len(bboxes) > i and len(bboxes[i]) > 0:
box = np.array(bboxes[i], dtype=np.float32)
box[:, [0, 2]] *= r
box[:, [1, 3]] *= r
plot_boxes.append(box)
if len(masks) > i and len(masks[i]) > 0:
mask = np.array(masks[i], dtype=np.uint8)[0]
plot_masks.append(LetterBox(plot_size, center=False)(image=mask))
if len(kpts) > i and kpts[i] is not None:
kpt = np.array(kpts[i], dtype=np.float32)
kpt[:, :, :2] *= r
plot_kpts.append(kpt)
batch_idx.append(np.ones(len(np.array(bboxes[i], dtype=np.float32))) * i)
imgs = np.stack(imgs, axis=0)
masks = np.stack(plot_masks, axis=0) if plot_masks else np.zeros(0, dtype=np.uint8)
kpts = np.concatenate(plot_kpts, axis=0) if plot_kpts else np.zeros((0, 51), dtype=np.float32)
boxes = xyxy2xywh(np.concatenate(plot_boxes, axis=0)) if plot_boxes else np.zeros(0, dtype=np.float32)
batch_idx = np.concatenate(batch_idx, axis=0)
cls = np.concatenate([np.array(c, dtype=np.int32) for c in cls], axis=0)
return plot_images(
imgs, batch_idx, cls, bboxes=boxes, masks=masks, kpts=kpts, max_subplots=len(images), save=False, threaded=False
)
def prompt_sql_query(query):
"""Plots images with optional labels from a similar data set."""
check_requirements("openai>=1.6.1")
from openai import OpenAI
if not SETTINGS["openai_api_key"]:
logger.warning("OpenAI API key not found in settings. Please enter your API key below.")
openai_api_key = getpass.getpass("OpenAI API key: ")
SETTINGS.update({"openai_api_key": openai_api_key})
openai = OpenAI(api_key=SETTINGS["openai_api_key"])
messages = [
{
"role": "system",
"content": """
You are a helpful data scientist proficient in SQL. You need to output exactly one SQL query based on
the following schema and a user request. You only need to output the format with fixed selection
statement that selects everything from "'table'", like `SELECT * from 'table'`
Schema:
im_file: string not null
labels: list<item: string> not null
child 0, item: string
cls: list<item: int64> not null
child 0, item: int64
bboxes: list<item: list<item: double>> not null
child 0, item: list<item: double>
child 0, item: double
masks: list<item: list<item: list<item: int64>>> not null
child 0, item: list<item: list<item: int64>>
child 0, item: list<item: int64>
child 0, item: int64
keypoints: list<item: list<item: list<item: double>>> not null
child 0, item: list<item: list<item: double>>
child 0, item: list<item: double>
child 0, item: double
vector: fixed_size_list<item: float>[256] not null
child 0, item: float
Some details about the schema:
- the "labels" column contains the string values like 'person' and 'dog' for the respective objects
in each image
- the "cls" column contains the integer values on these classes that map them the labels
Example of a correct query:
request - Get all data points that contain 2 or more people and at least one dog
correct query-
SELECT * FROM 'table' WHERE ARRAY_LENGTH(cls) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'person')) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'dog')) >= 1;
""",
},
{"role": "user", "content": f"{query}"},
]
response = openai.chat.completions.create(model="gpt-3.5-turbo", messages=messages)
return response.choices[0].message.content
| [
"lancedb.pydantic.Vector"
] | [((3694, 3716), 'numpy.stack', 'np.stack', (['imgs'], {'axis': '(0)'}), '(imgs, axis=0)\n', (3702, 3716), True, 'import numpy as np\n'), ((4027, 4060), 'numpy.concatenate', 'np.concatenate', (['batch_idx'], {'axis': '(0)'}), '(batch_idx, axis=0)\n', (4041, 4060), True, 'import numpy as np\n'), ((4394, 4429), 'ultralytics.utils.checks.check_requirements', 'check_requirements', (['"""openai>=1.6.1"""'], {}), "('openai>=1.6.1')\n", (4412, 4429), False, 'from ultralytics.utils.checks import check_requirements\n'), ((4731, 4773), 'openai.OpenAI', 'OpenAI', ([], {'api_key': "SETTINGS['openai_api_key']"}), "(api_key=SETTINGS['openai_api_key'])\n", (4737, 4773), False, 'from openai import OpenAI\n'), ((798, 817), 'lancedb.pydantic.Vector', 'Vector', (['vector_size'], {}), '(vector_size)\n', (804, 817), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((2746, 2761), 'cv2.imread', 'cv2.imread', (['imf'], {}), '(imf)\n', (2756, 2761), False, 'import cv2\n'), ((2775, 2810), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (2787, 2810), False, 'import cv2\n'), ((3729, 3757), 'numpy.stack', 'np.stack', (['plot_masks'], {'axis': '(0)'}), '(plot_masks, axis=0)\n', (3737, 3757), True, 'import numpy as np\n'), ((3777, 3804), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.uint8'}), '(0, dtype=np.uint8)\n', (3785, 3804), True, 'import numpy as np\n'), ((3816, 3849), 'numpy.concatenate', 'np.concatenate', (['plot_kpts'], {'axis': '(0)'}), '(plot_kpts, axis=0)\n', (3830, 3849), True, 'import numpy as np\n'), ((3868, 3903), 'numpy.zeros', 'np.zeros', (['(0, 51)'], {'dtype': 'np.float32'}), '((0, 51), dtype=np.float32)\n', (3876, 3903), True, 'import numpy as np\n'), ((3981, 4010), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.float32'}), '(0, dtype=np.float32)\n', (3989, 4010), True, 'import numpy as np\n'), ((4508, 4601), 'ultralytics.utils.LOGGER.warning', 'logger.warning', (['"""OpenAI API key not found in settings. Please enter your API key below."""'], {}), "(\n 'OpenAI API key not found in settings. Please enter your API key below.')\n", (4522, 4601), True, 'from ultralytics.utils import LOGGER as logger\n'), ((4622, 4657), 'getpass.getpass', 'getpass.getpass', (['"""OpenAI API key: """'], {}), "('OpenAI API key: ')\n", (4637, 4657), False, 'import getpass\n'), ((4666, 4717), 'ultralytics.utils.SETTINGS.update', 'SETTINGS.update', (["{'openai_api_key': openai_api_key}"], {}), "({'openai_api_key': openai_api_key})\n", (4681, 4717), False, 'from ultralytics.utils import SETTINGS\n'), ((3926, 3960), 'numpy.concatenate', 'np.concatenate', (['plot_boxes'], {'axis': '(0)'}), '(plot_boxes, axis=0)\n', (3940, 3960), True, 'import numpy as np\n'), ((4087, 4114), 'numpy.array', 'np.array', (['c'], {'dtype': 'np.int32'}), '(c, dtype=np.int32)\n', (4095, 4114), True, 'import numpy as np\n'), ((3071, 3108), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3079, 3108), True, 'import numpy as np\n'), ((3492, 3527), 'numpy.array', 'np.array', (['kpts[i]'], {'dtype': 'np.float32'}), '(kpts[i], dtype=np.float32)\n', (3500, 3527), True, 'import numpy as np\n'), ((3296, 3330), 'numpy.array', 'np.array', (['masks[i]'], {'dtype': 'np.uint8'}), '(masks[i], dtype=np.uint8)\n', (3304, 3330), True, 'import numpy as np\n'), ((2905, 2939), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (2914, 2939), False, 'from ultralytics.data.augment import LetterBox\n'), ((3368, 3402), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (3377, 3402), False, 'from ultralytics.data.augment import LetterBox\n'), ((3638, 3675), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3646, 3675), True, 'import numpy as np\n')]
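# A hypothetical sketch, not part of the original record, of feeding the schema
# produced by get_table_schema above into a LanceDB table; the database path,
# table name, vector size, and sample row are all made up.
import lancedb

db = lancedb.connect("./explorer_demo")
table = db.create_table("images", schema=get_table_schema(256), mode="overwrite")
table.add(
    [
        {
            "im_file": "im0.jpg",
            "labels": ["person"],
            "cls": [0],
            "bboxes": [[10.0, 20.0, 50.0, 60.0]],
            "masks": [[[0]]],
            "keypoints": [[[0.0, 0.0, 0.0]]],
            "vector": [0.0] * 256,
        }
    ]
)
print(table.search([0.0] * 256).limit(1).to_pandas()["im_file"])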
"""LanceDB vector store with cloud storage support."""
import os
from typing import Any, Optional
from dotenv import load_dotenv
from llama_index.schema import NodeRelationship, RelatedNodeInfo, TextNode
from llama_index.vector_stores import LanceDBVectorStore as LanceDBVectorStoreBase
from llama_index.vector_stores.lancedb import _to_lance_filter, _to_llama_similarities
from llama_index.vector_stores.types import VectorStoreQuery, VectorStoreQueryResult
from pandas import DataFrame
load_dotenv()
class LanceDBVectorStore(LanceDBVectorStoreBase):
"""Advanced LanceDB Vector Store supporting cloud storage and prefiltering."""
from lancedb.query import LanceQueryBuilder
from lancedb.table import Table
def __init__(
self,
uri: str,
table_name: str = "vectors",
nprobes: int = 20,
refine_factor: Optional[int] = None,
api_key: Optional[str] = None,
region: Optional[str] = None,
**kwargs: Any,
) -> None:
"""Init params."""
self._setup_connection(uri, api_key, region)
self.uri = uri
self.table_name = table_name
self.nprobes = nprobes
self.refine_factor = refine_factor
self.api_key = api_key
self.region = region
def _setup_connection(self, uri: str, api_key: Optional[str] = None, region: Optional[str] = None):
"""Establishes a robust connection to LanceDB."""
api_key = api_key or os.getenv('LANCEDB_API_KEY')
region = region or os.getenv('LANCEDB_REGION')
import_err_msg = "`lancedb` package not found, please run `pip install lancedb`"
try:
import lancedb
except ImportError:
raise ImportError(import_err_msg)
if api_key and region:
self.connection = lancedb.connect(uri, api_key=api_key, region=region)
else:
self.connection = lancedb.connect(uri)
def query(
self,
query: VectorStoreQuery,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Enhanced query method to support prefiltering in LanceDB queries."""
table = self.connection.open_table(self.table_name)
lance_query = self._prepare_lance_query(query, table, **kwargs)
results = lance_query.to_df()
return self._construct_query_result(results)
def _prepare_lance_query(self, query: VectorStoreQuery, table: Table, **kwargs) -> LanceQueryBuilder:
"""Prepares the LanceDB query considering prefiltering and additional parameters."""
if query.filters is not None:
if "where" in kwargs:
raise ValueError(
"Cannot specify filter via both query and kwargs. "
"Use kwargs only for lancedb specific items that are "
"not supported via the generic query interface.")
where = _to_lance_filter(query.filters)
else:
where = kwargs.pop("where", None)
prefilter = kwargs.pop("prefilter", False)
lance_query = (
table.search(query.query_embedding).limit(query.similarity_top_k).where(
where, prefilter=prefilter).nprobes(self.nprobes))
if self.refine_factor is not None:
lance_query.refine_factor(self.refine_factor)
return lance_query
def _construct_query_result(self, results: DataFrame) -> VectorStoreQueryResult:
"""Constructs a VectorStoreQueryResult from a LanceDB query result."""
nodes = []
for _, row in results.iterrows():
node = TextNode(
text=row.get('text', ''), # ensure text is a string
id_=row['id'],
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(node_id=row['doc_id']),
})
nodes.append(node)
return VectorStoreQueryResult(
nodes=nodes,
similarities=_to_llama_similarities(results),
ids=results["id"].tolist(),
)
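# Example usage (sketch; the URI, region, and query values are hypothetical):
#   store = LanceDBVectorStore(uri="db://my-project", table_name="vectors", region="us-east-1")
#   result = store.query(VectorStoreQuery(query_embedding=[0.1] * 1536, similarity_top_k=3), prefilter=True)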
| [
"lancedb.connect"
] | [((490, 503), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (501, 503), False, 'from dotenv import load_dotenv\n'), ((1464, 1492), 'os.getenv', 'os.getenv', (['"""LANCEDB_API_KEY"""'], {}), "('LANCEDB_API_KEY')\n", (1473, 1492), False, 'import os\n'), ((1520, 1547), 'os.getenv', 'os.getenv', (['"""LANCEDB_REGION"""'], {}), "('LANCEDB_REGION')\n", (1529, 1547), False, 'import os\n'), ((1814, 1866), 'lancedb.connect', 'lancedb.connect', (['uri'], {'api_key': 'api_key', 'region': 'region'}), '(uri, api_key=api_key, region=region)\n', (1829, 1866), False, 'import lancedb\n'), ((1911, 1931), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (1926, 1931), False, 'import lancedb\n'), ((2898, 2929), 'llama_index.vector_stores.lancedb._to_lance_filter', '_to_lance_filter', (['query.filters'], {}), '(query.filters)\n', (2914, 2929), False, 'from llama_index.vector_stores.lancedb import _to_lance_filter, _to_llama_similarities\n'), ((4021, 4052), 'llama_index.vector_stores.lancedb._to_llama_similarities', '_to_llama_similarities', (['results'], {}), '(results)\n', (4043, 4052), False, 'from llama_index.vector_stores.lancedb import _to_lance_filter, _to_llama_similarities\n'), ((3841, 3879), 'llama_index.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': "row['doc_id']"}), "(node_id=row['doc_id'])\n", (3856, 3879), False, 'from llama_index.schema import NodeRelationship, RelatedNodeInfo, TextNode\n')] |
from pathlib import Path
from typing import Any, Callable
from lancedb import DBConnection as LanceDBConnection
from lancedb import connect as lancedb_connect
from lancedb.table import Table as LanceDBTable
from openai import Client as OpenAIClient
from pydantic import Field, PrivateAttr
from crewai_tools.tools.rag.rag_tool import Adapter
def _default_embedding_function():
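    # Returns a closure that embeds a batch of strings with OpenAI's
    # text-embedding-ada-002 model; OpenAIClient() picks up its API key from
    # the OPENAI_API_KEY environment variable.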
client = OpenAIClient()
def _embedding_function(input):
rs = client.embeddings.create(input=input, model="text-embedding-ada-002")
return [record.embedding for record in rs.data]
return _embedding_function
class LanceDBAdapter(Adapter):
uri: str | Path
table_name: str
embedding_function: Callable = Field(default_factory=_default_embedding_function)
top_k: int = 3
vector_column_name: str = "vector"
text_column_name: str = "text"
_db: LanceDBConnection = PrivateAttr()
_table: LanceDBTable = PrivateAttr()
def model_post_init(self, __context: Any) -> None:
self._db = lancedb_connect(self.uri)
self._table = self._db.open_table(self.table_name)
return super().model_post_init(__context)
def query(self, question: str) -> str:
query = self.embedding_function([question])[0]
results = (
self._table.search(query, vector_column_name=self.vector_column_name)
.limit(self.top_k)
.select([self.text_column_name])
.to_list()
)
values = [result[self.text_column_name] for result in results]
return "\n".join(values)
| [
"lancedb.connect"
] | [((393, 407), 'openai.Client', 'OpenAIClient', ([], {}), '()\n', (405, 407), True, 'from openai import Client as OpenAIClient\n'), ((724, 774), 'pydantic.Field', 'Field', ([], {'default_factory': '_default_embedding_function'}), '(default_factory=_default_embedding_function)\n', (729, 774), False, 'from pydantic import Field, PrivateAttr\n'), ((898, 911), 'pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (909, 911), False, 'from pydantic import Field, PrivateAttr\n'), ((939, 952), 'pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (950, 952), False, 'from pydantic import Field, PrivateAttr\n'), ((1028, 1053), 'lancedb.connect', 'lancedb_connect', (['self.uri'], {}), '(self.uri)\n', (1043, 1053), True, 'from lancedb import connect as lancedb_connect\n')] |
from langchain.text_splitter import (
RecursiveCharacterTextSplitter,
Language,
LatexTextSplitter,
)
from langchain.document_loaders import TextLoader
from langchain.embeddings import OpenAIEmbeddings
import argparse, os, arxiv
os.environ["OPENAI_API_KEY"] = "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"
embeddings = OpenAIEmbeddings()
from langchain.vectorstores import LanceDB
from lancedb.pydantic import Vector, LanceModel
from typing import List
from datetime import datetime
import lancedb
global embedding_out_length
embedding_out_length = 1536
class Content(LanceModel):
id: str
arxiv_id: str
vector: Vector(embedding_out_length)
text: str
uploaded_date: datetime
title: str
authors: List[str]
abstract: str
categories: List[str]
url: str
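# Content doubles as the LanceDB table schema: LanceModel is a pydantic base
# class and Vector(n) declares a fixed-size float vector column, with n = 1536
# matching the output dimension of OpenAI's text-embedding-ada-002.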
def PyPDF_to_Vector(table: LanceDB, embeddings: OpenAIEmbeddings, src_dir: str, n_threads: int = 1):
pass
if __name__ == "__main__":
argparser = argparse.ArgumentParser(description="Create Vector DB and perform ingestion from source files")
argparser.add_argument('-s', '--src_dir', type=str, required=True, help = "Source directory where arxiv sources are stored")
argparser.add_argument('-db', '--db_name', type=str, required=True, help = "Name of the LanceDB database to be created")
argparser.add_argument('-t', '--table_name', type=str, required=False, help = "Name of the LanceDB table to be created", default = "EIC_archive")
argparser.add_argument('-openai_key', '--openai_api_key', type=str, required=True, help = "OpenAI API key")
argparser.add_argument('-c', '--chunking', type = str, required=False, help = "Type of Chunking PDF or LATEX", default = "PDF")
argparser.add_argument('-n', '--nthreads', type=int, default=-1)
args = argparser.parse_args()
SRC_DIR = args.src_dir
DB_NAME = args.db_name
TABLE_NAME = args.table_name
OPENAI_API_KEY = args.openai_api_key
NTHREADS = args.nthreads
db = lancedb.connect(DB_NAME)
table = db.create_table(TABLE_NAME, schema=Content, mode="overwrite")
db = lancedb.connect(DB_NAME)
meta_data = {"arxiv_id": "1", "title": "EIC LLM",
"category" : "N/A",
"authors": "N/A",
"sub_categories": "N/A",
"abstract": "N/A",
"published": "N/A",
"updated": "N/A",
"doi": "N/A"
             }
table = db.create_table(
"EIC_archive",
data=[
{
"vector": embeddings.embed_query("EIC LLM"),
"text": "EIC LLM",
"id": "1",
"arxiv_id" : "N/A",
"title" : "N/A",
"category" : "N/A",
"published" : "N/A"
}
],
mode="overwrite",
)
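# Note that mode="overwrite" drops any existing table of the same name, so this
# seed row replaces the Content-schema table created above and only establishes
# the column layout used by the LangChain LanceDB wrapper.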
vectorstore = LanceDB(connection = table, embedding = embeddings)
sourcedir = "PDFs"
count = 0
for source in os.listdir(sourcedir):
if not os.path.isdir(os.path.join("PDFs", source)):
continue
print (f"Adding the source document {source} to the Vector DB")
    client = arxiv.Client()
    search = arxiv.Search(id_list=[source])
    paper = next(client.results(search))
meta_data = {"arxiv_id": paper.entry_id,
"title": paper.title,
"category" : categories[paper.primary_category],
"published": paper.published
}
for file in os.listdir(os.path.join(sourcedir, source)):
if file.endswith(".tex"):
latex_file = os.path.join(sourcedir, source, file)
print (source, latex_file)
documents = TextLoader(latex_file, encoding = 'latin-1').load()
latex_splitter = LatexTextSplitter(
chunk_size=120, chunk_overlap=10
)
documents = latex_splitter.split_documents(documents)
for doc in documents:
for k, v in meta_data.items():
doc.metadata[k] = v
vectorstore.add_documents(documents = documents)
            count += len(documents)
"lancedb.pydantic.Vector",
"lancedb.connect"
] | [((342, 360), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (358, 360), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((2116, 2133), 'lancedb.connect', 'lancedb.connect', ([], {}), '()\n', (2131, 2133), False, 'import lancedb\n'), ((2820, 2867), 'langchain.vectorstores.LanceDB', 'LanceDB', ([], {'connection': 'table', 'embedding': 'embeddings'}), '(connection=table, embedding=embeddings)\n', (2827, 2867), False, 'from langchain.vectorstores import LanceDB\n'), ((2916, 2937), 'os.listdir', 'os.listdir', (['sourcedir'], {}), '(sourcedir)\n', (2926, 2937), False, 'import argparse, os, arxiv\n'), ((648, 676), 'lancedb.pydantic.Vector', 'Vector', (['embedding_out_length'], {}), '(embedding_out_length)\n', (654, 676), False, 'from lancedb.pydantic import Vector, LanceModel\n'), ((978, 1078), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create Vector DB and perform ingestion from source files"""'}), "(description=\n 'Create Vector DB and perform ingestion from source files')\n", (1001, 1078), False, 'import argparse, os, arxiv\n'), ((2006, 2030), 'lancedb.connect', 'lancedb.connect', (['DB_NAME'], {}), '(DB_NAME)\n', (2021, 2030), False, 'import lancedb\n'), ((3110, 3124), 'arxiv.Client', 'arxiv.Client', ([], {}), '()\n', (3122, 3124), False, 'import arxiv\n'), ((3138, 3168), 'arxiv.Search', 'arxiv.Search', ([], {'id_list': '[source]'}), '(id_list=[source])\n', (3150, 3168), False, 'import arxiv\n'), ((3458, 3489), 'os.path.join', 'os.path.join', (['sourcedir', 'source'], {}), '(sourcedir, source)\n', (3470, 3489), False, 'import argparse, os, arxiv\n'), ((2964, 2992), 'os.path.join', 'os.path.join', (['"""PDFs"""', 'source'], {}), "('PDFs', source)\n", (2976, 2992), False, 'import argparse, os, arxiv\n'), ((3551, 3588), 'os.path.join', 'os.path.join', (['sourcedir', 'source', 'file'], {}), '(sourcedir, source, file)\n', (3563, 3588), False, 'import argparse, os, arxiv\n'), ((3733, 3784), 'langchain.text_splitter.LatexTextSplitter', 'LatexTextSplitter', ([], {'chunk_size': '(120)', 'chunk_overlap': '(10)'}), '(chunk_size=120, chunk_overlap=10)\n', (3750, 3784), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter, Language, LatexTextSplitter\n'), ((3186, 3200), 'arxiv.Client', 'arxiv.Client', ([], {}), '()\n', (3198, 3200), False, 'import arxiv\n'), ((3652, 3694), 'langchain.document_loaders.TextLoader', 'TextLoader', (['latex_file'], {'encoding': '"""latin-1"""'}), "(latex_file, encoding='latin-1')\n", (3662, 3694), False, 'from langchain.document_loaders import TextLoader\n')] |
# Ultralytics YOLO 🚀, AGPL-3.0 license
import getpass
from typing import List
import cv2
import numpy as np
import pandas as pd
from ultralytics.data.augment import LetterBox
from ultralytics.utils import LOGGER as logger
from ultralytics.utils import SETTINGS
from ultralytics.utils.checks import check_requirements
from ultralytics.utils.ops import xyxy2xywh
from ultralytics.utils.plotting import plot_images
def get_table_schema(vector_size):
"""Extracts and returns the schema of a database table."""
from lancedb.pydantic import LanceModel, Vector
class Schema(LanceModel):
im_file: str
labels: List[str]
cls: List[int]
bboxes: List[List[float]]
masks: List[List[List[int]]]
keypoints: List[List[List[float]]]
vector: Vector(vector_size)
return Schema
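# Vector(vector_size) maps to a fixed_size_list<float>[vector_size] column in
# the underlying LanceDB table.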
def get_sim_index_schema():
"""Returns a LanceModel schema for a database table with specified vector size."""
from lancedb.pydantic import LanceModel
class Schema(LanceModel):
idx: int
im_file: str
count: int
sim_im_files: List[str]
return Schema
def sanitize_batch(batch, dataset_info):
"""Sanitizes input batch for inference, ensuring correct format and dimensions."""
batch["cls"] = batch["cls"].flatten().int().tolist()
box_cls_pair = sorted(zip(batch["bboxes"].tolist(), batch["cls"]), key=lambda x: x[1])
batch["bboxes"] = [box for box, _ in box_cls_pair]
batch["cls"] = [cls for _, cls in box_cls_pair]
batch["labels"] = [dataset_info["names"][i] for i in batch["cls"]]
batch["masks"] = batch["masks"].tolist() if "masks" in batch else [[[]]]
batch["keypoints"] = batch["keypoints"].tolist() if "keypoints" in batch else [[[]]]
return batch
def plot_query_result(similar_set, plot_labels=True):
"""
Plot images from the similar set.
Args:
similar_set (list): Pyarrow or pandas object containing the similar data points
plot_labels (bool): Whether to plot labels or not
"""
similar_set = (
similar_set.to_dict(orient="list") if isinstance(similar_set, pd.DataFrame) else similar_set.to_pydict()
)
empty_masks = [[[]]]
empty_boxes = [[]]
images = similar_set.get("im_file", [])
bboxes = similar_set.get("bboxes", []) if similar_set.get("bboxes") is not empty_boxes else []
masks = similar_set.get("masks") if similar_set.get("masks")[0] != empty_masks else []
kpts = similar_set.get("keypoints") if similar_set.get("keypoints")[0] != empty_masks else []
cls = similar_set.get("cls", [])
plot_size = 640
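    # Each image is letterboxed to plot_size, and boxes/keypoints are rescaled
    # by the same ratio r so the labels stay aligned with the resized image.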
imgs, batch_idx, plot_boxes, plot_masks, plot_kpts = [], [], [], [], []
for i, imf in enumerate(images):
im = cv2.imread(imf)
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
h, w = im.shape[:2]
r = min(plot_size / h, plot_size / w)
imgs.append(LetterBox(plot_size, center=False)(image=im).transpose(2, 0, 1))
if plot_labels:
if len(bboxes) > i and len(bboxes[i]) > 0:
box = np.array(bboxes[i], dtype=np.float32)
box[:, [0, 2]] *= r
box[:, [1, 3]] *= r
plot_boxes.append(box)
if len(masks) > i and len(masks[i]) > 0:
mask = np.array(masks[i], dtype=np.uint8)[0]
plot_masks.append(LetterBox(plot_size, center=False)(image=mask))
if len(kpts) > i and kpts[i] is not None:
kpt = np.array(kpts[i], dtype=np.float32)
kpt[:, :, :2] *= r
plot_kpts.append(kpt)
batch_idx.append(np.ones(len(np.array(bboxes[i], dtype=np.float32))) * i)
imgs = np.stack(imgs, axis=0)
masks = np.stack(plot_masks, axis=0) if len(plot_masks) > 0 else np.zeros(0, dtype=np.uint8)
kpts = np.concatenate(plot_kpts, axis=0) if len(plot_kpts) > 0 else np.zeros((0, 51), dtype=np.float32)
boxes = xyxy2xywh(np.concatenate(plot_boxes, axis=0)) if len(plot_boxes) > 0 else np.zeros(0, dtype=np.float32)
batch_idx = np.concatenate(batch_idx, axis=0)
cls = np.concatenate([np.array(c, dtype=np.int32) for c in cls], axis=0)
return plot_images(
imgs, batch_idx, cls, bboxes=boxes, masks=masks, kpts=kpts, max_subplots=len(images), save=False, threaded=False
)
def prompt_sql_query(query):
"""Plots images with optional labels from a similar data set."""
check_requirements("openai>=1.6.1")
from openai import OpenAI
if not SETTINGS["openai_api_key"]:
logger.warning("OpenAI API key not found in settings. Please enter your API key below.")
openai_api_key = getpass.getpass("OpenAI API key: ")
SETTINGS.update({"openai_api_key": openai_api_key})
openai = OpenAI(api_key=SETTINGS["openai_api_key"])
messages = [
{
"role": "system",
"content": """
You are a helpful data scientist proficient in SQL. You need to output exactly one SQL query based on
the following schema and a user request. You only need to output the format with fixed selection
statement that selects everything from "'table'", like `SELECT * from 'table'`
Schema:
im_file: string not null
labels: list<item: string> not null
child 0, item: string
cls: list<item: int64> not null
child 0, item: int64
bboxes: list<item: list<item: double>> not null
child 0, item: list<item: double>
child 0, item: double
masks: list<item: list<item: list<item: int64>>> not null
child 0, item: list<item: list<item: int64>>
child 0, item: list<item: int64>
child 0, item: int64
keypoints: list<item: list<item: list<item: double>>> not null
child 0, item: list<item: list<item: double>>
child 0, item: list<item: double>
child 0, item: double
vector: fixed_size_list<item: float>[256] not null
child 0, item: float
Some details about the schema:
- the "labels" column contains the string values like 'person' and 'dog' for the respective objects
in each image
- the "cls" column contains the integer values on these classes that map them the labels
Example of a correct query:
request - Get all data points that contain 2 or more people and at least one dog
correct query-
SELECT * FROM 'table' WHERE ARRAY_LENGTH(cls) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'person')) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'dog')) >= 1;
""",
},
{"role": "user", "content": f"{query}"},
]
response = openai.chat.completions.create(model="gpt-3.5-turbo", messages=messages)
return response.choices[0].message.content
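# Example usage (sketch; requires a valid OpenAI API key in the Ultralytics
# settings and makes a real API call):
#   sql = prompt_sql_query("get all images that contain exactly one person")
#   print(sql)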
| [
"lancedb.pydantic.Vector"
] | [((3694, 3716), 'numpy.stack', 'np.stack', (['imgs'], {'axis': '(0)'}), '(imgs, axis=0)\n', (3702, 3716), True, 'import numpy as np\n'), ((4054, 4087), 'numpy.concatenate', 'np.concatenate', (['batch_idx'], {'axis': '(0)'}), '(batch_idx, axis=0)\n', (4068, 4087), True, 'import numpy as np\n'), ((4421, 4456), 'ultralytics.utils.checks.check_requirements', 'check_requirements', (['"""openai>=1.6.1"""'], {}), "('openai>=1.6.1')\n", (4439, 4456), False, 'from ultralytics.utils.checks import check_requirements\n'), ((4758, 4800), 'openai.OpenAI', 'OpenAI', ([], {'api_key': "SETTINGS['openai_api_key']"}), "(api_key=SETTINGS['openai_api_key'])\n", (4764, 4800), False, 'from openai import OpenAI\n'), ((798, 817), 'lancedb.pydantic.Vector', 'Vector', (['vector_size'], {}), '(vector_size)\n', (804, 817), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((2746, 2761), 'cv2.imread', 'cv2.imread', (['imf'], {}), '(imf)\n', (2756, 2761), False, 'import cv2\n'), ((2775, 2810), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (2787, 2810), False, 'import cv2\n'), ((3729, 3757), 'numpy.stack', 'np.stack', (['plot_masks'], {'axis': '(0)'}), '(plot_masks, axis=0)\n', (3737, 3757), True, 'import numpy as np\n'), ((3786, 3813), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.uint8'}), '(0, dtype=np.uint8)\n', (3794, 3813), True, 'import numpy as np\n'), ((3825, 3858), 'numpy.concatenate', 'np.concatenate', (['plot_kpts'], {'axis': '(0)'}), '(plot_kpts, axis=0)\n', (3839, 3858), True, 'import numpy as np\n'), ((3886, 3921), 'numpy.zeros', 'np.zeros', (['(0, 51)'], {'dtype': 'np.float32'}), '((0, 51), dtype=np.float32)\n', (3894, 3921), True, 'import numpy as np\n'), ((4008, 4037), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.float32'}), '(0, dtype=np.float32)\n', (4016, 4037), True, 'import numpy as np\n'), ((4535, 4628), 'ultralytics.utils.LOGGER.warning', 'logger.warning', (['"""OpenAI API key not found in settings. Please enter your API key below."""'], {}), "(\n 'OpenAI API key not found in settings. 
Please enter your API key below.')\n", (4549, 4628), True, 'from ultralytics.utils import LOGGER as logger\n'), ((4649, 4684), 'getpass.getpass', 'getpass.getpass', (['"""OpenAI API key: """'], {}), "('OpenAI API key: ')\n", (4664, 4684), False, 'import getpass\n'), ((4693, 4744), 'ultralytics.utils.SETTINGS.update', 'SETTINGS.update', (["{'openai_api_key': openai_api_key}"], {}), "({'openai_api_key': openai_api_key})\n", (4708, 4744), False, 'from ultralytics.utils import SETTINGS\n'), ((3944, 3978), 'numpy.concatenate', 'np.concatenate', (['plot_boxes'], {'axis': '(0)'}), '(plot_boxes, axis=0)\n', (3958, 3978), True, 'import numpy as np\n'), ((4114, 4141), 'numpy.array', 'np.array', (['c'], {'dtype': 'np.int32'}), '(c, dtype=np.int32)\n', (4122, 4141), True, 'import numpy as np\n'), ((3071, 3108), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3079, 3108), True, 'import numpy as np\n'), ((3492, 3527), 'numpy.array', 'np.array', (['kpts[i]'], {'dtype': 'np.float32'}), '(kpts[i], dtype=np.float32)\n', (3500, 3527), True, 'import numpy as np\n'), ((3296, 3330), 'numpy.array', 'np.array', (['masks[i]'], {'dtype': 'np.uint8'}), '(masks[i], dtype=np.uint8)\n', (3304, 3330), True, 'import numpy as np\n'), ((2905, 2939), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (2914, 2939), False, 'from ultralytics.data.augment import LetterBox\n'), ((3368, 3402), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (3377, 3402), False, 'from ultralytics.data.augment import LetterBox\n'), ((3638, 3675), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3646, 3675), True, 'import numpy as np\n')] |
import os
import argparse
import lancedb
from lancedb.context import contextualize
from lancedb.embeddings import with_embeddings
from datasets import load_dataset
import openai
import pytest
import subprocess
from main import embed_func, create_prompt, complete
# DOWNLOAD ==============================================================
subprocess.Popen(
"wget -c https://eto-public.s3.us-west-2.amazonaws.com/datasets/youtube_transcript/youtube-transcriptions_sample.jsonl",
shell=True,
).wait()
# Testing ===========================================================
@pytest.fixture
def mock_embed_func(monkeypatch):
def mock_api_call(*args, **kwargs):
return {"data": [{"embedding": [0.5]} for _ in range(10)]}
monkeypatch.setattr(openai.Embedding, "create", mock_api_call)
@pytest.fixture
def mock_complete(monkeypatch):
def mock_api_call(*args, **kwargs):
return {"choices": [{"text": "test"}]}
monkeypatch.setattr(openai.Completion, "create", mock_api_call)
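# The two fixtures above stub out the OpenAI embedding and completion
# endpoints, so the test below never issues a real API call.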
def test_main(mock_embed_func, mock_complete):
args = argparse.Namespace(
query="test",
context_length=3,
window_size=20,
stride=4,
openai_key="test",
model="test",
)
db = lancedb.connect("~/tmp/lancedb")
table_name = "youtube-chatbot"
if table_name not in db.table_names():
data = load_dataset("jamescalam/youtube-transcriptions", split="train")
df = (
contextualize(data.to_pandas())
.groupby("title")
.text_col("text")
.window(args.window_size)
.stride(args.stride)
.to_df()
)
df = df.iloc[:10].reset_index(drop=True)
print(df.shape)
data = with_embeddings(embed_func, df, show_progress=True)
data.to_pandas().head(1)
tbl = db.create_table(table_name, data)
print(f"Created LaneDB table of length: {len(tbl)}")
else:
tbl = db.open_table(table_name)
load_dataset("jamescalam/youtube-transcriptions", split="train")
emb = embed_func(args.query)[0]
context = tbl.search(emb).limit(args.context_length).to_df()
prompt = create_prompt(args.query, context)
complete(prompt)
top_match = context.iloc[0]
print(f"Top Match: {top_match['url']}&t={top_match['start']}")
| [
"lancedb.embeddings.with_embeddings",
"lancedb.connect"
] | [((1071, 1184), 'argparse.Namespace', 'argparse.Namespace', ([], {'query': '"""test"""', 'context_length': '(3)', 'window_size': '(20)', 'stride': '(4)', 'openai_key': '"""test"""', 'model': '"""test"""'}), "(query='test', context_length=3, window_size=20, stride=4,\n openai_key='test', model='test')\n", (1089, 1184), False, 'import argparse\n'), ((1246, 1278), 'lancedb.connect', 'lancedb.connect', (['"""~/tmp/lancedb"""'], {}), "('~/tmp/lancedb')\n", (1261, 1278), False, 'import lancedb\n'), ((1995, 2059), 'datasets.load_dataset', 'load_dataset', (['"""jamescalam/youtube-transcriptions"""'], {'split': '"""train"""'}), "('jamescalam/youtube-transcriptions', split='train')\n", (2007, 2059), False, 'from datasets import load_dataset\n'), ((2174, 2208), 'main.create_prompt', 'create_prompt', (['args.query', 'context'], {}), '(args.query, context)\n', (2187, 2208), False, 'from main import embed_func, create_prompt, complete\n'), ((2213, 2229), 'main.complete', 'complete', (['prompt'], {}), '(prompt)\n', (2221, 2229), False, 'from main import embed_func, create_prompt, complete\n'), ((339, 498), 'subprocess.Popen', 'subprocess.Popen', (['"""wget -c https://eto-public.s3.us-west-2.amazonaws.com/datasets/youtube_transcript/youtube-transcriptions_sample.jsonl"""'], {'shell': '(True)'}), "(\n 'wget -c https://eto-public.s3.us-west-2.amazonaws.com/datasets/youtube_transcript/youtube-transcriptions_sample.jsonl'\n , shell=True)\n", (355, 498), False, 'import subprocess\n'), ((1372, 1436), 'datasets.load_dataset', 'load_dataset', (['"""jamescalam/youtube-transcriptions"""'], {'split': '"""train"""'}), "('jamescalam/youtube-transcriptions', split='train')\n", (1384, 1436), False, 'from datasets import load_dataset\n'), ((1746, 1797), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['embed_func', 'df'], {'show_progress': '(True)'}), '(embed_func, df, show_progress=True)\n', (1761, 1797), False, 'from lancedb.embeddings import with_embeddings\n'), ((2070, 2092), 'main.embed_func', 'embed_func', (['args.query'], {}), '(args.query)\n', (2080, 2092), False, 'from main import embed_func, create_prompt, complete\n')] |
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from copy import copy
from datetime import date, datetime, timedelta
from pathlib import Path
from time import sleep
from typing import List
from unittest.mock import PropertyMock, patch
import lance
import lancedb
import numpy as np
import pandas as pd
import polars as pl
import pyarrow as pa
import pytest
import pytest_asyncio
from lancedb.conftest import MockTextEmbeddingFunction
from lancedb.db import AsyncConnection, LanceDBConnection
from lancedb.embeddings import EmbeddingFunctionConfig, EmbeddingFunctionRegistry
from lancedb.pydantic import LanceModel, Vector
from lancedb.table import LanceTable
from pydantic import BaseModel
class MockDB:
def __init__(self, uri: Path):
self.uri = uri
self.read_consistency_interval = None
@functools.cached_property
def is_managed_remote(self) -> bool:
return False
@pytest.fixture
def db(tmp_path) -> MockDB:
return MockDB(tmp_path)
@pytest_asyncio.fixture
async def db_async(tmp_path) -> AsyncConnection:
return await lancedb.connect_async(
tmp_path, read_consistency_interval=timedelta(seconds=0)
)
def test_basic(db):
ds = LanceTable.create(
db,
"test",
data=[
{"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
{"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
],
).to_lance()
table = LanceTable(db, "test")
assert table.name == "test"
assert table.schema == ds.schema
assert table.to_lance().to_table() == ds.to_table()
@pytest.mark.asyncio
async def test_close(db_async: AsyncConnection):
table = await db_async.create_table("some_table", data=[{"id": 0}])
assert table.is_open()
table.close()
assert not table.is_open()
with pytest.raises(Exception, match="Table some_table is closed"):
await table.count_rows()
assert str(table) == "ClosedTable(some_table)"
@pytest.mark.asyncio
async def test_update_async(db_async: AsyncConnection):
table = await db_async.create_table("some_table", data=[{"id": 0}])
assert await table.count_rows("id == 0") == 1
assert await table.count_rows("id == 7") == 0
await table.update({"id": 7})
assert await table.count_rows("id == 7") == 1
assert await table.count_rows("id == 0") == 0
await table.add([{"id": 2}])
await table.update(where="id % 2 == 0", updates_sql={"id": "5"})
assert await table.count_rows("id == 7") == 1
assert await table.count_rows("id == 2") == 0
assert await table.count_rows("id == 5") == 1
await table.update({"id": 10}, where="id == 5")
assert await table.count_rows("id == 10") == 1
def test_create_table(db):
schema = pa.schema(
[
pa.field("vector", pa.list_(pa.float32(), 2)),
pa.field("item", pa.string()),
pa.field("price", pa.float32()),
]
)
expected = pa.Table.from_arrays(
[
pa.FixedSizeListArray.from_arrays(pa.array([3.1, 4.1, 5.9, 26.5]), 2),
pa.array(["foo", "bar"]),
pa.array([10.0, 20.0]),
],
schema=schema,
)
data = [
[
{"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
{"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
]
]
df = pd.DataFrame(data[0])
data.append(df)
data.append(pa.Table.from_pandas(df, schema=schema))
for i, d in enumerate(data):
tbl = (
LanceTable.create(db, f"test_{i}", data=d, schema=schema)
.to_lance()
.to_table()
)
assert expected == tbl
def test_empty_table(db):
schema = pa.schema(
[
pa.field("vector", pa.list_(pa.float32(), 2)),
pa.field("item", pa.string()),
pa.field("price", pa.float32()),
]
)
tbl = LanceTable.create(db, "test", schema=schema)
data = [
{"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
{"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
]
tbl.add(data=data)
def test_add(db):
schema = pa.schema(
[
pa.field("vector", pa.list_(pa.float32(), 2)),
pa.field("item", pa.string()),
pa.field("price", pa.float64()),
]
)
table = LanceTable.create(
db,
"test",
data=[
{"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
{"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
],
)
_add(table, schema)
table = LanceTable.create(db, "test2", schema=schema)
table.add(
data=[
{"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
{"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
],
)
_add(table, schema)
def test_add_pydantic_model(db):
# https://github.com/lancedb/lancedb/issues/562
class Metadata(BaseModel):
source: str
timestamp: datetime
class Document(BaseModel):
content: str
meta: Metadata
class LanceSchema(LanceModel):
id: str
vector: Vector(2)
li: List[int]
payload: Document
tbl = LanceTable.create(db, "mytable", schema=LanceSchema, mode="overwrite")
assert tbl.schema == LanceSchema.to_arrow_schema()
# add works
expected = LanceSchema(
id="id",
vector=[0.0, 0.0],
li=[1, 2, 3],
payload=Document(
content="foo", meta=Metadata(source="bar", timestamp=datetime.now())
),
)
tbl.add([expected])
result = tbl.search([0.0, 0.0]).limit(1).to_pydantic(LanceSchema)[0]
assert result == expected
flattened = tbl.search([0.0, 0.0]).limit(1).to_pandas(flatten=1)
assert len(flattened.columns) == 6 # _distance is automatically added
really_flattened = tbl.search([0.0, 0.0]).limit(1).to_pandas(flatten=True)
assert len(really_flattened.columns) == 7
@pytest.mark.asyncio
async def test_add_async(db_async: AsyncConnection):
table = await db_async.create_table(
"test",
data=[
{"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
{"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
],
)
assert await table.count_rows() == 2
await table.add(
data=[
{"vector": [10.0, 11.0], "item": "baz", "price": 30.0},
],
)
table = await db_async.open_table("test")
assert await table.count_rows() == 3
def test_polars(db):
data = {
"vector": [[3.1, 4.1], [5.9, 26.5]],
"item": ["foo", "bar"],
"price": [10.0, 20.0],
}
# Ingest polars dataframe
table = LanceTable.create(db, "test", data=pl.DataFrame(data))
assert len(table) == 2
result = table.to_pandas()
assert np.allclose(result["vector"].tolist(), data["vector"])
assert result["item"].tolist() == data["item"]
assert np.allclose(result["price"].tolist(), data["price"])
schema = pa.schema(
[
pa.field("vector", pa.list_(pa.float32(), 2)),
pa.field("item", pa.large_string()),
pa.field("price", pa.float64()),
]
)
assert table.schema == schema
# search results to polars dataframe
q = [3.1, 4.1]
result = table.search(q).limit(1).to_polars()
assert np.allclose(result["vector"][0], q)
assert result["item"][0] == "foo"
assert np.allclose(result["price"][0], 10.0)
# enter table to polars dataframe
result = table.to_polars()
assert np.allclose(result.collect()["vector"].to_list(), data["vector"])
# make sure filtering isn't broken
filtered_result = result.filter(pl.col("item").is_in(["foo", "bar"])).collect()
assert len(filtered_result) == 2
def _add(table, schema):
# table = LanceTable(db, "test")
assert len(table) == 2
table.add([{"vector": [6.3, 100.5], "item": "new", "price": 30.0}])
assert len(table) == 3
expected = pa.Table.from_arrays(
[
pa.FixedSizeListArray.from_arrays(
pa.array([3.1, 4.1, 5.9, 26.5, 6.3, 100.5]), 2
),
pa.array(["foo", "bar", "new"]),
pa.array([10.0, 20.0, 30.0]),
],
schema=schema,
)
assert expected == table.to_arrow()
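# Every write (create, add, delete, update, ...) commits a new table version;
# checkout() pins the table to an old version and restore() makes it current
# again.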
def test_versioning(db):
table = LanceTable.create(
db,
"test",
data=[
{"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
{"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
],
)
assert len(table.list_versions()) == 2
assert table.version == 2
table.add([{"vector": [6.3, 100.5], "item": "new", "price": 30.0}])
assert len(table.list_versions()) == 3
assert table.version == 3
assert len(table) == 3
table.checkout(2)
assert table.version == 2
assert len(table) == 2
def test_create_index_method():
with patch.object(
LanceTable, "_dataset_mut", new_callable=PropertyMock
) as mock_dataset:
# Setup mock responses
mock_dataset.return_value.create_index.return_value = None
# Create a LanceTable object
connection = LanceDBConnection(uri="mock.uri")
table = LanceTable(connection, "test_table")
# Call the create_index method
table.create_index(
metric="L2",
num_partitions=256,
num_sub_vectors=96,
vector_column_name="vector",
replace=True,
index_cache_size=256,
)
# Check that the _dataset.create_index method was called
# with the right parameters
mock_dataset.return_value.create_index.assert_called_once_with(
column="vector",
index_type="IVF_PQ",
metric="L2",
num_partitions=256,
num_sub_vectors=96,
replace=True,
accelerator=None,
index_cache_size=256,
)
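# on_bad_vectors controls how malformed vectors are handled at ingestion time:
# "error" (the default) raises, "drop" removes the offending rows, and "fill"
# substitutes fill_value for every element of a bad vector.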
def test_add_with_nans(db):
# by default we raise an error on bad input vectors
bad_data = [
{"vector": [np.nan], "item": "bar", "price": 20.0},
{"vector": [5], "item": "bar", "price": 20.0},
{"vector": [np.nan, np.nan], "item": "bar", "price": 20.0},
{"vector": [np.nan, 5.0], "item": "bar", "price": 20.0},
]
for row in bad_data:
with pytest.raises(ValueError):
LanceTable.create(
db,
"error_test",
data=[{"vector": [3.1, 4.1], "item": "foo", "price": 10.0}, row],
)
table = LanceTable.create(
db,
"drop_test",
data=[
{"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
{"vector": [np.nan], "item": "bar", "price": 20.0},
{"vector": [5], "item": "bar", "price": 20.0},
{"vector": [np.nan, np.nan], "item": "bar", "price": 20.0},
],
on_bad_vectors="drop",
)
assert len(table) == 1
# We can fill bad input with some value
table = LanceTable.create(
db,
"fill_test",
data=[
{"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
{"vector": [np.nan], "item": "bar", "price": 20.0},
{"vector": [np.nan, np.nan], "item": "bar", "price": 20.0},
],
on_bad_vectors="fill",
fill_value=0.0,
)
assert len(table) == 3
arrow_tbl = table.to_lance().to_table(filter="item == 'bar'")
v = arrow_tbl["vector"].to_pylist()[0]
assert np.allclose(v, np.array([0.0, 0.0]))
def test_restore(db):
table = LanceTable.create(
db,
"my_table",
data=[{"vector": [1.1, 0.9], "type": "vector"}],
)
table.add([{"vector": [0.5, 0.2], "type": "vector"}])
table.restore(2)
assert len(table.list_versions()) == 4
assert len(table) == 1
expected = table.to_arrow()
table.checkout(2)
table.restore()
assert len(table.list_versions()) == 5
assert table.to_arrow() == expected
table.restore(5) # latest version should be no-op
assert len(table.list_versions()) == 5
with pytest.raises(ValueError):
table.restore(6)
with pytest.raises(ValueError):
table.restore(0)
def test_merge(db, tmp_path):
table = LanceTable.create(
db,
"my_table",
data=[{"vector": [1.1, 0.9], "id": 0}, {"vector": [1.2, 1.9], "id": 1}],
)
other_table = pa.table({"document": ["foo", "bar"], "id": [0, 1]})
table.merge(other_table, left_on="id")
assert len(table.list_versions()) == 3
expected = pa.table(
{"vector": [[1.1, 0.9], [1.2, 1.9]], "id": [0, 1], "document": ["foo", "bar"]},
schema=table.schema,
)
assert table.to_arrow() == expected
other_dataset = lance.write_dataset(other_table, tmp_path / "other_table.lance")
table.restore(1)
table.merge(other_dataset, left_on="id")
def test_delete(db):
table = LanceTable.create(
db,
"my_table",
data=[{"vector": [1.1, 0.9], "id": 0}, {"vector": [1.2, 1.9], "id": 1}],
)
assert len(table) == 2
assert len(table.list_versions()) == 2
table.delete("id=0")
assert len(table.list_versions()) == 3
assert table.version == 3
assert len(table) == 1
assert table.to_pandas()["id"].tolist() == [1]
def test_update(db):
table = LanceTable.create(
db,
"my_table",
data=[{"vector": [1.1, 0.9], "id": 0}, {"vector": [1.2, 1.9], "id": 1}],
)
assert len(table) == 2
assert len(table.list_versions()) == 2
table.update(where="id=0", values={"vector": [1.1, 1.1]})
assert len(table.list_versions()) == 3
assert table.version == 3
assert len(table) == 2
v = table.to_arrow()["vector"].combine_chunks()
v = v.values.to_numpy().reshape(2, 2)
assert np.allclose(v, np.array([[1.2, 1.9], [1.1, 1.1]]))
def test_update_types(db):
table = LanceTable.create(
db,
"my_table",
data=[
{
"id": 0,
"str": "foo",
"float": 1.1,
"timestamp": datetime(2021, 1, 1),
"date": date(2021, 1, 1),
"vector1": [1.0, 0.0],
"vector2": [1.0, 1.0],
}
],
)
# Update with SQL
table.update(
values_sql=dict(
id="1",
str="'bar'",
float="2.2",
timestamp="TIMESTAMP '2021-01-02 00:00:00'",
date="DATE '2021-01-02'",
vector1="[2.0, 2.0]",
vector2="[3.0, 3.0]",
)
)
actual = table.to_arrow().to_pylist()[0]
expected = dict(
id=1,
str="bar",
float=2.2,
timestamp=datetime(2021, 1, 2),
date=date(2021, 1, 2),
vector1=[2.0, 2.0],
vector2=[3.0, 3.0],
)
assert actual == expected
# Update with values
table.update(
values=dict(
id=2,
str="baz",
float=3.3,
timestamp=datetime(2021, 1, 3),
date=date(2021, 1, 3),
vector1=[3.0, 3.0],
vector2=np.array([4.0, 4.0]),
)
)
actual = table.to_arrow().to_pylist()[0]
expected = dict(
id=2,
str="baz",
float=3.3,
timestamp=datetime(2021, 1, 3),
date=date(2021, 1, 3),
vector1=[3.0, 3.0],
vector2=[4.0, 4.0],
)
assert actual == expected
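# merge_insert is LanceDB's MERGE statement: source rows are matched against
# target rows on the given key column, and the when_matched/when_not_matched
# clauses decide whether to update, insert, or delete.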
def test_merge_insert(db):
table = LanceTable.create(
db,
"my_table",
data=pa.table({"a": [1, 2, 3], "b": ["a", "b", "c"]}),
)
assert len(table) == 3
version = table.version
new_data = pa.table({"a": [2, 3, 4], "b": ["x", "y", "z"]})
# upsert
table.merge_insert(
"a"
).when_matched_update_all().when_not_matched_insert_all().execute(new_data)
expected = pa.table({"a": [1, 2, 3, 4], "b": ["a", "x", "y", "z"]})
assert table.to_arrow().sort_by("a") == expected
table.restore(version)
# conditional update
table.merge_insert("a").when_matched_update_all(where="target.b = 'b'").execute(
new_data
)
expected = pa.table({"a": [1, 2, 3], "b": ["a", "x", "c"]})
assert table.to_arrow().sort_by("a") == expected
table.restore(version)
# insert-if-not-exists
table.merge_insert("a").when_not_matched_insert_all().execute(new_data)
expected = pa.table({"a": [1, 2, 3, 4], "b": ["a", "b", "c", "z"]})
assert table.to_arrow().sort_by("a") == expected
table.restore(version)
new_data = pa.table({"a": [2, 4], "b": ["x", "z"]})
# replace-range
table.merge_insert(
"a"
).when_matched_update_all().when_not_matched_insert_all().when_not_matched_by_source_delete(
"a > 2"
).execute(new_data)
expected = pa.table({"a": [1, 2, 4], "b": ["a", "x", "z"]})
assert table.to_arrow().sort_by("a") == expected
table.restore(version)
# replace-range no condition
table.merge_insert(
"a"
).when_matched_update_all().when_not_matched_insert_all().when_not_matched_by_source_delete().execute(
new_data
)
expected = pa.table({"a": [2, 4], "b": ["x", "z"]})
assert table.to_arrow().sort_by("a") == expected
def test_create_with_embedding_function(db):
class MyTable(LanceModel):
text: str
vector: Vector(10)
func = MockTextEmbeddingFunction()
texts = ["hello world", "goodbye world", "foo bar baz fizz buzz"]
df = pd.DataFrame({"text": texts, "vector": func.compute_source_embeddings(texts)})
conf = EmbeddingFunctionConfig(
source_column="text", vector_column="vector", function=func
)
table = LanceTable.create(
db,
"my_table",
schema=MyTable,
embedding_functions=[conf],
)
table.add(df)
query_str = "hi how are you?"
query_vector = func.compute_query_embeddings(query_str)[0]
expected = table.search(query_vector).limit(2).to_arrow()
actual = table.search(query_str).limit(2).to_arrow()
assert actual == expected
def test_create_f16_table(db):
class MyTable(LanceModel):
text: str
vector: Vector(128, value_type=pa.float16())
df = pd.DataFrame(
{
"text": [f"s-{i}" for i in range(10000)],
"vector": [np.random.randn(128).astype(np.float16) for _ in range(10000)],
}
)
table = LanceTable.create(
db,
"f16_tbl",
schema=MyTable,
)
table.add(df)
table.create_index(num_partitions=2, num_sub_vectors=8)
query = df.vector.iloc[2]
expected = table.search(query).limit(2).to_arrow()
assert "s-2" in expected["text"].to_pylist()
def test_add_with_embedding_function(db):
emb = EmbeddingFunctionRegistry.get_instance().get("test")()
class MyTable(LanceModel):
text: str = emb.SourceField()
vector: Vector(emb.ndims()) = emb.VectorField()
table = LanceTable.create(db, "my_table", schema=MyTable)
texts = ["hello world", "goodbye world", "foo bar baz fizz buzz"]
df = pd.DataFrame({"text": texts})
table.add(df)
texts = ["the quick brown fox", "jumped over the lazy dog"]
table.add([{"text": t} for t in texts])
query_str = "hi how are you?"
query_vector = emb.compute_query_embeddings(query_str)[0]
expected = table.search(query_vector).limit(2).to_arrow()
actual = table.search(query_str).limit(2).to_arrow()
assert actual == expected
def test_multiple_vector_columns(db):
class MyTable(LanceModel):
text: str
vector1: Vector(10)
vector2: Vector(10)
table = LanceTable.create(
db,
"my_table",
schema=MyTable,
)
v1 = np.random.randn(10)
v2 = np.random.randn(10)
data = [
{"vector1": v1, "vector2": v2, "text": "foo"},
{"vector1": v2, "vector2": v1, "text": "bar"},
]
df = pd.DataFrame(data)
table.add(df)
q = np.random.randn(10)
result1 = table.search(q, vector_column_name="vector1").limit(1).to_pandas()
result2 = table.search(q, vector_column_name="vector2").limit(1).to_pandas()
assert result1["text"].iloc[0] != result2["text"].iloc[0]
def test_create_scalar_index(db):
vec_array = pa.array(
[[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]], pa.list_(pa.float32(), 2)
)
test_data = pa.Table.from_pydict(
{"x": ["c", "b", "a", "e", "b"], "y": [1, 2, 3, 4, 5], "vector": vec_array}
)
table = LanceTable.create(
db,
"my_table",
data=test_data,
)
table.create_scalar_index("x")
indices = table.to_lance().list_indices()
assert len(indices) == 1
scalar_index = indices[0]
assert scalar_index["type"] == "Scalar"
# Confirm that prefiltering still works with the scalar index column
results = table.search().where("x = 'c'").to_arrow()
assert results == test_data.slice(0, 1)
results = table.search([5, 5]).to_arrow()
assert results["_distance"][0].as_py() == 0
results = table.search([5, 5]).where("x != 'b'").to_arrow()
assert results["_distance"][0].as_py() > 0
def test_empty_query(db):
table = LanceTable.create(
db,
"my_table",
data=[{"text": "foo", "id": 0}, {"text": "bar", "id": 1}],
)
df = table.search().select(["id"]).where("text='bar'").limit(1).to_pandas()
val = df.id.iloc[0]
assert val == 1
table = LanceTable.create(db, "my_table2", data=[{"id": i} for i in range(100)])
df = table.search().select(["id"]).to_pandas()
assert len(df) == 10
df = table.search().select(["id"]).limit(None).to_pandas()
assert len(df) == 100
df = table.search().select(["id"]).limit(-1).to_pandas()
assert len(df) == 100
def test_search_with_schema_inf_single_vector(db):
class MyTable(LanceModel):
text: str
vector_col: Vector(10)
table = LanceTable.create(
db,
"my_table",
schema=MyTable,
)
v1 = np.random.randn(10)
v2 = np.random.randn(10)
data = [
{"vector_col": v1, "text": "foo"},
{"vector_col": v2, "text": "bar"},
]
df = pd.DataFrame(data)
table.add(df)
q = np.random.randn(10)
result1 = table.search(q, vector_column_name="vector_col").limit(1).to_pandas()
result2 = table.search(q).limit(1).to_pandas()
assert result1["text"].iloc[0] == result2["text"].iloc[0]
def test_search_with_schema_inf_multiple_vector(db):
class MyTable(LanceModel):
text: str
vector1: Vector(10)
vector2: Vector(10)
table = LanceTable.create(
db,
"my_table",
schema=MyTable,
)
v1 = np.random.randn(10)
v2 = np.random.randn(10)
data = [
{"vector1": v1, "vector2": v2, "text": "foo"},
{"vector1": v2, "vector2": v1, "text": "bar"},
]
df = pd.DataFrame(data)
table.add(df)
q = np.random.randn(10)
with pytest.raises(ValueError):
table.search(q).limit(1).to_pandas()
def test_compact_cleanup(db):
table = LanceTable.create(
db,
"my_table",
data=[{"text": "foo", "id": 0}, {"text": "bar", "id": 1}],
)
table.add([{"text": "baz", "id": 2}])
assert len(table) == 3
assert table.version == 3
stats = table.compact_files()
assert len(table) == 3
    # compact_files bumps the table version by 2.
assert table.version == 5
assert stats.fragments_removed > 0
assert stats.fragments_added == 1
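    # No versions are old enough to clean up yet, so nothing is reclaimed.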
stats = table.cleanup_old_versions()
assert stats.bytes_removed == 0
stats = table.cleanup_old_versions(older_than=timedelta(0), delete_unverified=True)
assert stats.bytes_removed > 0
assert table.version == 5
with pytest.raises(Exception, match="Version 3 no longer exists"):
table.checkout(3)
def test_count_rows(db):
table = LanceTable.create(
db,
"my_table",
data=[{"text": "foo", "id": 0}, {"text": "bar", "id": 1}],
)
assert len(table) == 2
assert table.count_rows() == 2
assert table.count_rows(filter="text='bar'") == 1
def test_hybrid_search(db, tmp_path):
# This test uses an FTS index
pytest.importorskip("lancedb.fts")
db = MockDB(str(tmp_path))
# Create a LanceDB table schema with a vector and a text column
emb = EmbeddingFunctionRegistry.get_instance().get("test")()
class MyTable(LanceModel):
text: str = emb.SourceField()
vector: Vector(emb.ndims()) = emb.VectorField()
# Initialize the table using the schema
table = LanceTable.create(
db,
"my_table",
schema=MyTable,
)
    # Create a list of 10 unique English phrases
phrases = [
"great kid don't get cocky",
"now that's a name I haven't heard in a long time",
"if you strike me down I shall become more powerful than you imagine",
"I find your lack of faith disturbing",
"I've got a bad feeling about this",
"never tell me the odds",
"I am your father",
"somebody has to save our skins",
"New strategy R2 let the wookiee win",
"Arrrrggghhhhhhh",
]
# Add the phrases and vectors to the table
table.add([{"text": p} for p in phrases])
    # Create an FTS index on the text column
table.create_fts_index("text")
result1 = (
table.search("Our father who art in heaven", query_type="hybrid")
.rerank(normalize="score")
.to_pydantic(MyTable)
)
result2 = ( # noqa
table.search("Our father who art in heaven", query_type="hybrid")
.rerank(normalize="rank")
.to_pydantic(MyTable)
)
result3 = table.search(
"Our father who art in heaven", query_type="hybrid"
).to_pydantic(MyTable)
assert result1 == result3
    # With a post-filter applied after the hybrid search
result = (
table.search("Arrrrggghhhhhhh", query_type="hybrid")
.where("text='Arrrrggghhhhhhh'")
.to_list()
)
    assert len(result) == 1
@pytest.mark.parametrize(
"consistency_interval", [None, timedelta(seconds=0), timedelta(seconds=0.1)]
)
def test_consistency(tmp_path, consistency_interval):
db = lancedb.connect(tmp_path)
table = LanceTable.create(db, "my_table", data=[{"id": 0}])
db2 = lancedb.connect(tmp_path, read_consistency_interval=consistency_interval)
table2 = db2.open_table("my_table")
assert table2.version == table.version
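    # Visibility of this write in table2 depends on its read_consistency_interval.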
table.add([{"id": 1}])
if consistency_interval is None:
assert table2.version == table.version - 1
table2.checkout_latest()
assert table2.version == table.version
elif consistency_interval == timedelta(seconds=0):
assert table2.version == table.version
else:
        # consistency_interval == timedelta(seconds=0.1)
assert table2.version == table.version - 1
sleep(0.1)
assert table2.version == table.version
def test_restore_consistency(tmp_path):
db = lancedb.connect(tmp_path)
table = LanceTable.create(db, "my_table", data=[{"id": 0}])
db2 = lancedb.connect(tmp_path, read_consistency_interval=timedelta(seconds=0))
table2 = db2.open_table("my_table")
assert table2.version == table.version
# If we call checkout, it should lose consistency
table_fixed = copy(table2)
table_fixed.checkout(table.version)
# But if we call checkout_latest, it should be consistent again
table_ref_latest = copy(table_fixed)
table_ref_latest.checkout_latest()
table.add([{"id": 2}])
assert table_fixed.version == table.version - 1
assert table_ref_latest.version == table.version
# Schema evolution
def test_add_columns(tmp_path):
db = lancedb.connect(tmp_path)
data = pa.table({"id": [0, 1]})
table = LanceTable.create(db, "my_table", data=data)
table.add_columns({"new_col": "id + 2"})
assert table.to_arrow().column_names == ["id", "new_col"]
assert table.to_arrow()["new_col"].to_pylist() == [2, 3]
def test_alter_columns(tmp_path):
db = lancedb.connect(tmp_path)
data = pa.table({"id": [0, 1]})
table = LanceTable.create(db, "my_table", data=data)
table.alter_columns({"path": "id", "rename": "new_id"})
assert table.to_arrow().column_names == ["new_id"]
def test_drop_columns(tmp_path):
db = lancedb.connect(tmp_path)
data = pa.table({"id": [0, 1], "category": ["a", "b"]})
table = LanceTable.create(db, "my_table", data=data)
table.drop_columns(["category"])
assert table.to_arrow().column_names == ["id"]
@pytest.mark.asyncio
async def test_time_travel(db_async: AsyncConnection):
# Setup
table = await db_async.create_table("some_table", data=[{"id": 0}])
version = await table.version()
await table.add([{"id": 1}])
assert await table.count_rows() == 2
# Make sure we can rewind
await table.checkout(version)
assert await table.count_rows() == 1
# Can't add data in time travel mode
with pytest.raises(
ValueError,
match="table cannot be modified when a specific version is checked out",
):
await table.add([{"id": 2}])
# Can go back to normal mode
await table.checkout_latest()
assert await table.count_rows() == 2
# Should be able to add data again
await table.add([{"id": 3}])
assert await table.count_rows() == 3
# Now checkout and restore
await table.checkout(version)
await table.restore()
assert await table.count_rows() == 1
# Should be able to add data
await table.add([{"id": 4}])
assert await table.count_rows() == 2
# Can't use restore if not checked out
with pytest.raises(ValueError, match="checkout before running restore"):
await table.restore()
| [
"lancedb.embeddings.EmbeddingFunctionRegistry.get_instance",
"lancedb.conftest.MockTextEmbeddingFunction",
"lancedb.db.LanceDBConnection",
"lancedb.embeddings.EmbeddingFunctionConfig",
"lancedb.table.LanceTable",
"lancedb.pydantic.Vector",
"lancedb.connect",
"lancedb.table.LanceTable.create"
]
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logging
import os
from typing import Any, Callable, Dict, List, Optional, Union
from urllib.parse import urljoin
import attrs
import pyarrow as pa
import requests
from pydantic import BaseModel
from requests.adapters import HTTPAdapter
from urllib3 import Retry
from lancedb.common import Credential
from lancedb.remote import VectorQuery, VectorQueryResult
from lancedb.remote.connection_timeout import LanceDBClientHTTPAdapterFactory
from lancedb.remote.errors import LanceDBClientError
ARROW_STREAM_CONTENT_TYPE = "application/vnd.apache.arrow.stream"
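# Decorator that raises if a client method is called after close().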
def _check_not_closed(f):
@functools.wraps(f)
def wrapped(self, *args, **kwargs):
if self.closed:
raise ValueError("Connection is closed")
return f(self, *args, **kwargs)
return wrapped
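# Parse an Arrow IPC file from the raw response body into a pyarrow Table.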
def _read_ipc(resp: requests.Response) -> pa.Table:
resp_body = resp.content
with pa.ipc.open_file(pa.BufferReader(resp_body)) as reader:
return reader.read_all()
@attrs.define(slots=False)
class RestfulLanceDBClient:
db_name: str
region: str
api_key: Credential
host_override: Optional[str] = attrs.field(default=None)
closed: bool = attrs.field(default=False, init=False)
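    # Minimal usage sketch (hypothetical database name, region, and key):
    #   client = RestfulLanceDBClient("my-db", "us-east-1", "sk-...")
    #   names = client.list_tables(limit=10)
    #   client.close()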
@functools.cached_property
def session(self) -> requests.Session:
sess = requests.Session()
retry_adapter_instance = retry_adapter(retry_adapter_options())
sess.mount(urljoin(self.url, "/v1/table/"), retry_adapter_instance)
adapter_class = LanceDBClientHTTPAdapterFactory()
sess.mount("https://", adapter_class())
return sess
@property
def url(self) -> str:
return (
self.host_override
or f"https://{self.db_name}.{self.region}.api.lancedb.com"
)
def close(self):
self.session.close()
self.closed = True
@functools.cached_property
def headers(self) -> Dict[str, str]:
headers = {
"x-api-key": self.api_key,
}
if self.region == "local": # Local test mode
headers["Host"] = f"{self.db_name}.{self.region}.api.lancedb.com"
if self.host_override:
headers["x-lancedb-database"] = self.db_name
return headers
@staticmethod
def _check_status(resp: requests.Response):
if resp.status_code == 404:
raise LanceDBClientError(f"Not found: {resp.text}")
elif 400 <= resp.status_code < 500:
raise LanceDBClientError(
f"Bad Request: {resp.status_code}, error: {resp.text}"
)
elif 500 <= resp.status_code < 600:
raise LanceDBClientError(
f"Internal Server Error: {resp.status_code}, error: {resp.text}"
)
elif resp.status_code != 200:
raise LanceDBClientError(
f"Unknown Error: {resp.status_code}, error: {resp.text}"
)
@_check_not_closed
    def get(self, uri: str, params: Optional[Union[Dict[str, Any], BaseModel]] = None):
        """Send a GET request and return the deserialized response payload."""
if isinstance(params, BaseModel):
params: Dict[str, Any] = params.dict(exclude_none=True)
with self.session.get(
urljoin(self.url, uri),
params=params,
headers=self.headers,
timeout=(120.0, 300.0),
) as resp:
self._check_status(resp)
return resp.json()
@_check_not_closed
def post(
self,
uri: str,
data: Optional[Union[Dict[str, Any], BaseModel, bytes]] = None,
params: Optional[Dict[str, Any]] = None,
content_type: Optional[str] = None,
deserialize: Callable = lambda resp: resp.json(),
request_id: Optional[str] = None,
) -> Dict[str, Any]:
"""Send a POST request and returns the deserialized response payload.
Parameters
----------
uri : str
The uri to send the POST request to.
        data : Optional[Union[Dict[str, Any], BaseModel, bytes]]
            The request body; sent as JSON unless raw bytes are supplied.
        request_id : Optional[str]
            Optional client-side request id to be sent in the request headers.
"""
if isinstance(data, BaseModel):
data: Dict[str, Any] = data.dict(exclude_none=True)
if isinstance(data, bytes):
req_kwargs = {"data": data}
else:
req_kwargs = {"json": data}
headers = self.headers.copy()
if content_type is not None:
headers["content-type"] = content_type
if request_id is not None:
headers["x-request-id"] = request_id
with self.session.post(
urljoin(self.url, uri),
headers=headers,
params=params,
timeout=(120.0, 300.0),
**req_kwargs,
) as resp:
self._check_status(resp)
return deserialize(resp)
@_check_not_closed
def list_tables(self, limit: int, page_token: Optional[str] = None) -> List[str]:
"""List all tables in the database."""
if page_token is None:
page_token = ""
json = self.get("/v1/table/", {"limit": limit, "page_token": page_token})
return json["tables"]
@_check_not_closed
def query(self, table_name: str, query: VectorQuery) -> VectorQueryResult:
"""Query a table."""
tbl = self.post(f"/v1/table/{table_name}/query/", query, deserialize=_read_ipc)
return VectorQueryResult(tbl)
def mount_retry_adapter_for_table(self, table_name: str) -> None:
"""
        Add an HTTP adapter to the session that retries retryable requests to the table.
"""
retry_options = retry_adapter_options(methods=["GET", "POST"])
retry_adapter_instance = retry_adapter(retry_options)
session = self.session
session.mount(
urljoin(self.url, f"/v1/table/{table_name}/query/"), retry_adapter_instance
)
session.mount(
urljoin(self.url, f"/v1/table/{table_name}/describe/"),
retry_adapter_instance,
)
session.mount(
urljoin(self.url, f"/v1/table/{table_name}/index/list/"),
retry_adapter_instance,
)
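# Retry tuning comes from LANCE_CLIENT_* environment variables; the string
# literals below are the defaults.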
def retry_adapter_options(methods=["GET"]) -> Dict[str, Any]:
return {
"retries": int(os.environ.get("LANCE_CLIENT_MAX_RETRIES", "3")),
"connect_retries": int(os.environ.get("LANCE_CLIENT_CONNECT_RETRIES", "3")),
"read_retries": int(os.environ.get("LANCE_CLIENT_READ_RETRIES", "3")),
"backoff_factor": float(
os.environ.get("LANCE_CLIENT_RETRY_BACKOFF_FACTOR", "0.25")
),
"backoff_jitter": float(
os.environ.get("LANCE_CLIENT_RETRY_BACKOFF_JITTER", "0.25")
),
"statuses": [
int(i.strip())
for i in os.environ.get(
"LANCE_CLIENT_RETRY_STATUSES", "429, 500, 502, 503"
).split(",")
],
"methods": methods,
}
def retry_adapter(options: Dict[str, Any]) -> HTTPAdapter:
total_retries = options["retries"]
connect_retries = options["connect_retries"]
read_retries = options["read_retries"]
backoff_factor = options["backoff_factor"]
backoff_jitter = options["backoff_jitter"]
statuses = options["statuses"]
methods = frozenset(options["methods"])
logging.debug(
f"Setting up retry adapter with {total_retries} retries," # noqa G003
+ f"connect retries {connect_retries}, read retries {read_retries},"
+ f"backoff factor {backoff_factor}, statuses {statuses}, "
+ f"methods {methods}"
)
return HTTPAdapter(
max_retries=Retry(
total=total_retries,
connect=connect_retries,
read=read_retries,
backoff_factor=backoff_factor,
backoff_jitter=backoff_jitter,
status_forcelist=statuses,
allowed_methods=methods,
)
)
| [
"lancedb.remote.connection_timeout.LanceDBClientHTTPAdapterFactory",
"lancedb.remote.VectorQueryResult",
"lancedb.remote.errors.LanceDBClientError"
]
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import sys
from datetime import date, datetime
from typing import List, Optional, Tuple
import pyarrow as pa
import pydantic
import pytest
from lancedb.pydantic import PYDANTIC_VERSION, LanceModel, Vector, pydantic_to_schema
from pydantic import Field
@pytest.mark.skipif(
sys.version_info < (3, 9),
reason="using native type alias requires python3.9 or higher",
)
def test_pydantic_to_arrow():
class StructModel(pydantic.BaseModel):
a: str
b: Optional[float]
class TestModel(pydantic.BaseModel):
id: int
s: str
vec: list[float]
li: list[int]
lili: list[list[float]]
litu: list[tuple[float, float]]
opt: Optional[str] = None
st: StructModel
dt: date
dtt: datetime
dt_with_tz: datetime = Field(json_schema_extra={"tz": "Asia/Shanghai"})
# d: dict
# TODO: test we can actually convert the model into data.
# m = TestModel(
# id=1,
# s="hello",
# vec=[1.0, 2.0, 3.0],
# li=[2, 3, 4],
# lili=[[2.5, 1.5], [3.5, 4.5], [5.5, 6.5]],
# litu=[(2.5, 1.5), (3.5, 4.5), (5.5, 6.5)],
# st=StructModel(a="a", b=1.0),
# dt=date.today(),
# dtt=datetime.now(),
# dt_with_tz=datetime.now(pytz.timezone("Asia/Shanghai")),
# )
schema = pydantic_to_schema(TestModel)
expect_schema = pa.schema(
[
pa.field("id", pa.int64(), False),
pa.field("s", pa.utf8(), False),
pa.field("vec", pa.list_(pa.float64()), False),
pa.field("li", pa.list_(pa.int64()), False),
pa.field("lili", pa.list_(pa.list_(pa.float64())), False),
pa.field("litu", pa.list_(pa.list_(pa.float64())), False),
pa.field("opt", pa.utf8(), True),
pa.field(
"st",
pa.struct(
[pa.field("a", pa.utf8(), False), pa.field("b", pa.float64(), True)]
),
False,
),
pa.field("dt", pa.date32(), False),
pa.field("dtt", pa.timestamp("us"), False),
pa.field("dt_with_tz", pa.timestamp("us", tz="Asia/Shanghai"), False),
]
)
assert schema == expect_schema
@pytest.mark.skipif(
sys.version_info < (3, 10),
reason="using | type syntax requires python3.10 or higher",
)
def test_optional_types_py310():
class TestModel(pydantic.BaseModel):
a: str | None
b: None | str
c: Optional[str]
schema = pydantic_to_schema(TestModel)
expect_schema = pa.schema(
[
pa.field("a", pa.utf8(), True),
pa.field("b", pa.utf8(), True),
pa.field("c", pa.utf8(), True),
]
)
assert schema == expect_schema
@pytest.mark.skipif(
sys.version_info > (3, 8),
reason="using native type alias requires python3.9 or higher",
)
def test_pydantic_to_arrow_py38():
class StructModel(pydantic.BaseModel):
a: str
b: Optional[float]
class TestModel(pydantic.BaseModel):
id: int
s: str
vec: List[float]
li: List[int]
lili: List[List[float]]
litu: List[Tuple[float, float]]
opt: Optional[str] = None
st: StructModel
dt: date
dtt: datetime
dt_with_tz: datetime = Field(json_schema_extra={"tz": "Asia/Shanghai"})
# d: dict
# TODO: test we can actually convert the model to Arrow data.
# m = TestModel(
# id=1,
# s="hello",
# vec=[1.0, 2.0, 3.0],
# li=[2, 3, 4],
# lili=[[2.5, 1.5], [3.5, 4.5], [5.5, 6.5]],
# litu=[(2.5, 1.5), (3.5, 4.5), (5.5, 6.5)],
# st=StructModel(a="a", b=1.0),
# dt=date.today(),
# dtt=datetime.now(),
# dt_with_tz=datetime.now(pytz.timezone("Asia/Shanghai")),
# )
schema = pydantic_to_schema(TestModel)
expect_schema = pa.schema(
[
pa.field("id", pa.int64(), False),
pa.field("s", pa.utf8(), False),
pa.field("vec", pa.list_(pa.float64()), False),
pa.field("li", pa.list_(pa.int64()), False),
pa.field("lili", pa.list_(pa.list_(pa.float64())), False),
pa.field("litu", pa.list_(pa.list_(pa.float64())), False),
pa.field("opt", pa.utf8(), True),
pa.field(
"st",
pa.struct(
[pa.field("a", pa.utf8(), False), pa.field("b", pa.float64(), True)]
),
False,
),
pa.field("dt", pa.date32(), False),
pa.field("dtt", pa.timestamp("us"), False),
pa.field("dt_with_tz", pa.timestamp("us", tz="Asia/Shanghai"), False),
]
)
assert schema == expect_schema
def test_fixed_size_list_field():
class TestModel(pydantic.BaseModel):
vec: Vector(16)
li: List[int]
data = TestModel(vec=list(range(16)), li=[1, 2, 3])
if PYDANTIC_VERSION >= (2,):
assert json.loads(data.model_dump_json()) == {
"vec": list(range(16)),
"li": [1, 2, 3],
}
else:
assert data.dict() == {
"vec": list(range(16)),
"li": [1, 2, 3],
}
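    # Vector(16) becomes a non-nullable fixed-size list of 16 float32 values in Arrow.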
schema = pydantic_to_schema(TestModel)
assert schema == pa.schema(
[
pa.field("vec", pa.list_(pa.float32(), 16), False),
pa.field("li", pa.list_(pa.int64()), False),
]
)
if PYDANTIC_VERSION >= (2,):
json_schema = TestModel.model_json_schema()
else:
json_schema = TestModel.schema()
assert json_schema == {
"properties": {
"vec": {
"items": {"type": "number"},
"maxItems": 16,
"minItems": 16,
"title": "Vec",
"type": "array",
},
"li": {"items": {"type": "integer"}, "title": "Li", "type": "array"},
},
"required": ["vec", "li"],
"title": "TestModel",
"type": "object",
}
def test_fixed_size_list_validation():
class TestModel(pydantic.BaseModel):
vec: Vector(8)
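    # 9 and 7 elements must fail validation; exactly 8 must pass.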
with pytest.raises(pydantic.ValidationError):
TestModel(vec=range(9))
with pytest.raises(pydantic.ValidationError):
TestModel(vec=range(7))
TestModel(vec=range(8))
def test_lance_model():
class TestModel(LanceModel):
vector: Vector(16) = Field(default=[0.0] * 16)
li: List[int] = Field(default=[1, 2, 3])
schema = pydantic_to_schema(TestModel)
assert schema == TestModel.to_arrow_schema()
assert TestModel.field_names() == ["vector", "li"]
t = TestModel()
    assert t == TestModel(vector=[0.0] * 16, li=[1, 2, 3])
| [
"lancedb.pydantic.Vector",
"lancedb.pydantic.pydantic_to_schema"
]
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest.mock as mock
from datetime import timedelta
import lance
import lancedb
import numpy as np
import pandas.testing as tm
import pyarrow as pa
import pytest
import pytest_asyncio
from lancedb.db import LanceDBConnection
from lancedb.pydantic import LanceModel, Vector
from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query
from lancedb.table import AsyncTable, LanceTable
class MockTable:
def __init__(self, tmp_path):
self.uri = tmp_path
self._conn = LanceDBConnection(self.uri)
def to_lance(self):
return lance.dataset(self.uri)
def _execute_query(self, query):
ds = self.to_lance()
return ds.to_table(
columns=query.columns,
filter=query.filter,
prefilter=query.prefilter,
nearest={
"column": query.vector_column,
"q": query.vector,
"k": query.k,
"metric": query.metric,
"nprobes": query.nprobes,
"refine_factor": query.refine_factor,
},
)
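# Note (added illustration): MockTable forwards the fields of a LanceDB `Query`
# straight into `lance.Dataset.to_table(nearest=...)`, so the builder tests
# below can exercise query construction without hitting a real LanceDB table.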
@pytest.fixture
def table(tmp_path) -> MockTable:
df = pa.table(
{
"vector": pa.array(
[[1, 2], [3, 4]], type=pa.list_(pa.float32(), list_size=2)
),
"id": pa.array([1, 2]),
"str_field": pa.array(["a", "b"]),
"float_field": pa.array([1.0, 2.0]),
}
)
lance.write_dataset(df, tmp_path)
return MockTable(tmp_path)
@pytest_asyncio.fixture
async def table_async(tmp_path) -> AsyncTable:
conn = await lancedb.connect_async(
tmp_path, read_consistency_interval=timedelta(seconds=0)
)
data = pa.table(
{
"vector": pa.array(
[[1, 2], [3, 4]], type=pa.list_(pa.float32(), list_size=2)
),
"id": pa.array([1, 2]),
"str_field": pa.array(["a", "b"]),
"float_field": pa.array([1.0, 2.0]),
}
)
return await conn.create_table("test", data)
def test_cast(table):
class TestModel(LanceModel):
vector: Vector(2)
id: int
str_field: str
float_field: float
q = LanceVectorQueryBuilder(table, [0, 0], "vector").limit(1)
results = q.to_pydantic(TestModel)
assert len(results) == 1
r0 = results[0]
assert isinstance(r0, TestModel)
assert r0.id == 1
assert r0.vector == [1, 2]
assert r0.str_field == "a"
assert r0.float_field == 1.0
def test_query_builder(table):
rs = (
LanceVectorQueryBuilder(table, [0, 0], "vector")
.limit(1)
.select(["id", "vector"])
.to_list()
)
assert rs[0]["id"] == 1
assert all(np.array(rs[0]["vector"]) == [1, 2])
def test_dynamic_projection(table):
rs = (
LanceVectorQueryBuilder(table, [0, 0], "vector")
.limit(1)
.select({"id": "id", "id2": "id * 2"})
.to_list()
)
assert rs[0]["id"] == 1
assert rs[0]["id2"] == 2
def test_query_builder_with_filter(table):
rs = LanceVectorQueryBuilder(table, [0, 0], "vector").where("id = 2").to_list()
assert rs[0]["id"] == 2
assert all(np.array(rs[0]["vector"]) == [3, 4])
def test_query_builder_with_prefilter(table):
df = (
LanceVectorQueryBuilder(table, [0, 0], "vector")
.where("id = 2")
.limit(1)
.to_pandas()
)
assert len(df) == 0
df = (
LanceVectorQueryBuilder(table, [0, 0], "vector")
.where("id = 2", prefilter=True)
.limit(1)
.to_pandas()
)
assert df["id"].values[0] == 2
assert all(df["vector"].values[0] == [3, 4])
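# The two queries above illustrate the filter-ordering semantics: by default the
# `where` clause runs *after* the top-k vector search (post-filter), so a filter
# that excludes the nearest hits can leave zero rows; `prefilter=True` applies
# the filter before the vector search instead.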
def test_query_builder_with_metric(table):
query = [4, 8]
vector_column_name = "vector"
df_default = LanceVectorQueryBuilder(table, query, vector_column_name).to_pandas()
df_l2 = (
LanceVectorQueryBuilder(table, query, vector_column_name)
.metric("L2")
.to_pandas()
)
tm.assert_frame_equal(df_default, df_l2)
df_cosine = (
LanceVectorQueryBuilder(table, query, vector_column_name)
.metric("cosine")
.limit(1)
.to_pandas()
)
assert df_cosine._distance[0] == pytest.approx(
cosine_distance(query, df_cosine.vector[0]),
abs=1e-6,
)
assert 0 <= df_cosine._distance[0] <= 1
def test_query_builder_with_different_vector_column():
table = mock.MagicMock(spec=LanceTable)
query = [4, 8]
vector_column_name = "foo_vector"
builder = (
LanceVectorQueryBuilder(table, query, vector_column_name)
.metric("cosine")
.where("b < 10")
.select(["b"])
.limit(2)
)
ds = mock.Mock()
table.to_lance.return_value = ds
builder.to_arrow()
table._execute_query.assert_called_once_with(
Query(
vector=query,
filter="b < 10",
k=2,
metric="cosine",
columns=["b"],
nprobes=20,
refine_factor=None,
vector_column="foo_vector",
)
)
def cosine_distance(vec1, vec2):
return 1 - np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))
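# Illustrative sanity check (added; not part of the original suite): cosine
# distance is 0 for parallel vectors and 1 for orthogonal ones, which is why the
# bounds assertion above uses 0 <= distance <= 1 for this data.
def test_cosine_distance_helper():
    assert cosine_distance([1.0, 0.0], [2.0, 0.0]) == pytest.approx(0.0)
    assert cosine_distance([1.0, 0.0], [0.0, 1.0]) == pytest.approx(1.0)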
async def check_query(
query: AsyncQueryBase, *, expected_num_rows=None, expected_columns=None
):
num_rows = 0
results = await query.to_batches()
async for batch in results:
if expected_columns is not None:
assert batch.schema.names == expected_columns
num_rows += batch.num_rows
if expected_num_rows is not None:
assert num_rows == expected_num_rows
@pytest.mark.asyncio
async def test_query_async(table_async: AsyncTable):
await check_query(
table_async.query(),
expected_num_rows=2,
expected_columns=["vector", "id", "str_field", "float_field"],
)
await check_query(table_async.query().where("id = 2"), expected_num_rows=1)
await check_query(
table_async.query().select(["id", "vector"]), expected_columns=["id", "vector"]
)
await check_query(
table_async.query().select({"foo": "id", "bar": "id + 1"}),
expected_columns=["foo", "bar"],
)
await check_query(table_async.query().limit(1), expected_num_rows=1)
await check_query(
table_async.query().nearest_to(pa.array([1, 2])), expected_num_rows=2
)
# Support different types of inputs for the vector query
for vector_query in [
[1, 2],
[1.0, 2.0],
np.array([1, 2]),
(1, 2),
]:
await check_query(
table_async.query().nearest_to(vector_query), expected_num_rows=2
)
# No easy way to check these vector query parameters are doing what they say. We
# just check that they don't raise exceptions and assume this is tested at a lower
# level.
await check_query(
table_async.query().where("id = 2").nearest_to(pa.array([1, 2])).postfilter(),
expected_num_rows=1,
)
await check_query(
table_async.query().nearest_to(pa.array([1, 2])).refine_factor(1),
expected_num_rows=2,
)
await check_query(
table_async.query().nearest_to(pa.array([1, 2])).nprobes(10),
expected_num_rows=2,
)
await check_query(
table_async.query().nearest_to(pa.array([1, 2])).bypass_vector_index(),
expected_num_rows=2,
)
await check_query(
table_async.query().nearest_to(pa.array([1, 2])).distance_type("dot"),
expected_num_rows=2,
)
await check_query(
table_async.query().nearest_to(pa.array([1, 2])).distance_type("DoT"),
expected_num_rows=2,
)
# Make sure we can use a vector query as a base query (e.g. call limit on it)
# Also make sure `vector_search` works
await check_query(table_async.vector_search([1, 2]).limit(1), expected_num_rows=1)
# Also check an empty query
await check_query(table_async.query().where("id < 0"), expected_num_rows=0)
@pytest.mark.asyncio
async def test_query_to_arrow_async(table_async: AsyncTable):
table = await table_async.to_arrow()
assert table.num_rows == 2
assert table.num_columns == 4
table = await table_async.query().to_arrow()
assert table.num_rows == 2
assert table.num_columns == 4
table = await table_async.query().where("id < 0").to_arrow()
assert table.num_rows == 0
assert table.num_columns == 4
@pytest.mark.asyncio
async def test_query_to_pandas_async(table_async: AsyncTable):
df = await table_async.to_pandas()
assert df.shape == (2, 4)
df = await table_async.query().to_pandas()
assert df.shape == (2, 4)
df = await table_async.query().where("id < 0").to_pandas()
assert df.shape == (0, 4)
| [
"lancedb.pydantic.Vector",
"lancedb.query.LanceVectorQueryBuilder",
"lancedb.query.Query",
"lancedb.db.LanceDBConnection"
] | [((2041, 2074), 'lance.write_dataset', 'lance.write_dataset', (['df', 'tmp_path'], {}), '(df, tmp_path)\n', (2060, 2074), False, 'import lance\n'), ((4585, 4625), 'pandas.testing.assert_frame_equal', 'tm.assert_frame_equal', (['df_default', 'df_l2'], {}), '(df_default, df_l2)\n', (4606, 4625), True, 'import pandas.testing as tm\n'), ((5024, 5055), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'spec': 'LanceTable'}), '(spec=LanceTable)\n', (5038, 5055), True, 'import unittest.mock as mock\n'), ((5302, 5313), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (5311, 5313), True, 'import unittest.mock as mock\n'), ((1097, 1124), 'lancedb.db.LanceDBConnection', 'LanceDBConnection', (['self.uri'], {}), '(self.uri)\n', (1114, 1124), False, 'from lancedb.db import LanceDBConnection\n'), ((1165, 1188), 'lance.dataset', 'lance.dataset', (['self.uri'], {}), '(self.uri)\n', (1178, 1188), False, 'import lance\n'), ((2713, 2722), 'lancedb.pydantic.Vector', 'Vector', (['(2)'], {}), '(2)\n', (2719, 2722), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((5432, 5569), 'lancedb.query.Query', 'Query', ([], {'vector': 'query', 'filter': '"""b < 10"""', 'k': '(2)', 'metric': '"""cosine"""', 'columns': "['b']", 'nprobes': '(20)', 'refine_factor': 'None', 'vector_column': '"""foo_vector"""'}), "(vector=query, filter='b < 10', k=2, metric='cosine', columns=['b'],\n nprobes=20, refine_factor=None, vector_column='foo_vector')\n", (5437, 5569), False, 'from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query\n'), ((7085, 7101), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (7093, 7101), True, 'import numpy as np\n'), ((1907, 1923), 'pyarrow.array', 'pa.array', (['[1, 2]'], {}), '([1, 2])\n', (1915, 1923), True, 'import pyarrow as pa\n'), ((1950, 1970), 'pyarrow.array', 'pa.array', (["['a', 'b']"], {}), "(['a', 'b'])\n", (1958, 1970), True, 'import pyarrow as pa\n'), ((1999, 2019), 'pyarrow.array', 'pa.array', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (2007, 2019), True, 'import pyarrow as pa\n'), ((2461, 2477), 'pyarrow.array', 'pa.array', (['[1, 2]'], {}), '([1, 2])\n', (2469, 2477), True, 'import pyarrow as pa\n'), ((2504, 2524), 'pyarrow.array', 'pa.array', (["['a', 'b']"], {}), "(['a', 'b'])\n", (2512, 2524), True, 'import pyarrow as pa\n'), ((2553, 2573), 'pyarrow.array', 'pa.array', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (2561, 2573), True, 'import pyarrow as pa\n'), ((2798, 2846), 'lancedb.query.LanceVectorQueryBuilder', 'LanceVectorQueryBuilder', (['table', '[0, 0]', '"""vector"""'], {}), "(table, [0, 0], 'vector')\n", (2821, 2846), False, 'from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query\n'), ((3319, 3344), 'numpy.array', 'np.array', (["rs[0]['vector']"], {}), "(rs[0]['vector'])\n", (3327, 3344), True, 'import numpy as np\n'), ((3781, 3806), 'numpy.array', 'np.array', (["rs[0]['vector']"], {}), "(rs[0]['vector'])\n", (3789, 3806), True, 'import numpy as np\n'), ((4382, 4439), 'lancedb.query.LanceVectorQueryBuilder', 'LanceVectorQueryBuilder', (['table', 'query', 'vector_column_name'], {}), '(table, query, vector_column_name)\n', (4405, 4439), False, 'from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query\n'), ((5729, 5747), 'numpy.dot', 'np.dot', (['vec1', 'vec2'], {}), '(vec1, vec2)\n', (5735, 5747), True, 'import numpy as np\n'), ((2263, 2283), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(0)'}), '(seconds=0)\n', (2272, 2283), False, 'from datetime import timedelta\n'), ((5751, 5771), 
'numpy.linalg.norm', 'np.linalg.norm', (['vec1'], {}), '(vec1)\n', (5765, 5771), True, 'import numpy as np\n'), ((5774, 5794), 'numpy.linalg.norm', 'np.linalg.norm', (['vec2'], {}), '(vec2)\n', (5788, 5794), True, 'import numpy as np\n'), ((6909, 6925), 'pyarrow.array', 'pa.array', (['[1, 2]'], {}), '([1, 2])\n', (6917, 6925), True, 'import pyarrow as pa\n'), ((3663, 3711), 'lancedb.query.LanceVectorQueryBuilder', 'LanceVectorQueryBuilder', (['table', '[0, 0]', '"""vector"""'], {}), "(table, [0, 0], 'vector')\n", (3686, 3711), False, 'from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query\n'), ((4474, 4531), 'lancedb.query.LanceVectorQueryBuilder', 'LanceVectorQueryBuilder', (['table', 'query', 'vector_column_name'], {}), '(table, query, vector_column_name)\n', (4497, 4531), False, 'from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query\n'), ((1847, 1859), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (1857, 1859), True, 'import pyarrow as pa\n'), ((2401, 2413), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (2411, 2413), True, 'import pyarrow as pa\n'), ((7506, 7522), 'pyarrow.array', 'pa.array', (['[1, 2]'], {}), '([1, 2])\n', (7514, 7522), True, 'import pyarrow as pa\n'), ((7635, 7651), 'pyarrow.array', 'pa.array', (['[1, 2]'], {}), '([1, 2])\n', (7643, 7651), True, 'import pyarrow as pa\n'), ((7768, 7784), 'pyarrow.array', 'pa.array', (['[1, 2]'], {}), '([1, 2])\n', (7776, 7784), True, 'import pyarrow as pa\n'), ((7896, 7912), 'pyarrow.array', 'pa.array', (['[1, 2]'], {}), '([1, 2])\n', (7904, 7912), True, 'import pyarrow as pa\n'), ((8034, 8050), 'pyarrow.array', 'pa.array', (['[1, 2]'], {}), '([1, 2])\n', (8042, 8050), True, 'import pyarrow as pa\n'), ((8171, 8187), 'pyarrow.array', 'pa.array', (['[1, 2]'], {}), '([1, 2])\n', (8179, 8187), True, 'import pyarrow as pa\n'), ((3150, 3198), 'lancedb.query.LanceVectorQueryBuilder', 'LanceVectorQueryBuilder', (['table', '[0, 0]', '"""vector"""'], {}), "(table, [0, 0], 'vector')\n", (3173, 3198), False, 'from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query\n'), ((3413, 3461), 'lancedb.query.LanceVectorQueryBuilder', 'LanceVectorQueryBuilder', (['table', '[0, 0]', '"""vector"""'], {}), "(table, [0, 0], 'vector')\n", (3436, 3461), False, 'from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query\n'), ((3885, 3933), 'lancedb.query.LanceVectorQueryBuilder', 'LanceVectorQueryBuilder', (['table', '[0, 0]', '"""vector"""'], {}), "(table, [0, 0], 'vector')\n", (3908, 3933), False, 'from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query\n'), ((4048, 4096), 'lancedb.query.LanceVectorQueryBuilder', 'LanceVectorQueryBuilder', (['table', '[0, 0]', '"""vector"""'], {}), "(table, [0, 0], 'vector')\n", (4071, 4096), False, 'from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query\n'), ((4653, 4710), 'lancedb.query.LanceVectorQueryBuilder', 'LanceVectorQueryBuilder', (['table', 'query', 'vector_column_name'], {}), '(table, query, vector_column_name)\n', (4676, 4710), False, 'from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query\n'), ((5137, 5194), 'lancedb.query.LanceVectorQueryBuilder', 'LanceVectorQueryBuilder', (['table', 'query', 'vector_column_name'], {}), '(table, query, vector_column_name)\n', (5160, 5194), False, 'from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query\n')] |
# Copyright (c) 2023. LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib.util
import io
import os
import lancedb
import numpy as np
import pandas as pd
import pytest
import requests
from lancedb.embeddings import get_registry
from lancedb.pydantic import LanceModel, Vector
# These are integration tests for embedding functions.
# They are slow because they require downloading models
# or a connection to an external API.
try:
if importlib.util.find_spec("mlx.core") is not None:
_mlx = True
else:
_mlx = None
except Exception:
_mlx = None
try:
if importlib.util.find_spec("imagebind") is not None:
_imagebind = True
else:
_imagebind = None
except Exception:
_imagebind = None
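# Hedged sketch of the registry pattern the tests below exercise (the alias is
# one of those used in this file; only the imports above are assumed):
#
#     func = get_registry().get("sentence-transformers").create(max_retries=0)
#
#     class Words(LanceModel):
#         text: str = func.SourceField()                     # embedding input column
#         vector: Vector(func.ndims()) = func.VectorField()  # auto-populated output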
@pytest.mark.slow
@pytest.mark.parametrize("alias", ["sentence-transformers", "openai"])
def test_basic_text_embeddings(alias, tmp_path):
db = lancedb.connect(tmp_path)
registry = get_registry()
func = registry.get(alias).create(max_retries=0)
func2 = registry.get(alias).create(max_retries=0)
class Words(LanceModel):
text: str = func.SourceField()
text2: str = func2.SourceField()
vector: Vector(func.ndims()) = func.VectorField()
vector2: Vector(func2.ndims()) = func2.VectorField()
table = db.create_table("words", schema=Words)
table.add(
pd.DataFrame(
{
"text": [
"hello world",
"goodbye world",
"fizz",
"buzz",
"foo",
"bar",
"baz",
],
"text2": [
"to be or not to be",
"that is the question",
"for whether tis nobler",
"in the mind to suffer",
"the slings and arrows",
"of outrageous fortune",
"or to take arms",
],
}
)
)
query = "greetings"
actual = (
table.search(query, vector_column_name="vector").limit(1).to_pydantic(Words)[0]
)
vec = func.compute_query_embeddings(query)[0]
expected = (
table.search(vec, vector_column_name="vector").limit(1).to_pydantic(Words)[0]
)
assert actual.text == expected.text
assert actual.text == "hello world"
assert not np.allclose(actual.vector, actual.vector2)
actual = (
table.search(query, vector_column_name="vector2").limit(1).to_pydantic(Words)[0]
)
assert actual.text != "hello world"
assert not np.allclose(actual.vector, actual.vector2)
@pytest.mark.slow
def test_openclip(tmp_path):
from PIL import Image
db = lancedb.connect(tmp_path)
registry = get_registry()
func = registry.get("open-clip").create(max_retries=0)
class Images(LanceModel):
label: str
image_uri: str = func.SourceField()
image_bytes: bytes = func.SourceField()
vector: Vector(func.ndims()) = func.VectorField()
vec_from_bytes: Vector(func.ndims()) = func.VectorField()
table = db.create_table("images", schema=Images)
labels = ["cat", "cat", "dog", "dog", "horse", "horse"]
uris = [
"http://farm1.staticflickr.com/53/167798175_7c7845bbbd_z.jpg",
"http://farm1.staticflickr.com/134/332220238_da527d8140_z.jpg",
"http://farm9.staticflickr.com/8387/8602747737_2e5c2a45d4_z.jpg",
"http://farm5.staticflickr.com/4092/5017326486_1f46057f5f_z.jpg",
"http://farm9.staticflickr.com/8216/8434969557_d37882c42d_z.jpg",
"http://farm6.staticflickr.com/5142/5835678453_4f3a4edb45_z.jpg",
]
    # Fetch each URI's content as bytes.
image_bytes = [requests.get(uri).content for uri in uris]
table.add(
pd.DataFrame({"label": labels, "image_uri": uris, "image_bytes": image_bytes})
)
# text search
actual = (
table.search("man's best friend", vector_column_name="vector")
.limit(1)
.to_pydantic(Images)[0]
)
assert actual.label == "dog"
frombytes = (
table.search("man's best friend", vector_column_name="vec_from_bytes")
.limit(1)
.to_pydantic(Images)[0]
)
assert actual.label == frombytes.label
assert np.allclose(actual.vector, frombytes.vector)
# image search
query_image_uri = "http://farm1.staticflickr.com/200/467715466_ed4a31801f_z.jpg"
image_bytes = requests.get(query_image_uri).content
query_image = Image.open(io.BytesIO(image_bytes))
actual = (
table.search(query_image, vector_column_name="vector")
.limit(1)
.to_pydantic(Images)[0]
)
assert actual.label == "dog"
other = (
table.search(query_image, vector_column_name="vec_from_bytes")
.limit(1)
.to_pydantic(Images)[0]
)
assert actual.label == other.label
arrow_table = table.search().select(["vector", "vec_from_bytes"]).to_arrow()
assert np.allclose(
arrow_table["vector"].combine_chunks().values.to_numpy(),
arrow_table["vec_from_bytes"].combine_chunks().values.to_numpy(),
)
@pytest.mark.skipif(
_imagebind is None,
reason="skip if imagebind not installed.",
)
@pytest.mark.slow
def test_imagebind(tmp_path):
import os
import shutil
import tempfile
import lancedb.embeddings.imagebind
import pandas as pd
import requests
from lancedb.embeddings import get_registry
from lancedb.pydantic import LanceModel, Vector
with tempfile.TemporaryDirectory() as temp_dir:
print(f"Created temporary directory {temp_dir}")
def download_images(image_uris):
downloaded_image_paths = []
for uri in image_uris:
try:
response = requests.get(uri, stream=True)
if response.status_code == 200:
# Extract image name from URI
image_name = os.path.basename(uri)
image_path = os.path.join(temp_dir, image_name)
with open(image_path, "wb") as out_file:
shutil.copyfileobj(response.raw, out_file)
downloaded_image_paths.append(image_path)
except Exception as e: # noqa: PERF203
print(f"Failed to download {uri}. Error: {e}")
return temp_dir, downloaded_image_paths
db = lancedb.connect(tmp_path)
registry = get_registry()
func = registry.get("imagebind").create(max_retries=0)
class Images(LanceModel):
label: str
image_uri: str = func.SourceField()
vector: Vector(func.ndims()) = func.VectorField()
table = db.create_table("images", schema=Images)
labels = ["cat", "cat", "dog", "dog", "horse", "horse"]
uris = [
"http://farm1.staticflickr.com/53/167798175_7c7845bbbd_z.jpg",
"http://farm1.staticflickr.com/134/332220238_da527d8140_z.jpg",
"http://farm9.staticflickr.com/8387/8602747737_2e5c2a45d4_z.jpg",
"http://farm5.staticflickr.com/4092/5017326486_1f46057f5f_z.jpg",
"http://farm9.staticflickr.com/8216/8434969557_d37882c42d_z.jpg",
"http://farm6.staticflickr.com/5142/5835678453_4f3a4edb45_z.jpg",
]
temp_dir, downloaded_images = download_images(uris)
table.add(pd.DataFrame({"label": labels, "image_uri": downloaded_images}))
# text search
actual = (
table.search("man's best friend", vector_column_name="vector")
.limit(1)
.to_pydantic(Images)[0]
)
assert actual.label == "dog"
# image search
query_image_uri = [
"https://live.staticflickr.com/65535/33336453970_491665f66e_h.jpg"
]
temp_dir, downloaded_images = download_images(query_image_uri)
query_image_uri = downloaded_images[0]
actual = (
table.search(query_image_uri, vector_column_name="vector")
.limit(1)
.to_pydantic(Images)[0]
)
assert actual.label == "dog"
if os.path.isdir(temp_dir):
shutil.rmtree(temp_dir)
print(f"Deleted temporary directory {temp_dir}")
@pytest.mark.slow
@pytest.mark.skipif(
os.environ.get("COHERE_API_KEY") is None, reason="COHERE_API_KEY not set"
) # also skip if cohere not installed
def test_cohere_embedding_function(tmp_path):
    cohere = (
        get_registry()
        .get("cohere")
        .create(name="embed-multilingual-v2.0", max_retries=0)
    )
    class TextModel(LanceModel):
        text: str = cohere.SourceField()
        vector: Vector(cohere.ndims()) = cohere.VectorField()
    df = pd.DataFrame({"text": ["hello world", "goodbye world"]})
    db = lancedb.connect(tmp_path)
tbl = db.create_table("test", schema=TextModel, mode="overwrite")
tbl.add(df)
assert len(tbl.to_pandas()["vector"][0]) == cohere.ndims()
@pytest.mark.slow
def test_instructor_embedding(tmp_path):
model = get_registry().get("instructor").create(max_retries=0)
class TextModel(LanceModel):
text: str = model.SourceField()
vector: Vector(model.ndims()) = model.VectorField()
df = pd.DataFrame({"text": ["hello world", "goodbye world"]})
db = lancedb.connect(tmp_path)
tbl = db.create_table("test", schema=TextModel, mode="overwrite")
tbl.add(df)
assert len(tbl.to_pandas()["vector"][0]) == model.ndims()
@pytest.mark.slow
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
def test_gemini_embedding(tmp_path):
model = get_registry().get("gemini-text").create(max_retries=0)
class TextModel(LanceModel):
text: str = model.SourceField()
vector: Vector(model.ndims()) = model.VectorField()
df = pd.DataFrame({"text": ["hello world", "goodbye world"]})
db = lancedb.connect(tmp_path)
tbl = db.create_table("test", schema=TextModel, mode="overwrite")
tbl.add(df)
assert len(tbl.to_pandas()["vector"][0]) == model.ndims()
assert tbl.search("hello").limit(1).to_pandas()["text"][0] == "hello world"
@pytest.mark.skipif(
_mlx is None,
reason="mlx tests only required for apple users.",
)
@pytest.mark.slow
def test_gte_embedding(tmp_path):
import lancedb.embeddings.gte
model = get_registry().get("gte-text").create()
class TextModel(LanceModel):
text: str = model.SourceField()
vector: Vector(model.ndims()) = model.VectorField()
df = pd.DataFrame({"text": ["hello world", "goodbye world"]})
db = lancedb.connect(tmp_path)
tbl = db.create_table("test", schema=TextModel, mode="overwrite")
tbl.add(df)
assert len(tbl.to_pandas()["vector"][0]) == model.ndims()
assert tbl.search("hello").limit(1).to_pandas()["text"][0] == "hello world"
def aws_setup():
try:
import boto3
sts = boto3.client("sts")
sts.get_caller_identity()
return True
except Exception:
return False
@pytest.mark.slow
@pytest.mark.skipif(
not aws_setup(), reason="AWS credentials not set or libraries not installed"
)
def test_bedrock_embedding(tmp_path):
for name in [
"amazon.titan-embed-text-v1",
"cohere.embed-english-v3",
"cohere.embed-multilingual-v3",
]:
model = get_registry().get("bedrock-text").create(max_retries=0, name=name)
class TextModel(LanceModel):
text: str = model.SourceField()
vector: Vector(model.ndims()) = model.VectorField()
df = pd.DataFrame({"text": ["hello world", "goodbye world"]})
db = lancedb.connect(tmp_path)
tbl = db.create_table("test", schema=TextModel, mode="overwrite")
tbl.add(df)
assert len(tbl.to_pandas()["vector"][0]) == model.ndims()
@pytest.mark.slow
@pytest.mark.skipif(
os.environ.get("OPENAI_API_KEY") is None, reason="OPENAI_API_KEY not set"
)
def test_openai_embedding(tmp_path):
def _get_table(model):
class TextModel(LanceModel):
text: str = model.SourceField()
vector: Vector(model.ndims()) = model.VectorField()
db = lancedb.connect(tmp_path)
tbl = db.create_table("test", schema=TextModel, mode="overwrite")
return tbl
model = get_registry().get("openai").create(max_retries=0)
tbl = _get_table(model)
df = pd.DataFrame({"text": ["hello world", "goodbye world"]})
tbl.add(df)
assert len(tbl.to_pandas()["vector"][0]) == model.ndims()
assert tbl.search("hello").limit(1).to_pandas()["text"][0] == "hello world"
model = (
get_registry()
.get("openai")
.create(max_retries=0, name="text-embedding-3-large")
)
tbl = _get_table(model)
tbl.add(df)
assert len(tbl.to_pandas()["vector"][0]) == model.ndims()
assert tbl.search("hello").limit(1).to_pandas()["text"][0] == "hello world"
model = (
get_registry()
.get("openai")
.create(max_retries=0, name="text-embedding-3-large", dim=1024)
)
tbl = _get_table(model)
tbl.add(df)
assert len(tbl.to_pandas()["vector"][0]) == model.ndims()
assert tbl.search("hello").limit(1).to_pandas()["text"][0] == "hello world"
| [
"lancedb.connect",
"lancedb.embeddings.get_registry"
] | [((1288, 1357), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""alias"""', "['sentence-transformers', 'openai']"], {}), "('alias', ['sentence-transformers', 'openai'])\n", (1311, 1357), False, 'import pytest\n'), ((5687, 5773), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(_imagebind is None)'], {'reason': '"""skip if imagebind not installed."""'}), "(_imagebind is None, reason=\n 'skip if imagebind not installed.')\n", (5705, 5773), False, 'import pytest\n'), ((10771, 10859), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(_mlx is None)'], {'reason': '"""mlx tests only required for apple users."""'}), "(_mlx is None, reason=\n 'mlx tests only required for apple users.')\n", (10789, 10859), False, 'import pytest\n'), ((1416, 1441), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (1431, 1441), False, 'import lancedb\n'), ((1457, 1471), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (1469, 1471), False, 'from lancedb.embeddings import get_registry\n'), ((3273, 3298), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (3288, 3298), False, 'import lancedb\n'), ((3314, 3328), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (3326, 3328), False, 'from lancedb.embeddings import get_registry\n'), ((4825, 4869), 'numpy.allclose', 'np.allclose', (['actual.vector', 'frombytes.vector'], {}), '(actual.vector, frombytes.vector)\n', (4836, 4869), True, 'import numpy as np\n'), ((8732, 8755), 'os.path.isdir', 'os.path.isdir', (['temp_dir'], {}), '(temp_dir)\n', (8745, 8755), False, 'import os\n'), ((9319, 9375), 'pandas.DataFrame', 'pd.DataFrame', (["{'text': ['hello world', 'goodbye world']}"], {}), "({'text': ['hello world', 'goodbye world']})\n", (9331, 9375), True, 'import pandas as pd\n'), ((9385, 9413), 'lancedb.connect', 'lancedb.connect', (['"""~/lancedb"""'], {}), "('~/lancedb')\n", (9400, 9413), False, 'import lancedb\n'), ((9836, 9892), 'pandas.DataFrame', 'pd.DataFrame', (["{'text': ['hello world', 'goodbye world']}"], {}), "({'text': ['hello world', 'goodbye world']})\n", (9848, 9892), True, 'import pandas as pd\n'), ((9902, 9927), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (9917, 9927), False, 'import lancedb\n'), ((10447, 10503), 'pandas.DataFrame', 'pd.DataFrame', (["{'text': ['hello world', 'goodbye world']}"], {}), "({'text': ['hello world', 'goodbye world']})\n", (10459, 10503), True, 'import pandas as pd\n'), ((10513, 10538), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (10528, 10538), False, 'import lancedb\n'), ((11149, 11205), 'pandas.DataFrame', 'pd.DataFrame', (["{'text': ['hello world', 'goodbye world']}"], {}), "({'text': ['hello world', 'goodbye world']})\n", (11161, 11205), True, 'import pandas as pd\n'), ((11215, 11240), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (11230, 11240), False, 'import lancedb\n'), ((13017, 13073), 'pandas.DataFrame', 'pd.DataFrame', (["{'text': ['hello world', 'goodbye world']}"], {}), "({'text': ['hello world', 'goodbye world']})\n", (13029, 13073), True, 'import pandas as pd\n'), ((967, 1003), 'importlib.util.find_spec', 'importlib.util.find_spec', (['"""mlx.core"""'], {}), "('mlx.core')\n", (991, 1003), False, 'import importlib\n'), ((1114, 1151), 'importlib.util.find_spec', 'importlib.util.find_spec', (['"""imagebind"""'], {}), "('imagebind')\n", (1138, 1151), False, 'import importlib\n'), ((1883, 2169), 'pandas.DataFrame', 'pd.DataFrame', 
(["{'text': ['hello world', 'goodbye world', 'fizz', 'buzz', 'foo', 'bar',\n 'baz'], 'text2': ['to be or not to be', 'that is the question',\n 'for whether tis nobler', 'in the mind to suffer',\n 'the slings and arrows', 'of outrageous fortune', 'or to take arms']}"], {}), "({'text': ['hello world', 'goodbye world', 'fizz', 'buzz',\n 'foo', 'bar', 'baz'], 'text2': ['to be or not to be',\n 'that is the question', 'for whether tis nobler',\n 'in the mind to suffer', 'the slings and arrows',\n 'of outrageous fortune', 'or to take arms']})\n", (1895, 2169), True, 'import pandas as pd\n'), ((2936, 2978), 'numpy.allclose', 'np.allclose', (['actual.vector', 'actual.vector2'], {}), '(actual.vector, actual.vector2)\n', (2947, 2978), True, 'import numpy as np\n'), ((3145, 3187), 'numpy.allclose', 'np.allclose', (['actual.vector', 'actual.vector2'], {}), '(actual.vector, actual.vector2)\n', (3156, 3187), True, 'import numpy as np\n'), ((4339, 4417), 'pandas.DataFrame', 'pd.DataFrame', (["{'label': labels, 'image_uri': uris, 'image_bytes': image_bytes}"], {}), "({'label': labels, 'image_uri': uris, 'image_bytes': image_bytes})\n", (4351, 4417), True, 'import pandas as pd\n'), ((4993, 5022), 'requests.get', 'requests.get', (['query_image_uri'], {}), '(query_image_uri)\n', (5005, 5022), False, 'import requests\n'), ((5060, 5083), 'io.BytesIO', 'io.BytesIO', (['image_bytes'], {}), '(image_bytes)\n', (5070, 5083), False, 'import io\n'), ((6075, 6104), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (6102, 6104), False, 'import tempfile\n'), ((7003, 7028), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (7018, 7028), False, 'import lancedb\n'), ((7048, 7062), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (7060, 7062), False, 'from lancedb.embeddings import get_registry\n'), ((8765, 8788), 'shutil.rmtree', 'shutil.rmtree', (['temp_dir'], {}), '(temp_dir)\n', (8778, 8788), False, 'import shutil\n'), ((8891, 8923), 'os.environ.get', 'os.environ.get', (['"""COHERE_API_KEY"""'], {}), "('COHERE_API_KEY')\n", (8905, 8923), False, 'import os\n'), ((10122, 10154), 'os.environ.get', 'os.environ.get', (['"""GOOGLE_API_KEY"""'], {}), "('GOOGLE_API_KEY')\n", (10136, 10154), False, 'import os\n'), ((11534, 11553), 'boto3.client', 'boto3.client', (['"""sts"""'], {}), "('sts')\n", (11546, 11553), False, 'import boto3\n'), ((12195, 12251), 'pandas.DataFrame', 'pd.DataFrame', (["{'text': ['hello world', 'goodbye world']}"], {}), "({'text': ['hello world', 'goodbye world']})\n", (12207, 12251), True, 'import pandas as pd\n'), ((12265, 12290), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (12280, 12290), False, 'import lancedb\n'), ((12796, 12821), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (12811, 12821), False, 'import lancedb\n'), ((12497, 12529), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (12511, 12529), False, 'import os\n'), ((4273, 4290), 'requests.get', 'requests.get', (['uri'], {}), '(uri)\n', (4285, 4290), False, 'import requests\n'), ((7984, 8047), 'pandas.DataFrame', 'pd.DataFrame', (["{'label': labels, 'image_uri': downloaded_images}"], {}), "({'label': labels, 'image_uri': downloaded_images})\n", (7996, 8047), True, 'import pandas as pd\n'), ((6344, 6374), 'requests.get', 'requests.get', (['uri'], {'stream': '(True)'}), '(uri, stream=True)\n', (6356, 6374), False, 'import requests\n'), ((9065, 9079), 
'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (9077, 9079), False, 'from lancedb.embeddings import get_registry\n'), ((9637, 9651), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (9649, 9651), False, 'from lancedb.embeddings import get_registry\n'), ((10247, 10261), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (10259, 10261), False, 'from lancedb.embeddings import get_registry\n'), ((10965, 10979), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (10977, 10979), False, 'from lancedb.embeddings import get_registry\n'), ((12929, 12943), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (12941, 12943), False, 'from lancedb.embeddings import get_registry\n'), ((13256, 13270), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (13268, 13270), False, 'from lancedb.embeddings import get_registry\n'), ((13572, 13586), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (13584, 13586), False, 'from lancedb.embeddings import get_registry\n'), ((6518, 6539), 'os.path.basename', 'os.path.basename', (['uri'], {}), '(uri)\n', (6534, 6539), False, 'import os\n'), ((6577, 6611), 'os.path.join', 'os.path.join', (['temp_dir', 'image_name'], {}), '(temp_dir, image_name)\n', (6589, 6611), False, 'import os\n'), ((11967, 11981), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (11979, 11981), False, 'from lancedb.embeddings import get_registry\n'), ((6705, 6747), 'shutil.copyfileobj', 'shutil.copyfileobj', (['response.raw', 'out_file'], {}), '(response.raw, out_file)\n', (6723, 6747), False, 'import shutil\n')] |
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import lancedb
import pyarrow as pa
from lancedb.remote.client import VectorQuery, VectorQueryResult
class FakeLanceDBClient:
def close(self):
pass
def query(self, table_name: str, query: VectorQuery) -> VectorQueryResult:
assert table_name == "test"
t = pa.schema([]).empty_table()
return VectorQueryResult(t)
def post(self, path: str):
pass
def mount_retry_adapter_for_table(self, table_name: str):
pass
def test_remote_db():
conn = lancedb.connect("db://client-will-be-injected", api_key="fake")
setattr(conn, "_client", FakeLanceDBClient())
table = conn["test"]
table.schema = pa.schema([pa.field("vector", pa.list_(pa.float32(), 2))])
table.search([1.0, 2.0]).to_pandas()
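# Note (added illustration): any object exposing the same `query`/`post`/`close`/
# `mount_retry_adapter_for_table` surface can be injected via `_client`, so the
# remote search path runs without network access; `FakeLanceDBClient` above is
# the minimal stub.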
| [
"lancedb.connect",
"lancedb.remote.client.VectorQueryResult"
] | [((1101, 1164), 'lancedb.connect', 'lancedb.connect', (['"""db://client-will-be-injected"""'], {'api_key': '"""fake"""'}), "('db://client-will-be-injected', api_key='fake')\n", (1116, 1164), False, 'import lancedb\n'), ((924, 944), 'lancedb.remote.client.VectorQueryResult', 'VectorQueryResult', (['t'], {}), '(t)\n', (941, 944), False, 'from lancedb.remote.client import VectorQuery, VectorQueryResult\n'), ((881, 894), 'pyarrow.schema', 'pa.schema', (['[]'], {}), '([])\n', (890, 894), True, 'import pyarrow as pa\n'), ((1299, 1311), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (1309, 1311), True, 'import pyarrow as pa\n')] |
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from typing import List, Union
import lance
import lancedb
import numpy as np
import pyarrow as pa
import pytest
from lancedb.conftest import MockTextEmbeddingFunction
from lancedb.embeddings import (
EmbeddingFunctionConfig,
EmbeddingFunctionRegistry,
with_embeddings,
)
from lancedb.embeddings.base import TextEmbeddingFunction
from lancedb.embeddings.registry import get_registry, register
from lancedb.pydantic import LanceModel, Vector
def mock_embed_func(input_data):
return [np.random.randn(128).tolist() for _ in range(len(input_data))]
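# Note (added illustration): the contract `with_embeddings` relies on is one
# embedding per input row: here a 128-dim random vector per text value.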
def test_with_embeddings():
for wrap_api in [True, False]:
if wrap_api and sys.version_info.minor >= 11:
# ratelimiter package doesn't work on 3.11
continue
data = pa.Table.from_arrays(
[
pa.array(["foo", "bar"]),
pa.array([10.0, 20.0]),
],
names=["text", "price"],
)
data = with_embeddings(mock_embed_func, data, wrap_api=wrap_api)
assert data.num_columns == 3
assert data.num_rows == 2
assert data.column_names == ["text", "price", "vector"]
assert data.column("text").to_pylist() == ["foo", "bar"]
assert data.column("price").to_pylist() == [10.0, 20.0]
def test_embedding_function(tmp_path):
registry = EmbeddingFunctionRegistry.get_instance()
# let's create a table
table = pa.table(
{
"text": pa.array(["hello world", "goodbye world"]),
"vector": [np.random.randn(10), np.random.randn(10)],
}
)
conf = EmbeddingFunctionConfig(
source_column="text",
vector_column="vector",
function=MockTextEmbeddingFunction(),
)
metadata = registry.get_table_metadata([conf])
table = table.replace_schema_metadata(metadata)
# Write it to disk
lance.write_dataset(table, tmp_path / "test.lance")
# Load this back
ds = lance.dataset(tmp_path / "test.lance")
# can we get the serialized version back out?
configs = registry.parse_functions(ds.schema.metadata)
conf = configs["vector"]
func = conf.function
actual = func.compute_query_embeddings("hello world")
# And we make sure we can call it
expected = func.compute_query_embeddings("hello world")
assert np.allclose(actual, expected)
@pytest.mark.slow
def test_embedding_function_rate_limit(tmp_path):
def _get_schema_from_model(model):
class Schema(LanceModel):
text: str = model.SourceField()
vector: Vector(model.ndims()) = model.VectorField()
return Schema
db = lancedb.connect(tmp_path)
registry = EmbeddingFunctionRegistry.get_instance()
model = registry.get("test-rate-limited").create(max_retries=0)
schema = _get_schema_from_model(model)
table = db.create_table("test", schema=schema, mode="overwrite")
table.add([{"text": "hello world"}])
with pytest.raises(Exception):
table.add([{"text": "hello world"}])
assert len(table) == 1
model = registry.get("test-rate-limited").create()
schema = _get_schema_from_model(model)
table = db.create_table("test", schema=schema, mode="overwrite")
table.add([{"text": "hello world"}])
table.add([{"text": "hello world"}])
assert len(table) == 2
def test_add_optional_vector(tmp_path):
@register("mock-embedding")
class MockEmbeddingFunction(TextEmbeddingFunction):
def ndims(self):
return 128
def generate_embeddings(
self, texts: Union[List[str], np.ndarray]
) -> List[np.array]:
"""
Generate the embeddings for the given texts
"""
return [np.random.randn(self.ndims()).tolist() for _ in range(len(texts))]
registry = get_registry()
model = registry.get("mock-embedding").create()
class LanceSchema(LanceModel):
id: str
vector: Vector(model.ndims()) = model.VectorField(default=None)
text: str = model.SourceField()
db = lancedb.connect(tmp_path)
tbl = db.create_table("optional_vector", schema=LanceSchema)
# add works
expected = LanceSchema(id="id", text="text")
tbl.add([expected])
assert not (np.abs(tbl.to_pandas()["vector"][0]) < 1e-6).all()
| [
"lancedb.embeddings.EmbeddingFunctionRegistry.get_instance",
"lancedb.embeddings.registry.get_registry",
"lancedb.conftest.MockTextEmbeddingFunction",
"lancedb.embeddings.registry.register",
"lancedb.embeddings.with_embeddings",
"lancedb.connect"
] | [((1948, 1988), 'lancedb.embeddings.EmbeddingFunctionRegistry.get_instance', 'EmbeddingFunctionRegistry.get_instance', ([], {}), '()\n', (1986, 1988), False, 'from lancedb.embeddings import EmbeddingFunctionConfig, EmbeddingFunctionRegistry, with_embeddings\n'), ((2476, 2527), 'lance.write_dataset', 'lance.write_dataset', (['table', "(tmp_path / 'test.lance')"], {}), "(table, tmp_path / 'test.lance')\n", (2495, 2527), False, 'import lance\n'), ((2559, 2597), 'lance.dataset', 'lance.dataset', (["(tmp_path / 'test.lance')"], {}), "(tmp_path / 'test.lance')\n", (2572, 2597), False, 'import lance\n'), ((2932, 2961), 'numpy.allclose', 'np.allclose', (['actual', 'expected'], {}), '(actual, expected)\n', (2943, 2961), True, 'import numpy as np\n'), ((3246, 3271), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (3261, 3271), False, 'import lancedb\n'), ((3287, 3327), 'lancedb.embeddings.EmbeddingFunctionRegistry.get_instance', 'EmbeddingFunctionRegistry.get_instance', ([], {}), '()\n', (3325, 3327), False, 'from lancedb.embeddings import EmbeddingFunctionConfig, EmbeddingFunctionRegistry, with_embeddings\n'), ((3980, 4006), 'lancedb.embeddings.registry.register', 'register', (['"""mock-embedding"""'], {}), "('mock-embedding')\n", (3988, 4006), False, 'from lancedb.embeddings.registry import get_registry, register\n'), ((4419, 4433), 'lancedb.embeddings.registry.get_registry', 'get_registry', ([], {}), '()\n', (4431, 4433), False, 'from lancedb.embeddings.registry import get_registry, register\n'), ((4660, 4685), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (4675, 4685), False, 'import lancedb\n'), ((1570, 1627), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['mock_embed_func', 'data'], {'wrap_api': 'wrap_api'}), '(mock_embed_func, data, wrap_api=wrap_api)\n', (1585, 1627), False, 'from lancedb.embeddings import EmbeddingFunctionConfig, EmbeddingFunctionRegistry, with_embeddings\n'), ((3558, 3582), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (3571, 3582), False, 'import pytest\n'), ((2069, 2111), 'pyarrow.array', 'pa.array', (["['hello world', 'goodbye world']"], {}), "(['hello world', 'goodbye world'])\n", (2077, 2111), True, 'import pyarrow as pa\n'), ((2310, 2337), 'lancedb.conftest.MockTextEmbeddingFunction', 'MockTextEmbeddingFunction', ([], {}), '()\n', (2335, 2337), False, 'from lancedb.conftest import MockTextEmbeddingFunction\n'), ((1102, 1122), 'numpy.random.randn', 'np.random.randn', (['(128)'], {}), '(128)\n', (1117, 1122), True, 'import numpy as np\n'), ((1427, 1451), 'pyarrow.array', 'pa.array', (["['foo', 'bar']"], {}), "(['foo', 'bar'])\n", (1435, 1451), True, 'import pyarrow as pa\n'), ((1469, 1491), 'pyarrow.array', 'pa.array', (['[10.0, 20.0]'], {}), '([10.0, 20.0])\n', (1477, 1491), True, 'import pyarrow as pa\n'), ((2136, 2155), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (2151, 2155), True, 'import numpy as np\n'), ((2157, 2176), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (2172, 2176), True, 'import numpy as np\n')] |
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import importlib.metadata
import platform
import random
import sys
import time
from lancedb.utils import CONFIG
from lancedb.utils.general import TryExcept
from .general import (
PLATFORMS,
get_git_origin_url,
is_git_dir,
is_github_actions_ci,
is_online,
is_pip_package,
is_pytest_running,
threaded_request,
)
class _Events:
"""
A class for collecting anonymous event analytics. Event analytics are enabled when
``diagnostics=True`` in config and disabled when ``diagnostics=False``.
You can enable or disable diagnostics by running ``lancedb diagnostics --enabled``
or ``lancedb diagnostics --disabled``.
Attributes
----------
url : str
The URL to send anonymous events.
rate_limit : float
The rate limit in seconds for sending events.
metadata : dict
A dictionary containing metadata about the environment.
enabled : bool
A flag to enable or disable Events based on certain conditions.
"""
_instance = None
url = "https://app.posthog.com/capture/"
headers = {"Content-Type": "application/json"}
api_key = "phc_oENDjGgHtmIDrV6puUiFem2RB4JA8gGWulfdulmMdZP"
    # This API key is write-only and is safe to expose in the codebase.
def __init__(self):
"""
Initializes the Events object with default values for events, rate_limit,
and metadata.
"""
self.events = [] # events list
self.throttled_event_names = ["search_table"]
self.throttled_events = set()
self.max_events = 5 # max events to store in memory
self.rate_limit = 60.0 * 5 # rate limit (seconds)
self.time = 0.0
if is_git_dir():
install = "git"
elif is_pip_package():
install = "pip"
else:
install = "other"
self.metadata = {
"cli": sys.argv[0],
"install": install,
"python": ".".join(platform.python_version_tuple()[:2]),
"version": importlib.metadata.version("lancedb"),
"platforms": PLATFORMS,
"session_id": round(random.random() * 1e15),
# TODO: In future we might be interested in this metric
# 'engagement_time_msec': 1000
}
TESTS_RUNNING = is_pytest_running() or is_github_actions_ci()
ONLINE = is_online()
self.enabled = (
CONFIG["diagnostics"]
and not TESTS_RUNNING
and ONLINE
and (
is_pip_package()
or get_git_origin_url() == "https://github.com/lancedb/lancedb.git"
)
)
    def __call__(self, event_name, params=None):
"""
Attempts to add a new event to the events list and send events if the rate
limit is reached.
        Parameters
        ----------
        event_name : str
            The name of the event to be logged.
        params : dict, optional
            A dictionary of additional parameters to be logged with the event.
"""
        ### NOTE: We might need a way to tag a session with a label to check usage
        ### from a source. Setting the label should be exposed to the user.
        if params is None:
            params = {}  # avoid sharing a mutable default dict across calls
        if not self.enabled:
            return
if (
len(self.events) < self.max_events
): # Events list limited to self.max_events (drop any events past this)
params.update(self.metadata)
event = {
"event": event_name,
"properties": params,
"timestamp": datetime.datetime.now(
tz=datetime.timezone.utc
).isoformat(),
"distinct_id": CONFIG["uuid"],
}
if event_name not in self.throttled_event_names:
self.events.append(event)
elif event_name not in self.throttled_events:
self.throttled_events.add(event_name)
self.events.append(event)
# Check rate limit
t = time.time()
if (t - self.time) < self.rate_limit:
return
# Time is over rate limiter, send now
data = {
"api_key": self.api_key,
"distinct_id": CONFIG["uuid"], # posthog needs this to accepts the event
"batch": self.events,
}
# POST equivalent to requests.post(self.url, json=data).
# threaded request is used to avoid blocking, retries are disabled, and
# verbose is disabled to avoid any possible disruption in the console.
threaded_request(
method="post",
url=self.url,
headers=self.headers,
json=data,
retry=0,
verbose=False,
)
# Flush & Reset
self.events = []
self.throttled_events = set()
self.time = t
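# Hedged usage sketch (illustrative; the parameters shown are hypothetical):
#
#     register_event("create_table")                       # buffered until flushed
#     register_event("search_table", params={"rows": 10})  # throttled event name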
@TryExcept(verbose=False)
def register_event(name: str, **kwargs):
if _Events._instance is None:
_Events._instance = _Events()
_Events._instance(name, **kwargs)
| [
"lancedb.utils.general.TryExcept"
] | [((5466, 5490), 'lancedb.utils.general.TryExcept', 'TryExcept', ([], {'verbose': '(False)'}), '(verbose=False)\n', (5475, 5490), False, 'from lancedb.utils.general import TryExcept\n'), ((4628, 4639), 'time.time', 'time.time', ([], {}), '()\n', (4637, 4639), False, 'import time\n'), ((2579, 2610), 'platform.python_version_tuple', 'platform.python_version_tuple', ([], {}), '()\n', (2608, 2610), False, 'import platform\n'), ((2747, 2762), 'random.random', 'random.random', ([], {}), '()\n', (2760, 2762), False, 'import random\n'), ((4171, 4218), 'datetime.datetime.now', 'datetime.datetime.now', ([], {'tz': 'datetime.timezone.utc'}), '(tz=datetime.timezone.utc)\n', (4192, 4218), False, 'import datetime\n')] |
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from datetime import timedelta
import lancedb
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from lancedb.pydantic import LanceModel, Vector
def test_basic(tmp_path):
db = lancedb.connect(tmp_path)
assert db.uri == str(tmp_path)
assert db.table_names() == []
table = db.create_table(
"test",
data=[
{"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
{"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
],
)
rs = table.search([100, 100]).limit(1).to_pandas()
assert len(rs) == 1
assert rs["item"].iloc[0] == "bar"
rs = table.search([100, 100]).where("price < 15").limit(2).to_pandas()
assert len(rs) == 1
assert rs["item"].iloc[0] == "foo"
assert db.table_names() == ["test"]
assert "test" in db
assert len(db) == 1
assert db.open_table("test").name == db["test"].name
def test_ingest_pd(tmp_path):
db = lancedb.connect(tmp_path)
assert db.uri == str(tmp_path)
assert db.table_names() == []
data = pd.DataFrame(
{
"vector": [[3.1, 4.1], [5.9, 26.5]],
"item": ["foo", "bar"],
"price": [10.0, 20.0],
}
)
table = db.create_table("test", data=data)
rs = table.search([100, 100]).limit(1).to_pandas()
assert len(rs) == 1
assert rs["item"].iloc[0] == "bar"
rs = table.search([100, 100]).where("price < 15").limit(2).to_pandas()
assert len(rs) == 1
assert rs["item"].iloc[0] == "foo"
assert db.table_names() == ["test"]
assert "test" in db
assert len(db) == 1
assert db.open_table("test").name == db["test"].name
def test_ingest_iterator(tmp_path):
class PydanticSchema(LanceModel):
vector: Vector(2)
item: str
price: float
arrow_schema = pa.schema(
[
pa.field("vector", pa.list_(pa.float32(), 2)),
pa.field("item", pa.utf8()),
pa.field("price", pa.float32()),
]
)
def make_batches():
for _ in range(5):
yield from [
# pandas
pd.DataFrame(
{
"vector": [[3.1, 4.1], [1, 1]],
"item": ["foo", "bar"],
"price": [10.0, 20.0],
}
),
# pylist
[
{"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
{"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
],
# recordbatch
pa.RecordBatch.from_arrays(
[
pa.array([[3.1, 4.1], [5.9, 26.5]], pa.list_(pa.float32(), 2)),
pa.array(["foo", "bar"]),
pa.array([10.0, 20.0]),
],
["vector", "item", "price"],
),
# pa Table
pa.Table.from_arrays(
[
pa.array([[3.1, 4.1], [5.9, 26.5]], pa.list_(pa.float32(), 2)),
pa.array(["foo", "bar"]),
pa.array([10.0, 20.0]),
],
["vector", "item", "price"],
),
# pydantic list
[
PydanticSchema(vector=[3.1, 4.1], item="foo", price=10.0),
PydanticSchema(vector=[5.9, 26.5], item="bar", price=20.0),
],
                # TODO: test pydict separately; it has unique column-count and
                # column-name constraints.
]
def run_tests(schema):
db = lancedb.connect(tmp_path)
tbl = db.create_table("table2", make_batches(), schema=schema, mode="overwrite")
tbl.to_pandas()
assert tbl.search([3.1, 4.1]).limit(1).to_pandas()["_distance"][0] == 0.0
assert tbl.search([5.9, 26.5]).limit(1).to_pandas()["_distance"][0] == 0.0
tbl_len = len(tbl)
tbl.add(make_batches())
assert tbl_len == 50
assert len(tbl) == tbl_len * 2
assert len(tbl.list_versions()) == 3
db.drop_database()
run_tests(arrow_schema)
run_tests(PydanticSchema)
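# Note (added illustration): `create_table` accepts a batch iterator, so the
# heterogeneous sources above (pandas, pylists, RecordBatches, pa.Tables,
# pydantic models) stream into one table under a single schema; a plain
# generator works the same way, e.g. (names hypothetical):
#
#     db.create_table("t", (batch for batch in [df1, df2]), schema=schema)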
def test_table_names(tmp_path):
db = lancedb.connect(tmp_path)
data = pd.DataFrame(
{
"vector": [[3.1, 4.1], [5.9, 26.5]],
"item": ["foo", "bar"],
"price": [10.0, 20.0],
}
)
db.create_table("test2", data=data)
db.create_table("test1", data=data)
db.create_table("test3", data=data)
assert db.table_names() == ["test1", "test2", "test3"]
@pytest.mark.asyncio
async def test_table_names_async(tmp_path):
db = lancedb.connect(tmp_path)
data = pd.DataFrame(
{
"vector": [[3.1, 4.1], [5.9, 26.5]],
"item": ["foo", "bar"],
"price": [10.0, 20.0],
}
)
db.create_table("test2", data=data)
db.create_table("test1", data=data)
db.create_table("test3", data=data)
db = await lancedb.connect_async(tmp_path)
assert await db.table_names() == ["test1", "test2", "test3"]
assert await db.table_names(limit=1) == ["test1"]
assert await db.table_names(start_after="test1", limit=1) == ["test2"]
assert await db.table_names(start_after="test1") == ["test2", "test3"]
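# Note: the pagination above shows `start_after` is exclusive, so pairing it
# with `limit` walks the lexicographically sorted table list page by page.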
def test_create_mode(tmp_path):
db = lancedb.connect(tmp_path)
data = pd.DataFrame(
{
"vector": [[3.1, 4.1], [5.9, 26.5]],
"item": ["foo", "bar"],
"price": [10.0, 20.0],
}
)
db.create_table("test", data=data)
with pytest.raises(Exception):
db.create_table("test", data=data)
new_data = pd.DataFrame(
{
"vector": [[3.1, 4.1], [5.9, 26.5]],
"item": ["fizz", "buzz"],
"price": [10.0, 20.0],
}
)
tbl = db.create_table("test", data=new_data, mode="overwrite")
assert tbl.to_pandas().item.tolist() == ["fizz", "buzz"]
def test_create_exist_ok(tmp_path):
db = lancedb.connect(tmp_path)
data = pd.DataFrame(
{
"vector": [[3.1, 4.1], [5.9, 26.5]],
"item": ["foo", "bar"],
"price": [10.0, 20.0],
}
)
tbl = db.create_table("test", data=data)
with pytest.raises(OSError):
db.create_table("test", data=data)
# open the table but don't add more rows
tbl2 = db.create_table("test", data=data, exist_ok=True)
assert tbl.name == tbl2.name
assert tbl.schema == tbl2.schema
assert len(tbl) == len(tbl2)
schema = pa.schema(
[
pa.field("vector", pa.list_(pa.float32(), list_size=2)),
pa.field("item", pa.utf8()),
pa.field("price", pa.float64()),
]
)
tbl3 = db.create_table("test", schema=schema, exist_ok=True)
assert tbl3.schema == schema
bad_schema = pa.schema(
[
pa.field("vector", pa.list_(pa.float32(), list_size=2)),
pa.field("item", pa.utf8()),
pa.field("price", pa.float64()),
pa.field("extra", pa.float32()),
]
)
with pytest.raises(ValueError):
db.create_table("test", schema=bad_schema, exist_ok=True)
@pytest.mark.asyncio
async def test_connect(tmp_path):
db = await lancedb.connect_async(tmp_path)
assert str(db) == f"NativeDatabase(uri={tmp_path}, read_consistency_interval=None)"
db = await lancedb.connect_async(
tmp_path, read_consistency_interval=timedelta(seconds=5)
)
assert str(db) == f"NativeDatabase(uri={tmp_path}, read_consistency_interval=5s)"
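# Note (added; based on LanceDB's documented semantics): `read_consistency_interval`
# bounds how stale reads may be: timedelta(seconds=0) re-checks for updates on
# every read, while the default None skips those checks.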
@pytest.mark.asyncio
async def test_close(tmp_path):
db = await lancedb.connect_async(tmp_path)
assert db.is_open()
db.close()
assert not db.is_open()
with pytest.raises(RuntimeError, match="is closed"):
await db.table_names()
@pytest.mark.asyncio
async def test_create_mode_async(tmp_path):
    db = await lancedb.connect_async(tmp_path)
    data = pd.DataFrame(
        {
            "vector": [[3.1, 4.1], [5.9, 26.5]],
            "item": ["foo", "bar"],
            "price": [10.0, 20.0],
        }
    )
    await db.create_table("test", data=data)
    with pytest.raises(RuntimeError):
        await db.create_table("test", data=data)

    new_data = pd.DataFrame(
        {
            "vector": [[3.1, 4.1], [5.9, 26.5]],
            "item": ["fizz", "buzz"],
            "price": [10.0, 20.0],
        }
    )
    _tbl = await db.create_table("test", data=new_data, mode="overwrite")
    # MIGRATION: to_pandas() is not available in async
    # assert tbl.to_pandas().item.tolist() == ["fizz", "buzz"]

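# Async variant of test_create_exist_ok; the schema-conflict check is still
# pending migration (see the commented-out block at the end).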
@pytest.mark.asyncio
async def test_create_exist_ok_async(tmp_path):
    db = await lancedb.connect_async(tmp_path)
    data = pd.DataFrame(
        {
            "vector": [[3.1, 4.1], [5.9, 26.5]],
            "item": ["foo", "bar"],
            "price": [10.0, 20.0],
        }
    )
    tbl = await db.create_table("test", data=data)
    with pytest.raises(RuntimeError):
        await db.create_table("test", data=data)
    # open the table but don't add more rows
    tbl2 = await db.create_table("test", data=data, exist_ok=True)
    assert tbl.name == tbl2.name
    assert await tbl.schema() == await tbl2.schema()

    schema = pa.schema(
        [
            pa.field("vector", pa.list_(pa.float32(), list_size=2)),
            pa.field("item", pa.utf8()),
            pa.field("price", pa.float64()),
        ]
    )
    tbl3 = await db.create_table("test", schema=schema, exist_ok=True)
    assert await tbl3.schema() == schema
    # MIGRATION: creating a table with exist_ok=True when the table already
    # exists with a different schema should raise an error.
    # bad_schema = pa.schema(
    #     [
    #         pa.field("vector", pa.list_(pa.float32(), list_size=2)),
    #         pa.field("item", pa.utf8()),
    #         pa.field("price", pa.float64()),
    #         pa.field("extra", pa.float32()),
    #     ]
    # )
    # with pytest.raises(ValueError):
    #     await db.create_table("test", schema=bad_schema, exist_ok=True)

@pytest.mark.asyncio
async def test_open_table(tmp_path):
    db = await lancedb.connect_async(tmp_path)
    data = pd.DataFrame(
        {
            "vector": [[3.1, 4.1], [5.9, 26.5]],
            "item": ["foo", "bar"],
            "price": [10.0, 20.0],
        }
    )
    await db.create_table("test", data=data)

    tbl = await db.open_table("test")
    assert tbl.name == "test"
    assert (
        re.search(
            r"NativeTable\(test, uri=.*test\.lance, read_consistency_interval=None\)",
            str(tbl),
        )
        is not None
    )
    assert await tbl.schema() == pa.schema(
        {
            "vector": pa.list_(pa.float32(), list_size=2),
            "item": pa.utf8(),
            "price": pa.float64(),
        }
    )

    with pytest.raises(ValueError, match="was not found"):
        await db.open_table("does_not_exist")

def test_delete_table(tmp_path):
    db = lancedb.connect(tmp_path)
    data = pd.DataFrame(
        {
            "vector": [[3.1, 4.1], [5.9, 26.5]],
            "item": ["foo", "bar"],
            "price": [10.0, 20.0],
        }
    )
    db.create_table("test", data=data)
    with pytest.raises(Exception):
        db.create_table("test", data=data)

    assert db.table_names() == ["test"]
    db.drop_table("test")
    assert db.table_names() == []

    db.create_table("test", data=data)
    assert db.table_names() == ["test"]

    # dropping a table that does not exist should pass if ignore_missing=True
    db.drop_table("does_not_exist", ignore_missing=True)

def test_drop_database(tmp_path):
    db = lancedb.connect(tmp_path)
    data = pd.DataFrame(
        {
            "vector": [[3.1, 4.1], [5.9, 26.5]],
            "item": ["foo", "bar"],
            "price": [10.0, 20.0],
        }
    )
    new_data = pd.DataFrame(
        {
            "vector": [[5.1, 4.1], [5.9, 10.5]],
            "item": ["kiwi", "avocado"],
            "price": [12.0, 17.0],
        }
    )
    db.create_table("test", data=data)
    with pytest.raises(Exception):
        db.create_table("test", data=data)

    assert db.table_names() == ["test"]
    db.create_table("new_test", data=new_data)
    db.drop_database()
    assert db.table_names() == []

    # dropping should also pass when no tables are present
    db.create_table("test", data=new_data)
    db.drop_table("test")
    assert db.table_names() == []
    db.drop_database()
    assert db.table_names() == []

    # create an empty table with only a schema
    schema = pa.schema([pa.field("vector", pa.list_(pa.float32(), list_size=2))])
    db.create_table("empty_table", schema=schema)
    # dropping an empty database should pass
    db.drop_database()
    assert db.table_names() == []

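# A table needs either data or a schema; a PyArrow schema and an equivalent
# LanceModel should yield identical table schemas.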
def test_empty_or_nonexistent_table(tmp_path):
    db = lancedb.connect(tmp_path)
    with pytest.raises(Exception):
        db.create_table("test_with_no_data")
    with pytest.raises(Exception):
        db.open_table("does_not_exist")

    schema = pa.schema([pa.field("a", pa.int64(), nullable=False)])
    test = db.create_table("test", schema=schema)

    class TestModel(LanceModel):
        a: int

    test2 = db.create_table("test2", schema=TestModel)
    assert test.schema == test2.schema

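# Rebuilding an index over the same column requires replace=True;
# replace=False on an already-indexed column is an error.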
def test_replace_index(tmp_path):
    db = lancedb.connect(uri=tmp_path)
    table = db.create_table(
        "test",
        [
            {"vector": np.random.rand(128), "item": "foo", "price": float(i)}
            for i in range(1000)
        ],
    )
    table.create_index(
        num_partitions=2,
        num_sub_vectors=4,
    )

    with pytest.raises(Exception):
        table.create_index(
            num_partitions=2,
            num_sub_vectors=4,
            replace=False,
        )

    table.create_index(
        num_partitions=2,
        num_sub_vectors=4,
        replace=True,
        index_cache_size=10,
    )

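# With prefilter=True the where() clause is applied before the ANN search, so
# the exact price match returns a single row even though limit(5) was
# requested.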
def test_prefilter_with_index(tmp_path):
    db = lancedb.connect(uri=tmp_path)
    data = [
        {"vector": np.random.rand(128), "item": "foo", "price": float(i)}
        for i in range(1000)
    ]
    sample_key = data[100]["vector"]
    table = db.create_table(
        "test",
        data,
    )
    table.create_index(
        num_partitions=2,
        num_sub_vectors=4,
    )
    table = (
        table.search(sample_key)
        .where("price == 500", prefilter=True)
        .limit(5)
        .to_arrow()
    )
    assert table.num_rows == 1
| [
"lancedb.pydantic.Vector",
"lancedb.connect",
"lancedb.connect_async"
] | [((807, 832), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (822, 832), False, 'import lancedb\n'), ((1559, 1584), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (1574, 1584), False, 'import lancedb\n'), ((1667, 1769), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'], 'price': [\n 10.0, 20.0]}"], {}), "({'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'],\n 'price': [10.0, 20.0]})\n", (1679, 1769), True, 'import pandas as pd\n'), ((4917, 4942), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (4932, 4942), False, 'import lancedb\n'), ((4954, 5056), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'], 'price': [\n 10.0, 20.0]}"], {}), "({'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'],\n 'price': [10.0, 20.0]})\n", (4966, 5056), True, 'import pandas as pd\n'), ((5369, 5394), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (5384, 5394), False, 'import lancedb\n'), ((5406, 5508), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'], 'price': [\n 10.0, 20.0]}"], {}), "({'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'],\n 'price': [10.0, 20.0]})\n", (5418, 5508), True, 'import pandas as pd\n'), ((6047, 6072), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (6062, 6072), False, 'import lancedb\n'), ((6084, 6186), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'], 'price': [\n 10.0, 20.0]}"], {}), "({'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'],\n 'price': [10.0, 20.0]})\n", (6096, 6186), True, 'import pandas as pd\n'), ((6378, 6482), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['fizz', 'buzz'], 'price': [\n 10.0, 20.0]}"], {}), "({'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['fizz', 'buzz'],\n 'price': [10.0, 20.0]})\n", (6390, 6482), True, 'import pandas as pd\n'), ((6715, 6740), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (6730, 6740), False, 'import lancedb\n'), ((6752, 6854), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'], 'price': [\n 10.0, 20.0]}"], {}), "({'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'],\n 'price': [10.0, 20.0]})\n", (6764, 6854), True, 'import pandas as pd\n'), ((8676, 8778), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'], 'price': [\n 10.0, 20.0]}"], {}), "({'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'],\n 'price': [10.0, 20.0]})\n", (8688, 8778), True, 'import pandas as pd\n'), ((8985, 9089), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['fizz', 'buzz'], 'price': [\n 10.0, 20.0]}"], {}), "({'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['fizz', 'buzz'],\n 'price': [10.0, 20.0]})\n", (8997, 9089), True, 'import pandas as pd\n'), ((9469, 9571), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'], 'price': [\n 10.0, 20.0]}"], {}), "({'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'],\n 'price': [10.0, 20.0]})\n", (9481, 9571), True, 'import pandas as pd\n'), ((10917, 11019), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'], 'price': [\n 10.0, 20.0]}"], 
{}), "({'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'],\n 'price': [10.0, 20.0]})\n", (10929, 11019), True, 'import pandas as pd\n'), ((11713, 11738), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (11728, 11738), False, 'import lancedb\n'), ((11750, 11852), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'], 'price': [\n 10.0, 20.0]}"], {}), "({'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'],\n 'price': [10.0, 20.0]})\n", (11762, 11852), True, 'import pandas as pd\n'), ((12397, 12422), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (12412, 12422), False, 'import lancedb\n'), ((12434, 12536), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'], 'price': [\n 10.0, 20.0]}"], {}), "({'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'],\n 'price': [10.0, 20.0]})\n", (12446, 12536), True, 'import pandas as pd\n'), ((12609, 12716), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[5.1, 4.1], [5.9, 10.5]], 'item': ['kiwi', 'avocado'], 'price':\n [12.0, 17.0]}"], {}), "({'vector': [[5.1, 4.1], [5.9, 10.5]], 'item': ['kiwi',\n 'avocado'], 'price': [12.0, 17.0]})\n", (12621, 12716), True, 'import pandas as pd\n'), ((13583, 13608), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (13598, 13608), False, 'import lancedb\n'), ((14073, 14102), 'lancedb.connect', 'lancedb.connect', ([], {'uri': 'tmp_path'}), '(uri=tmp_path)\n', (14088, 14102), False, 'import lancedb\n'), ((14718, 14747), 'lancedb.connect', 'lancedb.connect', ([], {'uri': 'tmp_path'}), '(uri=tmp_path)\n', (14733, 14747), False, 'import lancedb\n'), ((2370, 2379), 'lancedb.pydantic.Vector', 'Vector', (['(2)'], {}), '(2)\n', (2376, 2379), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((4312, 4337), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (4327, 4337), False, 'import lancedb\n'), ((5702, 5733), 'lancedb.connect_async', 'lancedb.connect_async', (['tmp_path'], {}), '(tmp_path)\n', (5723, 5733), False, 'import lancedb\n'), ((6293, 6317), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (6306, 6317), False, 'import pytest\n'), ((6967, 6989), 'pytest.raises', 'pytest.raises', (['OSError'], {}), '(OSError)\n', (6980, 6989), False, 'import pytest\n'), ((7812, 7837), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7825, 7837), False, 'import pytest\n'), ((7977, 8008), 'lancedb.connect_async', 'lancedb.connect_async', (['tmp_path'], {}), '(tmp_path)\n', (7998, 8008), False, 'import lancedb\n'), ((8363, 8394), 'lancedb.connect_async', 'lancedb.connect_async', (['tmp_path'], {}), '(tmp_path)\n', (8384, 8394), False, 'import lancedb\n'), ((8472, 8518), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""is closed"""'}), "(RuntimeError, match='is closed')\n", (8485, 8518), False, 'import pytest\n'), ((8633, 8664), 'lancedb.connect_async', 'lancedb.connect_async', (['tmp_path'], {}), '(tmp_path)\n', (8654, 8664), False, 'import lancedb\n'), ((8891, 8918), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (8904, 8918), False, 'import pytest\n'), ((9426, 9457), 'lancedb.connect_async', 'lancedb.connect_async', (['tmp_path'], {}), '(tmp_path)\n', (9447, 9457), False, 'import lancedb\n'), ((9690, 9717), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (9703, 9717), False, 'import 
pytest\n'), ((10874, 10905), 'lancedb.connect_async', 'lancedb.connect_async', (['tmp_path'], {}), '(tmp_path)\n', (10895, 10905), False, 'import lancedb\n'), ((11573, 11621), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""was not found"""'}), "(ValueError, match='was not found')\n", (11586, 11621), False, 'import pytest\n'), ((11959, 11983), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (11972, 11983), False, 'import pytest\n'), ((12822, 12846), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (12835, 12846), False, 'import pytest\n'), ((13618, 13642), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (13631, 13642), False, 'import pytest\n'), ((13699, 13723), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (13712, 13723), False, 'import pytest\n'), ((14379, 14403), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (14392, 14403), False, 'import pytest\n'), ((14780, 14799), 'numpy.random.rand', 'np.random.rand', (['(128)'], {}), '(128)\n', (14794, 14799), True, 'import numpy as np\n'), ((2548, 2557), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (2555, 2557), True, 'import pyarrow as pa\n'), ((2590, 2602), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (2600, 2602), True, 'import pyarrow as pa\n'), ((7377, 7386), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (7384, 7386), True, 'import pyarrow as pa\n'), ((7419, 7431), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (7429, 7431), True, 'import pyarrow as pa\n'), ((7685, 7694), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (7692, 7694), True, 'import pyarrow as pa\n'), ((7727, 7739), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (7737, 7739), True, 'import pyarrow as pa\n'), ((7772, 7784), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (7782, 7784), True, 'import pyarrow as pa\n'), ((8180, 8200), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(5)'}), '(seconds=5)\n', (8189, 8200), False, 'from datetime import timedelta\n'), ((10100, 10109), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (10107, 10109), True, 'import pyarrow as pa\n'), ((10142, 10154), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (10152, 10154), True, 'import pyarrow as pa\n'), ((11501, 11510), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (11508, 11510), True, 'import pyarrow as pa\n'), ((11533, 11545), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (11543, 11545), True, 'import pyarrow as pa\n'), ((13804, 13814), 'pyarrow.int64', 'pa.int64', ([], {}), '()\n', (13812, 13814), True, 'import pyarrow as pa\n'), ((14181, 14200), 'numpy.random.rand', 'np.random.rand', (['(128)'], {}), '(128)\n', (14195, 14200), True, 'import numpy as np\n'), ((2500, 2512), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (2510, 2512), True, 'import pyarrow as pa\n'), ((2739, 2836), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [1, 1]], 'item': ['foo', 'bar'], 'price': [10.0, 20.0]}"], {}), "({'vector': [[3.1, 4.1], [1, 1]], 'item': ['foo', 'bar'],\n 'price': [10.0, 20.0]})\n", (2751, 2836), True, 'import pandas as pd\n'), ((7319, 7331), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (7329, 7331), True, 'import pyarrow as pa\n'), ((7627, 7639), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (7637, 7639), True, 'import pyarrow as pa\n'), ((10042, 10054), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (10052, 10054), True, 'import pyarrow as pa\n'), ((11453, 11465), 'pyarrow.float32', 'pa.float32', ([], {}), 
'()\n', (11463, 11465), True, 'import pyarrow as pa\n'), ((13344, 13356), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (13354, 13356), True, 'import pyarrow as pa\n'), ((3386, 3410), 'pyarrow.array', 'pa.array', (["['foo', 'bar']"], {}), "(['foo', 'bar'])\n", (3394, 3410), True, 'import pyarrow as pa\n'), ((3436, 3458), 'pyarrow.array', 'pa.array', (['[10.0, 20.0]'], {}), '([10.0, 20.0])\n', (3444, 3458), True, 'import pyarrow as pa\n'), ((3750, 3774), 'pyarrow.array', 'pa.array', (["['foo', 'bar']"], {}), "(['foo', 'bar'])\n", (3758, 3774), True, 'import pyarrow as pa\n'), ((3800, 3822), 'pyarrow.array', 'pa.array', (['[10.0, 20.0]'], {}), '([10.0, 20.0])\n', (3808, 3822), True, 'import pyarrow as pa\n'), ((3343, 3355), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (3353, 3355), True, 'import pyarrow as pa\n'), ((3707, 3719), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (3717, 3719), True, 'import pyarrow as pa\n')] |