input | output
---|---
import numpy as np
import pytest
from fastapi import FastAPI
from httpx import AsyncClient
from docarray import BaseDocument, Image, Text
from docarray.typing import NdArray
@pytest.mark.asyncio
async def test_fast_api():
class Mmdoc(BaseDocument):
img: Image
text: Text
title: str
input_doc = Mmdoc(
img=Image(tensor=np.zeros((3, 224, 224))), text=Text(), title='hello'
)
app = FastAPI()
@app.post("/doc/")
async def create_item(doc: Mmdoc):
return doc
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
@pytest.mark.asyncio
async def test_image():
class InputDoc(BaseDocument):
img: Image
class OutputDoc(BaseDocument):
embedding_clip: NdArray
embedding_bert: NdArray
input_doc = InputDoc(img=Image(tensor=np.zeros((3, 224, 224))))
app = FastAPI()
@app.post("/doc/", response_model=OutputDoc)
async def create_item(doc: InputDoc) -> OutputDoc:
## call my fancy model to generate the embeddings
return OutputDoc(
embedding_clip=np.zeros((100, 1)), embedding_bert=np.zeros((100, 1))
)
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
@pytest.mark.asyncio
async def test_sentence_to_embeddings():
class InputDoc(BaseDocument):
text: str
class OutputDoc(BaseDocument):
embedding_clip: NdArray
embedding_bert: NdArray
input_doc = InputDoc(text='hello')
app = FastAPI()
@app.post("/doc/", response_model=OutputDoc)
async def create_item(doc: InputDoc) -> OutputDoc:
## call my fancy model to generate the embeddings
return OutputDoc(
embedding_clip=np.zeros((100, 1)), embedding_bert=np.zeros((100, 1))
)
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
|
import numpy as np
import pytest
from fastapi import FastAPI
from httpx import AsyncClient
from docarray import Document, Image, Text
from docarray.typing import NdArray
@pytest.mark.asyncio
async def test_fast_api():
class Mmdoc(Document):
img: Image
text: Text
title: str
input_doc = Mmdoc(
img=Image(tensor=np.zeros((3, 224, 224))), text=Text(), title='hello'
)
app = FastAPI()
@app.post("/doc/")
async def create_item(doc: Mmdoc):
return doc
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
@pytest.mark.asyncio
async def test_image():
class InputDoc(Document):
img: Image
class OutputDoc(Document):
embedding_clip: NdArray
embedding_bert: NdArray
input_doc = InputDoc(img=Image(tensor=np.zeros((3, 224, 224))))
app = FastAPI()
@app.post("/doc/", response_model=OutputDoc)
async def create_item(doc: InputDoc) -> OutputDoc:
## call my fancy model to generate the embeddings
return OutputDoc(
embedding_clip=np.zeros((100, 1)), embedding_bert=np.zeros((100, 1))
)
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
@pytest.mark.asyncio
async def test_sentence_to_embeddings():
class InputDoc(Document):
text: str
class OutputDoc(Document):
embedding_clip: NdArray
embedding_bert: NdArray
input_doc = InputDoc(text='hello')
app = FastAPI()
@app.post("/doc/", response_model=OutputDoc)
async def create_item(doc: InputDoc) -> OutputDoc:
## call my fancy model to generate the embeddings
return OutputDoc(
embedding_clip=np.zeros((100, 1)), embedding_bert=np.zeros((100, 1))
)
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import Lumina2Transformer2DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class Lumina2Transformer2DModelTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = Lumina2Transformer2DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
@property
def dummy_input(self):
batch_size = 2 # N
num_channels = 4 # C
height = width = 16 # H, W
embedding_dim = 32 # D
sequence_length = 16 # L
hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
timestep = torch.rand(size=(batch_size,)).to(torch_device)
attention_mask = torch.ones(size=(batch_size, sequence_length), dtype=torch.bool).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
"encoder_attention_mask": attention_mask,
}
@property
def input_shape(self):
return (4, 16, 16)
@property
def output_shape(self):
return (4, 16, 16)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"sample_size": 16,
"patch_size": 2,
"in_channels": 4,
"hidden_size": 24,
"num_layers": 2,
"num_refiner_layers": 1,
"num_attention_heads": 3,
"num_kv_heads": 1,
"multiple_of": 2,
"ffn_dim_multiplier": None,
"norm_eps": 1e-5,
"scaling_factor": 1.0,
"axes_dim_rope": (4, 2, 2),
"axes_lens": (128, 128, 128),
"cap_feat_dim": 32,
}
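# Note (descriptive, not from the original test): these toy sizes appear chosen to be mutually
# consistent, e.g. hidden_size (24) split over num_attention_heads (3) gives a head dim of 8,
# which matches sum(axes_dim_rope) = 4 + 2 + 2.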
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"Lumina2Transformer2DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import Lumina2Transformer2DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class Lumina2Transformer2DModelTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = Lumina2Transformer2DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
@property
def dummy_input(self):
batch_size = 2 # N
num_channels = 4 # C
height = width = 16 # H, W
embedding_dim = 32 # D
sequence_length = 16 # L
hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
timestep = torch.rand(size=(batch_size,)).to(torch_device)
attention_mask = torch.ones(size=(batch_size, sequence_length), dtype=torch.bool).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
"encoder_attention_mask": attention_mask,
}
@property
def input_shape(self):
return (4, 16, 16)
@property
def output_shape(self):
return (4, 16, 16)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"sample_size": 16,
"patch_size": 2,
"in_channels": 4,
"hidden_size": 24,
"num_layers": 2,
"num_refiner_layers": 1,
"num_attention_heads": 3,
"num_kv_heads": 1,
"multiple_of": 2,
"ffn_dim_multiplier": None,
"norm_eps": 1e-5,
"scaling_factor": 1.0,
"axes_dim_rope": (4, 2, 2),
"axes_lens": (128, 128, 128),
"cap_feat_dim": 32,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"Lumina2Transformer2DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
|
import os
from llama_index.core.tools.function_tool import FunctionTool
import pytest
from llama_index.core.base.llms.base import BaseLLM
from llama_index.core.base.llms.types import ChatMessage, ImageBlock, MessageRole
from llama_index.llms.gemini import Gemini
from llama_index.llms.gemini.utils import chat_message_to_gemini
from google.ai.generativelanguage_v1beta.types import (
FunctionCallingConfig,
ToolConfig,
)
def test_embedding_class() -> None:
names_of_base_classes = [b.__name__ for b in Gemini.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
def test_chat_message_to_gemini() -> None:
msg = ChatMessage("Some content")
assert chat_message_to_gemini(msg) == {
"role": MessageRole.USER,
"parts": [{"text": "Some content"}],
}
msg = ChatMessage("Some content")
msg.blocks.append(ImageBlock(image=b"foo", image_mimetype="image/png"))
assert chat_message_to_gemini(msg) == {
"role": MessageRole.USER,
"parts": [{"text": "Some content"}, {"data": b"foo", "mime_type": "image/png"}],
}
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
def test_generate_image_prompt() -> None:
msg = ChatMessage("Tell me the brand of the car in this image:")
msg.blocks.append(
ImageBlock(
url="https://upload.wikimedia.org/wikipedia/commons/5/52/Ferrari_SP_FFX.jpg"
)
)
response = Gemini(model="models/gemini-1.5-flash").chat(messages=[msg])
assert "ferrari" in str(response).lower()
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
def test_chat_stream() -> None:
msg = ChatMessage("List three types of software testing strategies")
response = list(Gemini(model="models/gemini-1.5-flash").stream_chat(messages=[msg]))
assert response
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
def test_chat_with_tools() -> None:
def add(a: int, b: int) -> int:
"""Add two integers and returns the result integer."""
return a + b
add_tool = FunctionTool.from_defaults(fn=add)
msg = ChatMessage("What is the result of adding 2 and 3?")
model = Gemini(model="models/gemini-1.5-flash")
response = model.chat_with_tools(
user_msg=msg,
tools=[add_tool],
tool_config=ToolConfig(
function_calling_config=FunctionCallingConfig(
mode=FunctionCallingConfig.Mode.ANY
)
),
)
tool_calls = model.get_tool_calls_from_response(response)
assert len(tool_calls) == 1
assert tool_calls[0].tool_name == "add"
assert tool_calls[0].tool_kwargs == {"a": 2, "b": 3}
assert len(response.additional_kwargs["tool_calls"]) >= 1
|
import os
from llama_index.core.tools.function_tool import FunctionTool
import pytest
from llama_index.core.base.llms.base import BaseLLM
from llama_index.core.base.llms.types import ChatMessage, ImageBlock, MessageRole
from llama_index.llms.gemini import Gemini
from llama_index.llms.gemini.utils import chat_message_to_gemini
from google.ai.generativelanguage_v1beta.types import (
FunctionCallingConfig,
ToolConfig,
)
def test_embedding_class() -> None:
names_of_base_classes = [b.__name__ for b in Gemini.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
def test_chat_message_to_gemini() -> None:
msg = ChatMessage("Some content")
assert chat_message_to_gemini(msg) == {
"role": MessageRole.USER,
"parts": ["Some content"],
}
msg = ChatMessage("Some content")
msg.blocks.append(ImageBlock(image=b"foo", image_mimetype="image/png"))
assert chat_message_to_gemini(msg) == {
"role": MessageRole.USER,
"parts": ["Some content", {"data": b"foo", "mime_type": "image/png"}],
}
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
def test_generate_image_prompt() -> None:
msg = ChatMessage("Tell me the brand of the car in this image:")
msg.blocks.append(
ImageBlock(
url="https://upload.wikimedia.org/wikipedia/commons/5/52/Ferrari_SP_FFX.jpg"
)
)
response = Gemini(model="models/gemini-1.5-flash").chat(messages=[msg])
assert "ferrari" in str(response).lower()
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
def test_chat_stream() -> None:
msg = ChatMessage("List three types of software testing strategies")
response = list(Gemini(model="models/gemini-1.5-flash").stream_chat(messages=[msg]))
assert response
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
def test_chat_with_tools() -> None:
def add(a: int, b: int) -> int:
"""Add two integers and returns the result integer."""
return a + b
add_tool = FunctionTool.from_defaults(fn=add)
msg = ChatMessage("What is the result of adding 2 and 3?")
model = Gemini(model="models/gemini-1.5-flash")
response = model.chat_with_tools(
user_msg=msg,
tools=[add_tool],
tool_config=ToolConfig(
function_calling_config=FunctionCallingConfig(
mode=FunctionCallingConfig.Mode.ANY
)
),
)
tool_calls = model.get_tool_calls_from_response(response)
assert len(tool_calls) == 1
assert tool_calls[0].tool_name == "add"
assert tool_calls[0].tool_kwargs == {"a": 2, "b": 3}
assert len(response.additional_kwargs["tool_calls"]) >= 1
|
from llama_index_instrumentation.span.base import BaseSpan # noqa
|
from typing import Any, Dict, Optional
from uuid import uuid4
from llama_index.core.bridge.pydantic import BaseModel, Field, ConfigDict
class BaseSpan(BaseModel):
"""Base data class representing a span."""
model_config = ConfigDict(arbitrary_types_allowed=True)
id_: str = Field(default_factory=lambda: str(uuid4()), description="Id of span.")
parent_id: Optional[str] = Field(default=None, description="Id of parent span.")
tags: Dict[str, Any] = Field(default={})
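# A minimal usage sketch (illustrative, not part of the class): nesting is expressed purely
# through ids, so a child span references its parent via parent_id, e.g.
#   root = BaseSpan(tags={"operation": "query"})
#   child = BaseSpan(parent_id=root.id_, tags={"operation": "retrieve"})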
|
import torch
from docarray.computation.torch_backend import TorchCompBackend
def test_to_device():
t = torch.rand(10, 3)
assert t.device == torch.device('cpu')
t = TorchCompBackend.to_device(t, 'meta')
assert t.device == torch.device('meta')
def test_empty():
tensor = TorchCompBackend.empty((10, 3))
assert tensor.shape == (10, 3)
|
import torch
from docarray.computation.torch_backend import TorchCompBackend
def test_to_device():
t = torch.rand(10, 3)
assert t.device == torch.device('cpu')
t = TorchCompBackend.to_device(t, 'meta')
assert t.device == torch.device('meta')
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.quantizers import deserialize
from keras.src.quantizers import get
from keras.src.quantizers import serialize
from keras.src.quantizers.quantizers import AbsMaxQuantizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.quantizers.quantizers import abs_max_quantize
from keras.src.quantizers.quantizers import compute_float8_amax_history
from keras.src.quantizers.quantizers import compute_float8_scale
from keras.src.quantizers.quantizers import (
fake_quant_with_min_max_vars as fake_quant_with_min_max_vars_per_channel,
)
from keras.src.quantizers.quantizers import quantize_and_dequantize
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.quantizers import deserialize
from keras.src.quantizers import get
from keras.src.quantizers import serialize
from keras.src.quantizers.quantizers import AbsMaxQuantizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.quantizers.quantizers import abs_max_quantize
from keras.src.quantizers.quantizers import compute_float8_amax_history
from keras.src.quantizers.quantizers import compute_float8_scale
from keras.src.quantizers.quantizers import quantize_and_dequantize
|
"""Mock prompt utils."""
from llama_index.core.prompts.base import PromptTemplate
from llama_index.core.prompts.prompt_type import PromptType
MOCK_SUMMARY_PROMPT_TMPL = "{context_str}\n"
MOCK_SUMMARY_PROMPT = PromptTemplate(
MOCK_SUMMARY_PROMPT_TMPL, prompt_type=PromptType.SUMMARY
)
MOCK_INSERT_PROMPT_TMPL = "{num_chunks}\n{context_list}{new_chunk_text}\n"
MOCK_INSERT_PROMPT = PromptTemplate(
MOCK_INSERT_PROMPT_TMPL, prompt_type=PromptType.TREE_INSERT
)
# # single choice
MOCK_QUERY_PROMPT_TMPL = "{num_chunks}\n{context_list}\n{query_str}'\n"
MOCK_QUERY_PROMPT = PromptTemplate(
MOCK_QUERY_PROMPT_TMPL, prompt_type=PromptType.TREE_SELECT
)
MOCK_REFINE_PROMPT_TMPL = "{query_str}\n{existing_answer}\n{context_msg}\n"
MOCK_REFINE_PROMPT = PromptTemplate(
MOCK_REFINE_PROMPT_TMPL, prompt_type=PromptType.REFINE
)
MOCK_TEXT_QA_PROMPT_TMPL = "{context_str}\n{query_str}\n"
MOCK_TEXT_QA_PROMPT = PromptTemplate(
MOCK_TEXT_QA_PROMPT_TMPL, prompt_type=PromptType.QUESTION_ANSWER
)
MOCK_KEYWORD_EXTRACT_PROMPT_TMPL = "{max_keywords}\n{text}\n"
MOCK_KEYWORD_EXTRACT_PROMPT = PromptTemplate(
MOCK_KEYWORD_EXTRACT_PROMPT_TMPL, prompt_type=PromptType.KEYWORD_EXTRACT
)
# TODO: consolidate with keyword extract
MOCK_QUERY_KEYWORD_EXTRACT_PROMPT_TMPL = "{max_keywords}\n{question}\n"
MOCK_QUERY_KEYWORD_EXTRACT_PROMPT = PromptTemplate(
MOCK_QUERY_KEYWORD_EXTRACT_PROMPT_TMPL, prompt_type=PromptType.QUERY_KEYWORD_EXTRACT
)
MOCK_SCHEMA_EXTRACT_PROMPT_TMPL = "{text}\n{schema}"
MOCK_SCHEMA_EXTRACT_PROMPT = PromptTemplate(
MOCK_SCHEMA_EXTRACT_PROMPT_TMPL, prompt_type=PromptType.SCHEMA_EXTRACT
)
MOCK_TEXT_TO_SQL_PROMPT_TMPL = "{dialect}\n{schema}\n{query_str}"
MOCK_TEXT_TO_SQL_PROMPT = PromptTemplate(
MOCK_TEXT_TO_SQL_PROMPT_TMPL, prompt_type=PromptType.TEXT_TO_SQL
)
MOCK_TABLE_CONTEXT_PROMPT_TMPL = "{schema}\n{context_str}\n{query_str}"
MOCK_TABLE_CONTEXT_PROMPT = PromptTemplate(
MOCK_TABLE_CONTEXT_PROMPT_TMPL, prompt_type=PromptType.TABLE_CONTEXT
)
MOCK_KG_TRIPLET_EXTRACT_PROMPT_TMPL = "{max_knowledge_triplets}\n{text}"
MOCK_KG_TRIPLET_EXTRACT_PROMPT = PromptTemplate(
MOCK_KG_TRIPLET_EXTRACT_PROMPT_TMPL,
prompt_type=PromptType.KNOWLEDGE_TRIPLET_EXTRACT,
)
MOCK_INPUT_PROMPT_TMPL = "{query_str}"
MOCK_INPUT_PROMPT = PromptTemplate(
MOCK_INPUT_PROMPT_TMPL, prompt_type=PromptType.SIMPLE_INPUT
)
MOCK_PANDAS_PROMPT_TMPL = "{query_str}\n{df_str}\n{instruction_str}"
MOCK_PANDAS_PROMPT = PromptTemplate(
MOCK_PANDAS_PROMPT_TMPL, prompt_type=PromptType.PANDAS
)
|
"""Mock prompt utils."""
from llama_index.core.prompts.base import PromptTemplate
from llama_index.core.prompts.prompt_type import PromptType
MOCK_SUMMARY_PROMPT_TMPL = "{context_str}\n"
MOCK_SUMMARY_PROMPT = PromptTemplate(
MOCK_SUMMARY_PROMPT_TMPL, prompt_type=PromptType.SUMMARY
)
MOCK_INSERT_PROMPT_TMPL = "{num_chunks}\n{context_list}{new_chunk_text}\n"
MOCK_INSERT_PROMPT = PromptTemplate(
MOCK_INSERT_PROMPT_TMPL, prompt_type=PromptType.TREE_INSERT
)
# # single choice
MOCK_QUERY_PROMPT_TMPL = "{num_chunks}\n" "{context_list}\n" "{query_str}'\n"
MOCK_QUERY_PROMPT = PromptTemplate(
MOCK_QUERY_PROMPT_TMPL, prompt_type=PromptType.TREE_SELECT
)
MOCK_REFINE_PROMPT_TMPL = "{query_str}\n" "{existing_answer}\n" "{context_msg}\n"
MOCK_REFINE_PROMPT = PromptTemplate(
MOCK_REFINE_PROMPT_TMPL, prompt_type=PromptType.REFINE
)
MOCK_TEXT_QA_PROMPT_TMPL = "{context_str}\n" "{query_str}\n"
MOCK_TEXT_QA_PROMPT = PromptTemplate(
MOCK_TEXT_QA_PROMPT_TMPL, prompt_type=PromptType.QUESTION_ANSWER
)
MOCK_KEYWORD_EXTRACT_PROMPT_TMPL = "{max_keywords}\n{text}\n"
MOCK_KEYWORD_EXTRACT_PROMPT = PromptTemplate(
MOCK_KEYWORD_EXTRACT_PROMPT_TMPL, prompt_type=PromptType.KEYWORD_EXTRACT
)
# TODO: consolidate with keyword extract
MOCK_QUERY_KEYWORD_EXTRACT_PROMPT_TMPL = "{max_keywords}\n{question}\n"
MOCK_QUERY_KEYWORD_EXTRACT_PROMPT = PromptTemplate(
MOCK_QUERY_KEYWORD_EXTRACT_PROMPT_TMPL, prompt_type=PromptType.QUERY_KEYWORD_EXTRACT
)
MOCK_SCHEMA_EXTRACT_PROMPT_TMPL = "{text}\n{schema}"
MOCK_SCHEMA_EXTRACT_PROMPT = PromptTemplate(
MOCK_SCHEMA_EXTRACT_PROMPT_TMPL, prompt_type=PromptType.SCHEMA_EXTRACT
)
MOCK_TEXT_TO_SQL_PROMPT_TMPL = "{dialect}\n{schema}\n{query_str}"
MOCK_TEXT_TO_SQL_PROMPT = PromptTemplate(
MOCK_TEXT_TO_SQL_PROMPT_TMPL, prompt_type=PromptType.TEXT_TO_SQL
)
MOCK_TABLE_CONTEXT_PROMPT_TMPL = "{schema}\n{context_str}\n{query_str}"
MOCK_TABLE_CONTEXT_PROMPT = PromptTemplate(
MOCK_TABLE_CONTEXT_PROMPT_TMPL, prompt_type=PromptType.TABLE_CONTEXT
)
MOCK_KG_TRIPLET_EXTRACT_PROMPT_TMPL = "{max_knowledge_triplets}\n{text}"
MOCK_KG_TRIPLET_EXTRACT_PROMPT = PromptTemplate(
MOCK_KG_TRIPLET_EXTRACT_PROMPT_TMPL,
prompt_type=PromptType.KNOWLEDGE_TRIPLET_EXTRACT,
)
MOCK_INPUT_PROMPT_TMPL = "{query_str}"
MOCK_INPUT_PROMPT = PromptTemplate(
MOCK_INPUT_PROMPT_TMPL, prompt_type=PromptType.SIMPLE_INPUT
)
MOCK_PANDAS_PROMPT_TMPL = "{query_str}\n{df_str}\n{instruction_str}"
MOCK_PANDAS_PROMPT = PromptTemplate(
MOCK_PANDAS_PROMPT_TMPL, prompt_type=PromptType.PANDAS
)
|
from __future__ import annotations
import random
import pytest
import torch
from torch.utils.data import ConcatDataset
from sentence_transformers.sampler import NoDuplicatesBatchSampler, ProportionalBatchSampler
from sentence_transformers.util import is_datasets_available
if is_datasets_available():
from datasets import Dataset
else:
pytest.skip(
reason='Sentence Transformers was not installed with the `["train"]` extra.',
allow_module_level=True,
)
@pytest.fixture
def dummy_dataset() -> Dataset:
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"data": [0, 47, 3, 30, 3, ... 2],
"label": [0, 1, 0, 1, ..., 0, 1],
}
"""
# Create a list of two 0's, two 1's, two 2's, ... two 49's. Then shuffle.
values = [j for i in range(50) for j in (i, i)]
random.shuffle(values)
data = {"data": values, "label": [i % 2 for i in range(100)]}
return Dataset.from_dict(data)
@pytest.fixture
def dummy_duplicates_dataset() -> Dataset:
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"anchor": ["anchor_1", "anchor_1", "anchor_1", ... "anchor_2", "anchor_2"],
"positive": ["positive_1", "positive_1", "positive_1", ... "positive_2", "positive_2"],
}
"""
values = [{"anchor": "anchor_1", "positive": "positive_1"}] * 10 + [
{"anchor": "anchor_2", "positive": "positive_2"}
] * 8
return Dataset.from_list(values)
def test_group_by_label_batch_sampler_label_a(dummy_dataset: Dataset) -> None:
batch_size = 10
sampler = NoDuplicatesBatchSampler(
dataset=dummy_dataset, batch_size=batch_size, drop_last=True, valid_label_columns=["label"]
)
batches = list(iter(sampler))
# Assert all batch sizes are correct
assert all(len(batch) == batch_size for batch in batches)
# Assert batches contain no duplicate values
for batch in batches:
batch_values = [dummy_dataset[i]["data"] for i in batch]
assert len(batch_values) == len(set(batch_values)), f"Batch {batch} contains duplicate values: {batch_values}"
@pytest.mark.parametrize("drop_last", [True, False])
def test_proportional_no_duplicates(dummy_duplicates_dataset: Dataset, drop_last: bool) -> None:
batch_size = 2
sampler_1 = NoDuplicatesBatchSampler(
dataset=dummy_duplicates_dataset, batch_size=batch_size, drop_last=drop_last, valid_label_columns=["anchor"]
)
sampler_2 = NoDuplicatesBatchSampler(
dataset=dummy_duplicates_dataset, batch_size=batch_size, drop_last=drop_last, valid_label_columns=["positive"]
)
concat_dataset = ConcatDataset([dummy_duplicates_dataset, dummy_duplicates_dataset])
batch_sampler = ProportionalBatchSampler(
concat_dataset, [sampler_1, sampler_2], generator=torch.Generator(), seed=12
)
batches = list(iter(batch_sampler))
if drop_last:
# If we drop the last batch (i.e. incomplete batches), we should have 16 batches out of the 18 possible,
# because of the duplicates being skipped by the NoDuplicatesBatchSampler.
# Notably, we should not crash like reported in #2816.
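# (Counting it out: each 18-row copy holds 10 identical "*_1" rows and 8 identical "*_2" rows,
# so a duplicate-free batch of 2 must pair one of each, capping each sampler at 8 full batches;
# 2 samplers * 8 batches = 16 batches of 2 = 32 items.)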
assert len(batches) == 16
# All batches are the same size: 2
assert all(len(batch) == batch_size for batch in batches)
assert len(sum(batches, [])) == 32
else:
# If we don't drop incomplete batches, we should be able to do 18 batches, and get more data.
# Note: we don't get all data, because the NoDuplicatesBatchSampler will estimate the number of batches
# and it would require more (non-complete) batches to get all data.
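# (Counting it out: each sampler is estimated at ceil(18 / 2) = 9 batches, and the last batch can
# only hold a single leftover row because its duplicate cannot share the batch; 2 * 9 = 18 batches
# and 2 * 17 = 34 items.)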
assert len(batches) == 18
assert len(sum(batches, [])) == 34
|
from __future__ import annotations
import random
import pytest
import torch
from datasets import Dataset
from torch.utils.data import ConcatDataset
from sentence_transformers.sampler import NoDuplicatesBatchSampler, ProportionalBatchSampler
@pytest.fixture
def dummy_dataset() -> Dataset:
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"data": [0, 47, 3, 30, 3, ... 2],
"label": [0, 1, 0, 1, ..., 0, 1],
}
"""
# Create a list of two 0's, two 1's, two 2's, ... two 49's. Then shuffle.
values = [j for i in range(50) for j in (i, i)]
random.shuffle(values)
data = {"data": values, "label": [i % 2 for i in range(100)]}
return Dataset.from_dict(data)
@pytest.fixture
def dummy_duplicates_dataset() -> Dataset:
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"anchor": ["anchor_1", "anchor_1", "anchor_1", ... "anchor_2", "anchor_2"],
"positive": ["positive_1", "positive_1", "positive_1", ... "positive_2", "positive_2"],
}
"""
values = [{"anchor": "anchor_1", "positive": "positive_1"}] * 10 + [
{"anchor": "anchor_2", "positive": "positive_2"}
] * 8
return Dataset.from_list(values)
def test_group_by_label_batch_sampler_label_a(dummy_dataset: Dataset) -> None:
batch_size = 10
sampler = NoDuplicatesBatchSampler(
dataset=dummy_dataset, batch_size=batch_size, drop_last=True, valid_label_columns=["label"]
)
batches = list(iter(sampler))
# Assert all batch sizes are correct
assert all(len(batch) == batch_size for batch in batches)
# Assert batches contain no duplicate values
for batch in batches:
batch_values = [dummy_dataset[i]["data"] for i in batch]
assert len(batch_values) == len(set(batch_values)), f"Batch {batch} contains duplicate values: {batch_values}"
@pytest.mark.parametrize("drop_last", [True, False])
def test_proportional_no_duplicates(dummy_duplicates_dataset: Dataset, drop_last: bool) -> None:
batch_size = 2
sampler_1 = NoDuplicatesBatchSampler(
dataset=dummy_duplicates_dataset, batch_size=batch_size, drop_last=drop_last, valid_label_columns=["anchor"]
)
sampler_2 = NoDuplicatesBatchSampler(
dataset=dummy_duplicates_dataset, batch_size=batch_size, drop_last=drop_last, valid_label_columns=["positive"]
)
concat_dataset = ConcatDataset([dummy_duplicates_dataset, dummy_duplicates_dataset])
batch_sampler = ProportionalBatchSampler(
concat_dataset, [sampler_1, sampler_2], generator=torch.Generator(), seed=12
)
batches = list(iter(batch_sampler))
if drop_last:
# If we drop the last batch (i.e. incomplete batches), we should have 16 batches out of the 18 possible,
# because of the duplicates being skipped by the NoDuplicatesBatchSampler.
# Notably, we should not crash like reported in #2816.
assert len(batches) == 16
# All batches are the same size: 2
assert all(len(batch) == batch_size for batch in batches)
assert len(sum(batches, [])) == 32
else:
# If we don't drop incomplete batches, we should be able to do 18 batches, and get more data.
# Note: we don't get all data, because the NoDuplicatesBatchSampler will estimate the number of batches
# and it would require more (non-complete) batches to get all data.
assert len(batches) == 18
assert len(sum(batches, [])) == 34
|
"""
Tests the correct computation of evaluation scores from BinaryClassificationEvaluator
"""
from __future__ import annotations
import csv
import gzip
import os
from pathlib import Path
import pytest
from torch.utils.data import DataLoader
from sentence_transformers import (
InputExample,
SentenceTransformer,
evaluation,
losses,
util,
)
@pytest.mark.skip(reason="This test is rather slow, and the LabelAccuracyEvaluator is not commonly used.")
def test_LabelAccuracyEvaluator(paraphrase_distilroberta_base_v1_model: SentenceTransformer, tmp_path: Path) -> None:
"""Tests that the LabelAccuracyEvaluator can be loaded correctly"""
model = paraphrase_distilroberta_base_v1_model
nli_dataset_path = "datasets/AllNLI.tsv.gz"
if not os.path.exists(nli_dataset_path):
util.http_get("https://sbert.net/datasets/AllNLI.tsv.gz", nli_dataset_path)
label2int = {"contradiction": 0, "entailment": 1, "neutral": 2}
dev_samples = []
with gzip.open(nli_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
if row["split"] == "train":
label_id = label2int[row["label"]]
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=label_id))
if len(dev_samples) >= 100:
break
train_loss = losses.SoftmaxLoss(
model=model,
sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
num_labels=len(label2int),
)
dev_dataloader = DataLoader(dev_samples, shuffle=False, batch_size=16)
evaluator = evaluation.LabelAccuracyEvaluator(dev_dataloader, softmax_model=train_loss)
metrics = evaluator(model, output_path=str(tmp_path))
assert "accuracy" in metrics
assert metrics["accuracy"] > 0.2
|
"""
Tests the correct computation of evaluation scores from BinaryClassificationEvaluator
"""
from __future__ import annotations
import csv
import gzip
import os
from pathlib import Path
from torch.utils.data import DataLoader
from sentence_transformers import (
InputExample,
SentenceTransformer,
evaluation,
losses,
util,
)
def test_LabelAccuracyEvaluator(paraphrase_distilroberta_base_v1_model: SentenceTransformer, tmp_path: Path) -> None:
"""Tests that the LabelAccuracyEvaluator can be loaded correctly"""
model = paraphrase_distilroberta_base_v1_model
nli_dataset_path = "datasets/AllNLI.tsv.gz"
if not os.path.exists(nli_dataset_path):
util.http_get("https://sbert.net/datasets/AllNLI.tsv.gz", nli_dataset_path)
label2int = {"contradiction": 0, "entailment": 1, "neutral": 2}
dev_samples = []
with gzip.open(nli_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
if row["split"] == "train":
label_id = label2int[row["label"]]
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=label_id))
if len(dev_samples) >= 100:
break
train_loss = losses.SoftmaxLoss(
model=model,
sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
num_labels=len(label2int),
)
dev_dataloader = DataLoader(dev_samples, shuffle=False, batch_size=16)
evaluator = evaluation.LabelAccuracyEvaluator(dev_dataloader, softmax_model=train_loss)
metrics = evaluator(model, output_path=str(tmp_path))
assert "accuracy" in metrics
assert metrics["accuracy"] > 0.2
|
from typing import Any, Dict, List, Sequence, Union
from deprecated import deprecated
from llama_index.core.base.llms.types import (
CompletionResponse,
CompletionResponseAsyncGen,
CompletionResponseGen,
MessageRole,
)
from llama_index.core.base.llms.generic_utils import (
chat_response_to_completion_response,
stream_chat_response_to_completion_response,
astream_chat_response_to_completion_response,
)
from llama_index.core.base.llms.types import (
ChatMessage,
MessageRole,
TextBlock,
ImageBlock,
)
from llama_index.core.schema import ImageNode
from llama_index.llms.mistralai import MistralAI
@deprecated(
reason="This package has been deprecated, please use llama-index-llms-mistrala instead. See Multi Modal LLMs documentation for a complete guide on migration: https://docs.llamaindex.ai/en/stable/understanding/using_llms/using_llms/#multi-modal-llms",
version="0.4.1",
)
class MistralAIMultiModal(MistralAI):
def __init__(
self,
model: str = "pixtral-12b-2409",
**kwargs: Any,
) -> None:
super().__init__(
model=model,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
return "mistral_multi_modal_llm"
def _get_credential_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
return {
"api_key": self.api_key,
**kwargs,
}
def _get_multi_modal_chat_messages(
self,
prompt: str,
role: str,
image_documents: Sequence[Union[ImageNode, ImageBlock]],
) -> List[ChatMessage]:
blocks = []
if all(isinstance(doc, ImageNode) for doc in image_documents):
for image_document in image_documents:
blocks.append(
ImageBlock(
image=image_document.image,
path=image_document.image_path,
url=image_document.image_url,
image_mimetype=image_document.image_mimetype,
)
)
else:
blocks.extend(image_documents)
blocks.append(TextBlock(text=prompt))
return [ChatMessage(role=role, blocks=blocks)]
def complete(
self,
prompt: str,
image_documents: Sequence[Union[ImageNode, ImageBlock]],
**kwargs: Any,
) -> CompletionResponse:
messages = self._get_multi_modal_chat_messages(
prompt=prompt, role=MessageRole.USER.value, image_documents=image_documents
)
chat_response = self.chat(messages=messages, **kwargs)
return chat_response_to_completion_response(chat_response)
def stream_complete(
self,
prompt: str,
image_documents: Sequence[Union[ImageNode, ImageBlock]],
**kwargs: Any,
) -> CompletionResponseGen:
messages = self._get_multi_modal_chat_messages(
prompt=prompt, role=MessageRole.USER.value, image_documents=image_documents
)
chat_response = self.stream_chat(messages=messages, **kwargs)
return stream_chat_response_to_completion_response(chat_response)
async def acomplete(
self,
prompt: str,
image_documents: Sequence[Union[ImageNode, ImageBlock]],
**kwargs: Any,
) -> CompletionResponse:
messages = self._get_multi_modal_chat_messages(
prompt=prompt, role=MessageRole.USER.value, image_documents=image_documents
)
chat_response = await self.achat(messages=messages, **kwargs)
return chat_response_to_completion_response(chat_response)
async def astream_complete(
self,
prompt: str,
image_documents: Sequence[Union[ImageNode, ImageBlock]],
**kwargs: Any,
) -> CompletionResponseAsyncGen:
messages = self._get_multi_modal_chat_messages(
prompt=prompt, role=MessageRole.USER.value, image_documents=image_documents
)
chat_response = await self.astream_chat(messages=messages, **kwargs)
return astream_chat_response_to_completion_response(chat_response)
|
from typing import Any, Dict, List, Sequence
from llama_index.core.base.llms.types import (
CompletionResponse,
CompletionResponseAsyncGen,
CompletionResponseGen,
MessageRole,
)
from llama_index.core.base.llms.generic_utils import (
chat_response_to_completion_response,
stream_chat_response_to_completion_response,
astream_chat_response_to_completion_response,
)
from llama_index.core.base.llms.types import (
ChatMessage,
MessageRole,
TextBlock,
ImageBlock,
)
from llama_index.core.schema import ImageNode
from llama_index.llms.mistralai import MistralAI
class MistralAIMultiModal(MistralAI):
def __init__(
self,
model: str = "pixtral-12b-2409",
**kwargs: Any,
) -> None:
super().__init__(
model=model,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
return "mistral_multi_modal_llm"
def _get_credential_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
return {
"api_key": self.api_key,
**kwargs,
}
def _get_multi_modal_chat_messages(
self,
prompt: str,
role: str,
image_documents: Sequence[ImageNode],
) -> List[ChatMessage]:
blocks = []
for image_document in image_documents:
blocks.append(
ImageBlock(
image=image_document.image,
path=image_document.image_path,
url=image_document.image_url,
image_mimetype=image_document.image_mimetype,
)
)
blocks.append(TextBlock(text=prompt))
return [ChatMessage(role=role, blocks=blocks)]
def complete(
self, prompt: str, image_documents: Sequence[ImageNode], **kwargs: Any
) -> CompletionResponse:
messages = self._get_multi_modal_chat_messages(
prompt=prompt, role=MessageRole.USER.value, image_documents=image_documents
)
chat_response = self.chat(messages=messages, **kwargs)
return chat_response_to_completion_response(chat_response)
def stream_complete(
self, prompt: str, image_documents: Sequence[ImageNode], **kwargs: Any
) -> CompletionResponseGen:
messages = self._get_multi_modal_chat_messages(
prompt=prompt, role=MessageRole.USER.value, image_documents=image_documents
)
chat_response = self.stream_chat(messages=messages, **kwargs)
return stream_chat_response_to_completion_response(chat_response)
async def acomplete(
self, prompt: str, image_documents: Sequence[ImageNode], **kwargs: Any
) -> CompletionResponse:
messages = self._get_multi_modal_chat_messages(
prompt=prompt, role=MessageRole.USER.value, image_documents=image_documents
)
chat_response = await self.achat(messages=messages, **kwargs)
return chat_response_to_completion_response(chat_response)
async def astream_complete(
self, prompt: str, image_documents: Sequence[ImageNode], **kwargs: Any
) -> CompletionResponseAsyncGen:
messages = self._get_multi_modal_chat_messages(
prompt=prompt, role=MessageRole.USER.value, image_documents=image_documents
)
chat_response = await self.astream_chat(messages=messages, **kwargs)
return astream_chat_response_to_completion_response(chat_response)
|
"""Simple reader that reads weather data from OpenWeatherMap API."""
from typing import List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class WeatherReader(BaseReader):
"""
Weather Reader.
Reads the forecast & current weather of any location using OpenWeatherMap's free API.
Check 'https://openweathermap.org/appid' \
for how to generate a free OpenWeatherMap API key. It's free.
Args:
token (str): bearer_token that you get from OWM API.
"""
def __init__(
self,
token: str,
) -> None:
"""Initialize with parameters."""
super().__init__()
self.token = token
def load_data(
self,
places: List[str],
) -> List[Document]:
"""
Load weather data for the given locations.
OWM's One Call API provides the following weather data for any geographical coordinate:
- Current weather
- Hourly forecast for 48 hours
- Daily forecast for 7 days.
Args:
places (List[str]): Places you want the weather data for.
"""
try:
import pyowm
except ImportError:
raise ImportError("install pyowm using `pip install pyowm`")
owm = pyowm.OWM(api_key=self.token)
mgr = owm.weather_manager()
reg = owm.city_id_registry()
results = []
for place in places:
info_dict = {}
extra_info = {}
list_of_locations = reg.locations_for(city_name=place)
try:
city = list_of_locations[0]
except IndexError:
raise ValueError(
f"Unable to find {place}, try checking the spelling and try again"
)
lat = city.lat
lon = city.lon
res = mgr.one_call(lat=lat, lon=lon)
extra_info["latitude"] = lat
extra_info["longitude"] = lon
extra_info["timezone"] = res.timezone
info_dict["location"] = place
info_dict["current weather"] = res.current.to_dict()
if res.forecast_daily:
info_dict["daily forecast"] = [i.to_dict() for i in res.forecast_daily]
if res.forecast_hourly:
info_dict["hourly forecast"] = [
i.to_dict() for i in res.forecast_hourly
]
if res.forecast_minutely:
info_dict["minutely forecast"] = [
i.to_dict() for i in res.forecast_minutely
]
if res.national_weather_alerts:
info_dict["national weather alerts"] = [
i.to_dict() for i in res.national_weather_alerts
]
results.append(Document(text=str(info_dict), extra_info=extra_info))
return results
|
"""Simple reader that reads weather data from OpenWeatherMap API."""
from typing import List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class WeatherReader(BaseReader):
"""
Weather Reader.
Reads the forecast & current weather of any location using OpenWeatherMap's free API.
Check 'https://openweathermap.org/appid' \
for how to generate a free OpenWeatherMap API key. It's free.
Args:
token (str): bearer_token that you get from OWM API.
"""
def __init__(
self,
token: str,
) -> None:
"""Initialize with parameters."""
super().__init__()
self.token = token
def load_data(
self,
places: List[str],
) -> List[Document]:
"""
Load weather data for the given locations.
OWM's One Call API provides the following weather data for any geographical coordinate:
- Current weather
- Hourly forecast for 48 hours
- Daily forecast for 7 days.
Args:
places (List[str]): Places you want the weather data for.
"""
try:
import pyowm
except ImportError:
raise ImportError("install pyowm using `pip install pyowm`")
owm = pyowm.OWM(api_key=self.token)
mgr = owm.weather_manager()
reg = owm.city_id_registry()
results = []
for place in places:
info_dict = {}
extra_info = {}
list_of_locations = reg.locations_for(city_name=place)
try:
city = list_of_locations[0]
except IndexError:
raise ValueError(
f"Unable to find {place}, try checking the spelling and try again"
)
lat = city.lat
lon = city.lon
res = mgr.one_call(lat=lat, lon=lon)
extra_info["latitude"] = lat
extra_info["longitude"] = lon
extra_info["timezone"] = res.timezone
info_dict["location"] = place
info_dict["current weather"] = res.current.to_dict()
if res.forecast_daily:
info_dict["daily forecast"] = [i.to_dict() for i in res.forecast_daily]
if res.forecast_hourly:
info_dict["hourly forecast"] = [
i.to_dict() for i in res.forecast_hourly
]
if res.forecast_minutely:
info_dict["minutely forecast"] = [
i.to_dict() for i in res.forecast_minutely
]
if res.national_weather_alerts:
info_dict["national weather alerts"] = [
i.to_dict() for i in res.national_weather_alerts
]
results.append(Document(text=str(info_dict), extra_info=extra_info))
return results
|
# Copyright (c) OpenMMLab. All rights reserved.
from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads,
reduce_mean, sync_random_seed)
from .misc import (center_of_mass, filter_scores_and_topk, flip_tensor,
generate_coordinate, mask2ndarray, multi_apply,
select_single_mlvl, unmap)
__all__ = [
'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply',
'unmap', 'mask2ndarray', 'flip_tensor', 'all_reduce_dict',
'center_of_mass', 'generate_coordinate', 'select_single_mlvl',
'filter_scores_and_topk', 'sync_random_seed'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads,
reduce_mean)
from .misc import (center_of_mass, filter_scores_and_topk, flip_tensor,
generate_coordinate, mask2ndarray, multi_apply,
select_single_mlvl, unmap)
__all__ = [
'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply',
'unmap', 'mask2ndarray', 'flip_tensor', 'all_reduce_dict',
'center_of_mass', 'generate_coordinate', 'select_single_mlvl',
'filter_scores_and_topk'
]
|
from langchain_core._api import warn_deprecated
from pydantic.v1.dataclasses import * # noqa: F403
warn_deprecated(
"0.3.0",
removal="1.0.0",
alternative="pydantic.v1 or pydantic",
message=(
"As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
"The langchain.pydantic_v1 module was a "
"compatibility shim for pydantic v1, and should no longer be used. "
"Please update the code to import from Pydantic directly.\n\n"
"For example, replace imports like: "
"`from langchain.pydantic_v1 import BaseModel`\n"
"with: `from pydantic import BaseModel`\n"
"or the v1 compatibility namespace if you are working in a code base "
"that has not been fully upgraded to pydantic 2 yet. "
"\tfrom pydantic.v1 import BaseModel\n"
),
)
|
from langchain_core._api import warn_deprecated
try:
from pydantic.v1.dataclasses import * # noqa: F403
except ImportError:
from pydantic.dataclasses import * # type: ignore # noqa: F403
warn_deprecated(
"0.3.0",
removal="1.0.0",
alternative="pydantic.v1 or pydantic",
message=(
"As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
"The langchain.pydantic_v1 module was a "
"compatibility shim for pydantic v1, and should no longer be used. "
"Please update the code to import from Pydantic directly.\n\n"
"For example, replace imports like: "
"`from langchain.pydantic_v1 import BaseModel`\n"
"with: `from pydantic import BaseModel`\n"
"or the v1 compatibility namespace if you are working in a code base "
"that has not been fully upgraded to pydantic 2 yet. "
"\tfrom pydantic.v1 import BaseModel\n"
),
)
|
from __future__ import annotations
from typing import Any, Dict, Optional
from docarray import BaseDoc, DocList
from docarray.typing import AnyEmbedding, AnyTensor
class LegacyDocument(BaseDoc):
"""
This Document is the LegacyDocument. It follows the same schema as in DocArray <=0.21.
It can be useful to start migrating a codebase from v1 to v2.
Nevertheless, the API is not totally compatible with DocArray <=0.21 `Document`.
Indeed, none of the methods associated with `Document` are present. Only the schema
of the data is similar.
```python
from docarray import DocList
from docarray.documents.legacy import LegacyDocument
import numpy as np
doc = LegacyDocument(text='hello')
doc.url = 'http://myimg.png'
doc.tensor = np.zeros((3, 224, 224))
doc.embedding = np.zeros((100, 1))
doc.tags['price'] = 10
doc.chunks = DocList[LegacyDocument]([LegacyDocument() for _ in range(10)])
doc.matches = DocList[LegacyDocument]([LegacyDocument() for _ in range(10)])
```
"""
tensor: Optional[AnyTensor] = None
chunks: Optional[DocList[LegacyDocument]] = None
matches: Optional[DocList[LegacyDocument]] = None
blob: Optional[bytes] = None
text: Optional[str] = None
url: Optional[str] = None
embedding: Optional[AnyEmbedding] = None
tags: Dict[str, Any] = dict()
scores: Optional[Dict[str, Any]] = None
|
from __future__ import annotations
from typing import Any, Dict, Optional
from docarray import BaseDoc, DocList
from docarray.typing import AnyEmbedding, AnyTensor
class LegacyDocument(BaseDoc):
"""
This Document is the LegacyDocument. It follows the same schema as in DocArray <=0.21.
It can be useful to start migrating a codebase from v1 to v2.
Nevertheless, the API is not totally compatible with DocArray <=0.21 `Document`.
Indeed, none of the methods associated with `Document` are present. Only the schema
of the data is similar.
```python
from docarray import DocList
from docarray.documents.legacy import LegacyDocument
import numpy as np
doc = LegacyDocument(text='hello')
doc.url = 'http://myimg.png'
doc.tensor = np.zeros((3, 224, 224))
doc.embedding = np.zeros((100, 1))
doc.tags['price'] = 10
doc.chunks = DocList[LegacyDocument]([LegacyDocument() for _ in range(10)])
doc.matches = DocList[LegacyDocument]([LegacyDocument() for _ in range(10)])
```
"""
tensor: Optional[AnyTensor]
chunks: Optional[DocList[LegacyDocument]]
matches: Optional[DocList[LegacyDocument]]
blob: Optional[bytes]
text: Optional[str]
url: Optional[str]
embedding: Optional[AnyEmbedding]
tags: Dict[str, Any] = dict()
scores: Optional[Dict[str, Any]]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .vis_backend import (BaseVisBackend, LocalVisBackend, MLflowVisBackend,
TensorboardVisBackend, WandbVisBackend)
from .visualizer import Visualizer
__all__ = [
'Visualizer', 'BaseVisBackend', 'LocalVisBackend', 'WandbVisBackend',
'TensorboardVisBackend', 'MLflowVisBackend'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .vis_backend import (BaseVisBackend, LocalVisBackend,
TensorboardVisBackend, WandbVisBackend)
from .visualizer import Visualizer
__all__ = [
'Visualizer', 'BaseVisBackend', 'LocalVisBackend', 'WandbVisBackend',
'TensorboardVisBackend'
]
|
from docarray.documents.text import TextDoc
def test_text_document_operators():
doc = TextDoc(text='text', url='http://url.com')
assert doc == 'text'
assert doc != 'http://url.com'
doc2 = TextDoc(id=doc.id, text='text', url='http://url.com')
assert doc == doc2
doc3 = TextDoc(id='other-id', text='text', url='http://url.com')
assert doc != doc3
assert 't' in doc
assert 'a' not in doc
t = TextDoc(text='this is my text document')
assert 'text' in t
assert 'docarray' not in t
text = TextDoc()
assert text is not None
assert text.text is None
|
from docarray.documents.text import TextDoc
def test_text_document_operators():
doc = TextDoc(text='text', url='url.com')
assert doc == 'text'
assert doc != 'url.com'
doc2 = TextDoc(id=doc.id, text='text', url='url.com')
assert doc == doc2
doc3 = TextDoc(id='other-id', text='text', url='url.com')
assert doc != doc3
assert 't' in doc
assert 'a' not in doc
t = TextDoc(text='this is my text document')
assert 'text' in t
assert 'docarray' not in t
text = TextDoc()
assert text is not None
assert text.text is None
|
#!/usr/bin/env python3
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
from transformers import is_torch_hpu_available, is_torch_xpu_available
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
accelerator = "NA"
if torch.cuda.is_available():
accelerator = "CUDA"
elif is_torch_xpu_available():
accelerator = "XPU"
elif is_torch_hpu_available():
accelerator = "HPU"
print("Torch accelerator:", accelerator)
if accelerator == "CUDA":
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
elif accelerator == "XPU":
print("SYCL version:", torch.version.xpu)
print("Number of XPUs available:", torch.xpu.device_count())
elif accelerator == "HPU":
print("HPU version:", torch.__version__.split("+")[-1])
print("Number of HPUs available:", torch.hpu.device_count())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
|
#!/usr/bin/env python3
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
from transformers import is_torch_xpu_available
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
accelerator = "NA"
if torch.cuda.is_available():
accelerator = "CUDA"
elif is_torch_xpu_available():
accelerator = "XPU"
print("Torch accelerator:", accelerator)
if accelerator == "CUDA":
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
elif accelerator == "XPU":
print("SYCL version:", torch.version.xpu)
print("Number of XPUs available:", torch.xpu.device_count())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
|
"""Test ChatDeepSeek chat model."""
from typing import Optional, Type
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessageChunk, BaseMessageChunk
from langchain_core.tools import BaseTool
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_deepseek.chat_models import ChatDeepSeek
class TestChatDeepSeek(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[ChatDeepSeek]:
return ChatDeepSeek
@property
def chat_model_params(self) -> dict:
# These should be parameters used to initialize your integration for testing
return {
"model": "deepseek-chat",
"temperature": 0,
}
@property
def supports_json_mode(self) -> bool:
"""(bool) whether the chat model supports JSON mode."""
return True
@pytest.mark.xfail(reason="Not yet supported.")
def test_tool_message_histories_list_content(
self, model: BaseChatModel, my_adder_tool: BaseTool
) -> None:
super().test_tool_message_histories_list_content(model, my_adder_tool)
@pytest.mark.xfail(reason="Takes > 30s to run.")
def test_reasoning_content() -> None:
"""Test reasoning content."""
chat_model = ChatDeepSeek(model="deepseek-reasoner")
response = chat_model.invoke("What is 3^3?")
assert response.content
assert response.additional_kwargs["reasoning_content"]
raise ValueError()
@pytest.mark.xfail(reason="Takes > 30s to run.")
def test_reasoning_content_streaming() -> None:
chat_model = ChatDeepSeek(model="deepseek-reasoner")
full: Optional[BaseMessageChunk] = None
for chunk in chat_model.stream("What is 3^3?"):
full = chunk if full is None else full + chunk
assert isinstance(full, AIMessageChunk)
assert full.additional_kwargs["reasoning_content"]
|
"""Test ChatDeepSeek chat model."""
from typing import Optional, Type
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessageChunk, BaseMessageChunk
from langchain_core.tools import BaseTool
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_deepseek.chat_models import ChatDeepSeek
class TestChatDeepSeek(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[ChatDeepSeek]:
return ChatDeepSeek
@property
def chat_model_params(self) -> dict:
# These should be parameters used to initialize your integration for testing
return {
"model": "deepseek-chat",
"temperature": 0,
}
@property
def supports_json_mode(self) -> bool:
"""(bool) whether the chat model supports JSON mode."""
return True
@pytest.mark.xfail(reason="Not yet supported.")
def test_tool_message_histories_list_content(
self, model: BaseChatModel, my_adder_tool: BaseTool
) -> None:
super().test_tool_message_histories_list_content(model, my_adder_tool)
@pytest.mark.xfail(reason="Takes > 30s to run.")
def test_reasoning_content() -> None:
"""Test reasoning content."""
chat_model = ChatDeepSeek(model="deepseek-reasoner")
response = chat_model.invoke("What is the square root of 256256?")
assert response.content
assert response.additional_kwargs["reasoning_content"]
raise ValueError()
@pytest.mark.xfail(reason="Takes > 30s to run.")
def test_reasoning_content_streaming() -> None:
chat_model = ChatDeepSeek(model="deepseek-reasoner")
full: Optional[BaseMessageChunk] = None
for chunk in chat_model.stream("What is the square root of 256256?"):
full = chunk if full is None else full + chunk
assert isinstance(full, AIMessageChunk)
assert full.additional_kwargs["reasoning_content"]
|
from __future__ import annotations
from sentence_transformers.losses.MSELoss import MSELoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseMSELoss(MSELoss):
def __init__(self, model: SparseEncoder) -> None:
"""
Computes the MSE loss between the computed sentence embedding and a target sentence embedding. This loss
is used when extending sentence embeddings to new languages as described in our publication
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation.
Args:
model: SparseEncoder
Requirements:
1. Usually uses a finetuned teacher M in a knowledge distillation setup
Inputs:
+-----------------------------------------+-----------------------------+
| Texts | Labels |
+=========================================+=============================+
| sentence | model sentence embeddings |
+-----------------------------------------+-----------------------------+
| sentence_1, sentence_2, ..., sentence_N | model sentence embeddings |
+-----------------------------------------+-----------------------------+
Relations:
- :class:`SparseMarginMSELoss` is equivalent to this loss, but with a margin through a negative pair.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
student_model = SparseEncoder("prithivida/Splade_PP_en_v1")
teacher_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
train_dataset = Dataset.from_dict(
{
"english": ["The first sentence", "The second sentence", "The third sentence", "The fourth sentence"],
"french": ["La première phrase", "La deuxième phrase", "La troisième phrase", "La quatrième phrase"],
}
)
def compute_labels(batch):
return {"label": teacher_model.encode(batch["english"], convert_to_sparse_tensor=False)}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = losses.SparseMSELoss(student_model)
trainer = SparseEncoderTrainer(model=student_model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
return super().__init__(model)
|
from __future__ import annotations
from sentence_transformers.losses.MSELoss import MSELoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseMSELoss(MSELoss):
def __init__(self, model: SparseEncoder) -> None:
"""
        # TODO: Update this docstring; the training setup mentioned here has not yet been applied to sparse models.
Computes the MSE loss between the computed sentence embedding and a target sentence embedding. This loss
is used when extending sentence embeddings to new languages as described in our publication
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation.
For an example, see `the distillation documentation <../../../examples/sentence_transformer/training/distillation/README.html>`_ on extending language models to new languages.
Args:
model: SparseEncoder
References:
- Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation: https://arxiv.org/abs/2004.09813
- `Training > Model Distillation <../../../examples/sentence_transformer/training/distillation/README.html>`_
- `Training > Multilingual Models <../../../examples/sentence_transformer/training/multilingual/README.html>`_
Requirements:
1. Usually uses a finetuned teacher M in a knowledge distillation setup
Inputs:
+-----------------------------------------+-----------------------------+
| Texts | Labels |
+=========================================+=============================+
| sentence | model sentence embeddings |
+-----------------------------------------+-----------------------------+
| sentence_1, sentence_2, ..., sentence_N | model sentence embeddings |
+-----------------------------------------+-----------------------------+
Relations:
- :class:`SparseMarginMSELoss` is equivalent to this loss, but with a margin through a negative pair.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
student_model = SparseEncoder("prithivida/Splade_PP_en_v1")
teacher_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
train_dataset = Dataset.from_dict(
{
"english": ["The first sentence", "The second sentence", "The third sentence", "The fourth sentence"],
"french": ["La première phrase", "La deuxième phrase", "La troisième phrase", "La quatrième phrase"],
}
)
def compute_labels(batch):
return {"label": teacher_model.encode(batch["english"], convert_to_sparse_tensor=False)}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = losses.SparseMSELoss(student_model)
trainer = SparseEncoderTrainer(model=student_model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
return super().__init__(model)
|
from pathlib import Path
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor
compose_yml = Path(__file__).parent / 'docker-compose.yml'
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[1] / 'config.yml'))
assert ex.port == 6379
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_connection(indexer, docker_compose):
assert indexer.hostname == '127.0.0.1'
assert indexer.get_query_handler().ping()
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_upsert(indexer, docs, docker_compose):
indexer.upsert(docs, parameters={})
qh = indexer.get_query_handler()
redis_keys = qh.keys()
assert all(doc.id.encode() in redis_keys for doc in docs)
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_search(indexer, docs, docker_compose):
indexer.upsert(docs, parameters={})
query = DocumentArray([Document(id=doc.id) for doc in docs])
indexer.search(query, parameters={})
assert all(query_doc.content == doc.content for query_doc, doc in zip(query, docs))
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_upsert_with_duplicates(indexer, docs, docker_compose):
# insert same docs twice
indexer.upsert(docs, parameters={})
indexer.upsert(docs, parameters={})
qh = indexer.get_query_handler()
assert len(qh.keys()) == 5
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_add(indexer, docs, docker_compose):
indexer.add(docs, parameters={})
with indexer.get_query_handler() as redis_handler:
assert all(doc.id.encode() in redis_handler.keys() for doc in docs)
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_add_existing(indexer, docs, docker_compose):
indexer.add(docs, parameters={})
existing_doc = Document(id=docs[0].id, content='new content')
indexer.add(DocumentArray([existing_doc]), parameters={})
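    # unlike `upsert`, `add` must not overwrite the document that is already indexed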
with indexer.get_query_handler() as redis_handler:
result = redis_handler.get(existing_doc.id)
data = bytes(result)
retrieved_doc = Document(data)
assert retrieved_doc.content != existing_doc.content
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_update(indexer, docs, docker_compose):
indexer.add(docs, parameters={})
for doc in docs:
doc.content = 'new ' + doc.content
indexer.update(docs, parameters={})
with indexer.get_query_handler() as redis_handler:
for doc in docs:
result = redis_handler.get(doc.id)
data = bytes(result)
retrieved_doc = Document(data)
assert retrieved_doc.content == doc.content
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_update_non_existing(indexer, docs, docker_compose):
indexer.update(docs, parameters={})
with indexer.get_query_handler() as redis_handler:
assert all(doc.id.encode() not in redis_handler.keys() for doc in docs)
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_search_not_found(indexer, docs, docker_compose):
indexer.upsert(docs, parameters={})
query = DocumentArray([Document(id=docs[0].id), Document()])
indexer.search(query, parameters={})
assert query[0].content == docs[0].content
assert query[1].content is None
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_delete(indexer, docs, docker_compose):
indexer.upsert(docs, parameters={})
indexer.delete(docs[:2], parameters={})
query = DocumentArray([Document(id=doc.id) for doc in docs])
indexer.search(query, parameters={})
assert all(query_doc.content is None for query_doc in query[:2])
assert all(
query_doc.content == doc.content for query_doc, doc in zip(query[2:], docs[2:])
)
qh = indexer.get_query_handler()
assert len(qh.keys()) == 3
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_return_embeddings(indexer, docker_compose):
doc = Document(embedding=np.random.rand(1, 10))
da = DocumentArray([doc])
query1 = DocumentArray([Document(id=doc.id)])
indexer.add(da, parameters={})
indexer.search(query1, parameters={})
assert query1[0].embedding is not None
assert query1[0].embedding.shape == (1, 10)
query2 = DocumentArray([Document(id=doc.id)])
indexer.search(query2, parameters={"return_embeddings": False})
assert query2[0].embedding is None
|
import os
import numpy as np
import pytest
from jina import Document, DocumentArray
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml = os.path.abspath(os.path.join(cur_dir, 'docker-compose.yml'))
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_connection(indexer, docker_compose):
assert indexer.hostname == '127.0.0.1'
assert indexer.get_query_handler().ping()
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_upsert(indexer, docs, docker_compose):
indexer.upsert(docs, parameters={})
qh = indexer.get_query_handler()
redis_keys = qh.keys()
assert all(doc.id.encode() in redis_keys for doc in docs)
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_search(indexer, docs, docker_compose):
indexer.upsert(docs, parameters={})
query = DocumentArray([Document(id=doc.id) for doc in docs])
indexer.search(query, parameters={})
assert all(query_doc.content == doc.content for query_doc, doc in zip(query, docs))
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_upsert_with_duplicates(indexer, docs, docker_compose):
# insert same docs twice
indexer.upsert(docs, parameters={})
indexer.upsert(docs, parameters={})
qh = indexer.get_query_handler()
assert len(qh.keys()) == 5
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_add(indexer, docs, docker_compose):
indexer.add(docs, parameters={})
with indexer.get_query_handler() as redis_handler:
assert all(doc.id.encode() in redis_handler.keys() for doc in docs)
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_add_existing(indexer, docs, docker_compose):
indexer.add(docs, parameters={})
existing_doc = Document(id=docs[0].id, content='new content')
indexer.add(DocumentArray([existing_doc]), parameters={})
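    # unlike `upsert`, `add` must not overwrite the document that is already indexed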
with indexer.get_query_handler() as redis_handler:
result = redis_handler.get(existing_doc.id)
data = bytes(result)
retrieved_doc = Document(data)
assert retrieved_doc.content != existing_doc.content
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_update(indexer, docs, docker_compose):
indexer.add(docs, parameters={})
for doc in docs:
doc.content = 'new ' + doc.content
indexer.update(docs, parameters={})
with indexer.get_query_handler() as redis_handler:
for doc in docs:
result = redis_handler.get(doc.id)
data = bytes(result)
retrieved_doc = Document(data)
assert retrieved_doc.content == doc.content
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_update_non_existing(indexer, docs, docker_compose):
indexer.update(docs, parameters={})
with indexer.get_query_handler() as redis_handler:
assert all(doc.id.encode() not in redis_handler.keys() for doc in docs)
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_search_not_found(indexer, docs, docker_compose):
indexer.upsert(docs, parameters={})
query = DocumentArray([Document(id=docs[0].id), Document()])
indexer.search(query, parameters={})
assert query[0].content == docs[0].content
assert query[1].content is None
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_delete(indexer, docs, docker_compose):
indexer.upsert(docs, parameters={})
indexer.delete(docs[:2], parameters={})
query = DocumentArray([Document(id=doc.id) for doc in docs])
indexer.search(query, parameters={})
assert all(query_doc.content is None for query_doc in query[:2])
assert all(query_doc.content == doc.content for query_doc, doc in zip(query[2:], docs[2:]))
qh = indexer.get_query_handler()
assert len(qh.keys()) == 3
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_return_embeddings(indexer, docker_compose):
doc = Document(embedding=np.random.rand(1, 10))
da = DocumentArray([doc])
query1 = DocumentArray([Document(id=doc.id)])
indexer.add(da, parameters={})
indexer.search(query1, parameters={})
assert query1[0].embedding is not None
assert query1[0].embedding.shape == (1, 10)
query2 = DocumentArray([Document(id=doc.id)])
indexer.search(query2, parameters={"return_embeddings": False})
assert query2[0].embedding is None
|
from __future__ import annotations
from sentence_transformers import SentenceTransformer, losses, util
class AnglELoss(losses.CoSENTLoss):
def __init__(self, model: SentenceTransformer, scale: float = 20.0) -> None:
"""
This class implements AnglE (Angle Optimized) loss.
This is a modification of :class:`CoSENTLoss`, designed to address the following issue:
The cosine function's gradient approaches 0 as the wave approaches the top or bottom of its form.
This can hinder the optimization process, so AnglE proposes to instead optimize the angle difference
in complex space in order to mitigate this effect.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition. This is the same as CoSENTLoss, with a different
similarity function.
Args:
model: SentenceTransformerModel
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://arxiv.org/abs/2309.12871v1
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`CoSENTLoss` is AnglELoss with ``pairwise_cos_sim`` as the metric, rather than ``pairwise_angle_sim``.
- :class:`CosineSimilarityLoss` seems to produce a weaker training signal than ``CoSENTLoss`` or ``AnglELoss``.
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.AnglELoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__(model, scale, similarity_fct=util.pairwise_angle_sim)
@property
def citation(self) -> str:
return """
@misc{li2023angleoptimized,
title={AnglE-optimized Text Embeddings},
author={Xianming Li and Jing Li},
year={2023},
eprint={2309.12871},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
|
from __future__ import annotations
from sentence_transformers import SentenceTransformer, losses, util
class AnglELoss(losses.CoSENTLoss):
def __init__(self, model: SentenceTransformer, scale: float = 20.0) -> None:
"""
This class implements AnglE (Angle Optimized) loss.
This is a modification of :class:`CoSENTLoss`, designed to address the following issue:
The cosine function's gradient approaches 0 as the wave approaches the top or bottom of its form.
This can hinder the optimization process, so AnglE proposes to instead optimize the angle difference
in complex space in order to mitigate this effect.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition. This is the same as CoSENTLoss, with a different
similarity function.
Args:
model: SentenceTransformerModel
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://arxiv.org/abs/2309.12871v1
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Relations:
- :class:`CoSENTLoss` is AnglELoss with ``pairwise_cos_sim`` as the metric, rather than ``pairwise_angle_sim``.
- :class:`CosineSimilarityLoss` seems to produce a weaker training signal than ``CoSENTLoss`` or ``AnglELoss``.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.AnglELoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__(model, scale, similarity_fct=util.pairwise_angle_sim)
@property
def citation(self) -> str:
return """
@misc{li2023angleoptimized,
title={AnglE-optimized Text Embeddings},
author={Xianming Li and Jing Li},
year={2023},
eprint={2309.12871},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from mmdet.registry import MODELS
from .utils import weighted_loss
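# `weighted_loss` wraps the element-wise loss below so that it also accepts the standard
# `weight`, `reduction` and `avg_factor` arguments shared by the other mmdet losses.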
@weighted_loss
def knowledge_distillation_kl_div_loss(pred: Tensor,
soft_label: Tensor,
T: int,
detach_target: bool = True) -> Tensor:
r"""Loss function for knowledge distilling using KL divergence.
Args:
pred (Tensor): Predicted logits with shape (N, n + 1).
soft_label (Tensor): Target logits with shape (N, N + 1).
T (int): Temperature for distillation.
detach_target (bool): Remove soft_label from automatic differentiation
Returns:
Tensor: Loss tensor with shape (N,).
"""
assert pred.size() == soft_label.size()
target = F.softmax(soft_label / T, dim=1)
if detach_target:
target = target.detach()
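    # KL divergence between the temperature-softened student and teacher distributions;
    # the T * T factor compensates for the 1/T**2 gradient scaling introduced by the
    # softened softmax (cf. Hinton et al., "Distilling the Knowledge in a Neural Network").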
kd_loss = F.kl_div(
F.log_softmax(pred / T, dim=1), target, reduction='none').mean(1) * (
T * T)
return kd_loss
@MODELS.register_module()
class KnowledgeDistillationKLDivLoss(nn.Module):
"""Loss function for knowledge distilling using KL divergence.
Args:
reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
loss_weight (float): Loss weight of current loss.
T (int): Temperature for distillation.
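    Example (illustrative sketch, assuming a head with 80 classes plus background):
        >>> import torch
        >>> loss_fn = KnowledgeDistillationKLDivLoss(T=10)
        >>> student_logits = torch.randn(4, 81)
        >>> teacher_logits = torch.randn(4, 81)
        >>> loss = loss_fn(student_logits, teacher_logits)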
"""
def __init__(self,
reduction: str = 'mean',
loss_weight: float = 1.0,
T: int = 10) -> None:
super().__init__()
assert T >= 1
self.reduction = reduction
self.loss_weight = loss_weight
self.T = T
def forward(self,
pred: Tensor,
soft_label: Tensor,
weight: Optional[Tensor] = None,
avg_factor: Optional[int] = None,
reduction_override: Optional[str] = None) -> Tensor:
"""Forward function.
Args:
pred (Tensor): Predicted logits with shape (N, n + 1).
            soft_label (Tensor): Target logits with shape (N, n + 1).
weight (Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
Returns:
Tensor: Loss tensor.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_kd = self.loss_weight * knowledge_distillation_kl_div_loss(
pred,
soft_label,
weight,
reduction=reduction,
avg_factor=avg_factor,
T=self.T)
return loss_kd
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from mmdet.registry import MODELS
from .utils import weighted_loss
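# `weighted_loss` wraps the element-wise loss below so that it also accepts the standard
# `weight`, `reduction` and `avg_factor` arguments shared by the other mmdet losses.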
@weighted_loss
def knowledge_distillation_kl_div_loss(pred,
soft_label,
T,
detach_target=True):
r"""Loss function for knowledge distilling using KL divergence.
Args:
pred (Tensor): Predicted logits with shape (N, n + 1).
soft_label (Tensor): Target logits with shape (N, N + 1).
T (int): Temperature for distillation.
detach_target (bool): Remove soft_label from automatic differentiation
Returns:
torch.Tensor: Loss tensor with shape (N,).
"""
assert pred.size() == soft_label.size()
target = F.softmax(soft_label / T, dim=1)
if detach_target:
target = target.detach()
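    # KL divergence between the temperature-softened student and teacher distributions;
    # the T * T factor compensates for the 1/T**2 gradient scaling introduced by the
    # softened softmax (cf. Hinton et al., "Distilling the Knowledge in a Neural Network").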
kd_loss = F.kl_div(
F.log_softmax(pred / T, dim=1), target, reduction='none').mean(1) * (
T * T)
return kd_loss
@MODELS.register_module()
class KnowledgeDistillationKLDivLoss(nn.Module):
"""Loss function for knowledge distilling using KL divergence.
Args:
reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
loss_weight (float): Loss weight of current loss.
T (int): Temperature for distillation.
"""
def __init__(self, reduction='mean', loss_weight=1.0, T=10):
super(KnowledgeDistillationKLDivLoss, self).__init__()
assert T >= 1
self.reduction = reduction
self.loss_weight = loss_weight
self.T = T
def forward(self,
pred,
soft_label,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (Tensor): Predicted logits with shape (N, n + 1).
            soft_label (Tensor): Target logits with shape (N, n + 1).
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_kd = self.loss_weight * knowledge_distillation_kl_div_loss(
pred,
soft_label,
weight,
reduction=reduction,
avg_factor=avg_factor,
T=self.T)
return loss_kd
|
import asyncio
import time
import pytest
from jina import Client, Deployment, Executor, requests
from jina._docarray import Document, DocumentArray
from jina.excepts import BadServer
from jina.helper import random_port
class MyExecutor(Executor):
@requests(on='/hello')
async def task(self, doc: Document, **kwargs):
for i in range(100):
yield Document(text=f'{doc.text} {i}')
@requests(on='/world')
async def non_gen_task(self, docs: DocumentArray, **kwargs):
return docs
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['http', 'grpc'])
@pytest.mark.parametrize('include_gateway', [False, True])
async def test_streaming_deployment(protocol, include_gateway):
port = random_port()
docs = []
with Deployment(
uses=MyExecutor,
timeout_ready=-1,
protocol=protocol,
port=port,
include_gateway=include_gateway,
):
client = Client(port=port, protocol=protocol, asyncio=True)
i = 0
async for doc in client.stream_doc(
on='/hello',
inputs=Document(text='hello world'),
return_type=Document,
input_type=Document,
):
docs.append(doc.text)
i += 1
assert docs == [f'hello world {i}' for i in range(100)]
assert len(docs) == 100
class WaitStreamExecutor(Executor):
@requests(on='/hello')
async def task(self, doc: Document, **kwargs):
for i in range(5):
yield Document(text=f'{doc.text} {i}')
await asyncio.sleep(0.5)
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['http', 'grpc'])
@pytest.mark.parametrize('include_gateway', [False, True])
async def test_streaming_delay(protocol, include_gateway):
from jina import Deployment
port = random_port()
with Deployment(
uses=WaitStreamExecutor,
timeout_ready=-1,
protocol=protocol,
port=port,
include_gateway=include_gateway,
):
client = Client(port=port, protocol=protocol, asyncio=True)
i = 0
start_time = time.time()
async for doc in client.stream_doc(
on='/hello', inputs=Document(text='hello world')
):
assert doc.text == f'hello world {i}'
i += 1
        # 0.5 seconds between each streamed Document + 0.5 seconds tolerance interval
assert time.time() - start_time < (0.5 * i) + 0.5
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['grpc'])
async def test_streaming_client_non_gen_endpoint(protocol):
from jina import Deployment
port = random_port()
with Deployment(
uses=MyExecutor,
timeout_ready=-1,
protocol=protocol,
port=port,
include_gateway=False,
):
client = Client(port=port, protocol=protocol, asyncio=True)
i = 0
with pytest.raises(BadServer):
async for _ in client.stream_doc(
on='/world', inputs=Document(text='hello world')
):
pass
def test_invalid_executor():
with pytest.raises(RuntimeError) as exc_info:
class InvalidExecutor3(Executor):
@requests(on='/invalid')
async def invalid(self, docs: DocumentArray, **kwargs):
yield docs[0]
assert type(exc_info.value.__cause__) is AssertionError
with pytest.raises(RuntimeError) as exc_info:
class InvalidExecutor4(Executor):
@requests(on='/invalid')
def invalid(self, docs: DocumentArray, **kwargs):
yield docs[0]
assert type(exc_info.value.__cause__) is AssertionError
|
import asyncio
import time
import pytest
from jina import Client, Deployment, Executor, requests
from jina._docarray import Document, DocumentArray
from jina.excepts import BadServer
from jina.helper import random_port
class MyExecutor(Executor):
@requests(on='/hello')
async def task(self, doc: Document, **kwargs):
for i in range(100):
yield Document(text=f'{doc.text} {i}')
@requests(on='/world')
async def non_gen_task(self, docs: DocumentArray, **kwargs):
return docs
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['http', 'grpc'])
@pytest.mark.parametrize('include_gateway', [False, True])
async def test_streaming_deployment(protocol, include_gateway):
from jina import Deployment
port = random_port()
with Deployment(
uses=MyExecutor,
timeout_ready=-1,
protocol=protocol,
port=port,
include_gateway=include_gateway,
):
client = Client(port=port, protocol=protocol, asyncio=True)
i = 0
async for doc in client.stream_doc(
on='/hello', inputs=Document(text='hello world')
):
assert doc.text == f'hello world {i}'
i += 1
class WaitStreamExecutor(Executor):
@requests(on='/hello')
async def task(self, doc: Document, **kwargs):
for i in range(5):
yield Document(text=f'{doc.text} {i}')
await asyncio.sleep(0.5)
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['http', 'grpc'])
@pytest.mark.parametrize('include_gateway', [False, True])
async def test_streaming_delay(protocol, include_gateway):
from jina import Deployment
port = random_port()
with Deployment(
uses=WaitStreamExecutor,
timeout_ready=-1,
protocol=protocol,
port=port,
include_gateway=include_gateway,
):
client = Client(port=port, protocol=protocol, asyncio=True)
i = 0
start_time = time.time()
async for doc in client.stream_doc(
on='/hello', inputs=Document(text='hello world')
):
assert doc.text == f'hello world {i}'
i += 1
        # 0.5 seconds between each streamed Document + 0.5 seconds tolerance interval
assert time.time() - start_time < (0.5 * i) + 0.5
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['grpc'])
async def test_streaming_client_non_gen_endpoint(protocol):
from jina import Deployment
port = random_port()
with Deployment(
uses=MyExecutor,
timeout_ready=-1,
protocol=protocol,
port=port,
include_gateway=False,
):
client = Client(port=port, protocol=protocol, asyncio=True)
i = 0
with pytest.raises(BadServer):
async for _ in client.stream_doc(
on='/world', inputs=Document(text='hello world')
):
pass
def test_invalid_executor():
with pytest.raises(RuntimeError) as exc_info:
class InvalidExecutor3(Executor):
@requests(on='/invalid')
async def invalid(self, docs: DocumentArray, **kwargs):
yield docs[0]
assert type(exc_info.value.__cause__) is AssertionError
with pytest.raises(RuntimeError) as exc_info:
class InvalidExecutor4(Executor):
@requests(on='/invalid')
def invalid(self, docs: DocumentArray, **kwargs):
yield docs[0]
assert type(exc_info.value.__cause__) is AssertionError
|
from ._source_separation_pipeline import (
CONVTASNET_BASE_LIBRI2MIX,
HDEMUCS_HIGH_MUSDB,
HDEMUCS_HIGH_MUSDB_PLUS,
SourceSeparationBundle,
)
from ._squim_pipeline import SQUIM_OBJECTIVE, SQUIM_SUBJECTIVE, SquimObjectiveBundle, SquimSubjectiveBundle
from ._tts import (
TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH,
TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH,
TACOTRON2_WAVERNN_CHAR_LJSPEECH,
TACOTRON2_WAVERNN_PHONE_LJSPEECH,
Tacotron2TTSBundle,
)
from ._wav2vec2.impl import (
HUBERT_ASR_LARGE,
HUBERT_ASR_XLARGE,
HUBERT_BASE,
HUBERT_LARGE,
HUBERT_XLARGE,
VOXPOPULI_ASR_BASE_10K_DE,
VOXPOPULI_ASR_BASE_10K_EN,
VOXPOPULI_ASR_BASE_10K_ES,
VOXPOPULI_ASR_BASE_10K_FR,
VOXPOPULI_ASR_BASE_10K_IT,
WAV2VEC2_ASR_BASE_100H,
WAV2VEC2_ASR_BASE_10M,
WAV2VEC2_ASR_BASE_960H,
WAV2VEC2_ASR_LARGE_100H,
WAV2VEC2_ASR_LARGE_10M,
WAV2VEC2_ASR_LARGE_960H,
WAV2VEC2_ASR_LARGE_LV60K_100H,
WAV2VEC2_ASR_LARGE_LV60K_10M,
WAV2VEC2_ASR_LARGE_LV60K_960H,
WAV2VEC2_BASE,
WAV2VEC2_LARGE,
WAV2VEC2_LARGE_LV60K,
WAV2VEC2_XLSR53,
WAV2VEC2_XLSR_1B,
WAV2VEC2_XLSR_2B,
WAV2VEC2_XLSR_300M,
Wav2Vec2ASRBundle,
Wav2Vec2Bundle,
WAVLM_BASE,
WAVLM_BASE_PLUS,
WAVLM_LARGE,
)
from .rnnt_pipeline import EMFORMER_RNNT_BASE_LIBRISPEECH, RNNTBundle
__all__ = [
"Wav2Vec2Bundle",
"Wav2Vec2ASRBundle",
"WAV2VEC2_BASE",
"WAV2VEC2_LARGE",
"WAV2VEC2_LARGE_LV60K",
"WAV2VEC2_ASR_BASE_10M",
"WAV2VEC2_ASR_BASE_100H",
"WAV2VEC2_ASR_BASE_960H",
"WAV2VEC2_ASR_LARGE_10M",
"WAV2VEC2_ASR_LARGE_100H",
"WAV2VEC2_ASR_LARGE_960H",
"WAV2VEC2_ASR_LARGE_LV60K_10M",
"WAV2VEC2_ASR_LARGE_LV60K_100H",
"WAV2VEC2_ASR_LARGE_LV60K_960H",
"WAV2VEC2_XLSR53",
"WAV2VEC2_XLSR_300M",
"WAV2VEC2_XLSR_1B",
"WAV2VEC2_XLSR_2B",
"VOXPOPULI_ASR_BASE_10K_EN",
"VOXPOPULI_ASR_BASE_10K_ES",
"VOXPOPULI_ASR_BASE_10K_DE",
"VOXPOPULI_ASR_BASE_10K_FR",
"VOXPOPULI_ASR_BASE_10K_IT",
"HUBERT_BASE",
"HUBERT_LARGE",
"HUBERT_XLARGE",
"HUBERT_ASR_LARGE",
"HUBERT_ASR_XLARGE",
"WAVLM_BASE",
"WAVLM_BASE_PLUS",
"WAVLM_LARGE",
"Tacotron2TTSBundle",
"TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH",
"TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH",
"TACOTRON2_WAVERNN_CHAR_LJSPEECH",
"TACOTRON2_WAVERNN_PHONE_LJSPEECH",
"RNNTBundle",
"EMFORMER_RNNT_BASE_LIBRISPEECH",
"SourceSeparationBundle",
"CONVTASNET_BASE_LIBRI2MIX",
"HDEMUCS_HIGH_MUSDB_PLUS",
"HDEMUCS_HIGH_MUSDB",
"SQUIM_OBJECTIVE",
"SQUIM_SUBJECTIVE",
"SquimObjectiveBundle",
"SquimSubjectiveBundle",
]
|
from ._source_separation_pipeline import (
CONVTASNET_BASE_LIBRI2MIX,
HDEMUCS_HIGH_MUSDB,
HDEMUCS_HIGH_MUSDB_PLUS,
SourceSeparationBundle,
)
from ._tts import (
TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH,
TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH,
TACOTRON2_WAVERNN_CHAR_LJSPEECH,
TACOTRON2_WAVERNN_PHONE_LJSPEECH,
Tacotron2TTSBundle,
)
from ._wav2vec2.impl import (
HUBERT_ASR_LARGE,
HUBERT_ASR_XLARGE,
HUBERT_BASE,
HUBERT_LARGE,
HUBERT_XLARGE,
VOXPOPULI_ASR_BASE_10K_DE,
VOXPOPULI_ASR_BASE_10K_EN,
VOXPOPULI_ASR_BASE_10K_ES,
VOXPOPULI_ASR_BASE_10K_FR,
VOXPOPULI_ASR_BASE_10K_IT,
WAV2VEC2_ASR_BASE_100H,
WAV2VEC2_ASR_BASE_10M,
WAV2VEC2_ASR_BASE_960H,
WAV2VEC2_ASR_LARGE_100H,
WAV2VEC2_ASR_LARGE_10M,
WAV2VEC2_ASR_LARGE_960H,
WAV2VEC2_ASR_LARGE_LV60K_100H,
WAV2VEC2_ASR_LARGE_LV60K_10M,
WAV2VEC2_ASR_LARGE_LV60K_960H,
WAV2VEC2_BASE,
WAV2VEC2_LARGE,
WAV2VEC2_LARGE_LV60K,
WAV2VEC2_XLSR53,
WAV2VEC2_XLSR_1B,
WAV2VEC2_XLSR_2B,
WAV2VEC2_XLSR_300M,
Wav2Vec2ASRBundle,
Wav2Vec2Bundle,
WAVLM_BASE,
WAVLM_BASE_PLUS,
WAVLM_LARGE,
)
from .rnnt_pipeline import EMFORMER_RNNT_BASE_LIBRISPEECH, RNNTBundle
__all__ = [
"Wav2Vec2Bundle",
"Wav2Vec2ASRBundle",
"WAV2VEC2_BASE",
"WAV2VEC2_LARGE",
"WAV2VEC2_LARGE_LV60K",
"WAV2VEC2_ASR_BASE_10M",
"WAV2VEC2_ASR_BASE_100H",
"WAV2VEC2_ASR_BASE_960H",
"WAV2VEC2_ASR_LARGE_10M",
"WAV2VEC2_ASR_LARGE_100H",
"WAV2VEC2_ASR_LARGE_960H",
"WAV2VEC2_ASR_LARGE_LV60K_10M",
"WAV2VEC2_ASR_LARGE_LV60K_100H",
"WAV2VEC2_ASR_LARGE_LV60K_960H",
"WAV2VEC2_XLSR53",
"WAV2VEC2_XLSR_300M",
"WAV2VEC2_XLSR_1B",
"WAV2VEC2_XLSR_2B",
"VOXPOPULI_ASR_BASE_10K_EN",
"VOXPOPULI_ASR_BASE_10K_ES",
"VOXPOPULI_ASR_BASE_10K_DE",
"VOXPOPULI_ASR_BASE_10K_FR",
"VOXPOPULI_ASR_BASE_10K_IT",
"HUBERT_BASE",
"HUBERT_LARGE",
"HUBERT_XLARGE",
"HUBERT_ASR_LARGE",
"HUBERT_ASR_XLARGE",
"WAVLM_BASE",
"WAVLM_BASE_PLUS",
"WAVLM_LARGE",
"Tacotron2TTSBundle",
"TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH",
"TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH",
"TACOTRON2_WAVERNN_CHAR_LJSPEECH",
"TACOTRON2_WAVERNN_PHONE_LJSPEECH",
"RNNTBundle",
"EMFORMER_RNNT_BASE_LIBRISPEECH",
"SourceSeparationBundle",
"CONVTASNET_BASE_LIBRI2MIX",
"HDEMUCS_HIGH_MUSDB_PLUS",
"HDEMUCS_HIGH_MUSDB",
]
|
"""Tool for the Wikidata API."""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.wikidata import WikidataAPIWrapper
class WikidataQueryRun(BaseTool):
"""Tool that searches the Wikidata API."""
name: str = "Wikidata"
description: str = (
"A wrapper around Wikidata. "
"Useful for when you need to answer general questions about "
"people, places, companies, facts, historical events, or other subjects. "
"Input should be the exact name of the item you want information about "
"or a Wikidata QID."
)
api_wrapper: WikidataAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Wikidata tool."""
return self.api_wrapper.run(query)
|
"""Tool for the Wikidata API."""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.wikidata import WikidataAPIWrapper
class WikidataQueryRun(BaseTool): # type: ignore[override]
"""Tool that searches the Wikidata API."""
name: str = "Wikidata"
description: str = (
"A wrapper around Wikidata. "
"Useful for when you need to answer general questions about "
"people, places, companies, facts, historical events, or other subjects. "
"Input should be the exact name of the item you want information about "
"or a Wikidata QID."
)
api_wrapper: WikidataAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Wikidata tool."""
return self.api_wrapper.run(query)
|
import os
import time
import pytest
from jina.excepts import RuntimeFailToStart
from jina.orchestrate.pods import Pod
from jina.parsers import set_gateway_parser
from jina.serve.runtimes import asyncio as runtime_asyncio
from jina.serve.executors import BaseExecutor
from tests.helper import _generate_pod_args
@pytest.fixture()
def fake_env():
os.environ['key_parent'] = 'value3'
yield
os.environ.pop('key_parent', None)
class EnvChecker1(BaseExecutor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# pod/pod-specific
assert os.environ['key1'] == 'value1'
assert os.environ['key2'] == 'value2'
# inherit from parent process
assert os.environ['key_parent'] == 'value3'
def test_pod_runtime_env_setting(fake_env):
args = _generate_pod_args(
[
'--uses',
'EnvChecker1',
'--env',
'key1=value1',
'--env',
'key2=value2',
]
)
with Pod(args):
pass
# should not affect the main process
assert 'key1' not in os.environ
assert 'key2' not in os.environ
assert 'key_parent' in os.environ
@pytest.mark.parametrize(
'protocol, expected',
[
('grpc', 'GRPCGateway'),
('websocket', 'WebSocketGateway'),
('http', 'HTTPGateway'),
],
)
def test_gateway_args(protocol, expected):
args = set_gateway_parser().parse_args(
[
'--host',
'jina-custom-gateway',
'--port',
'23456',
'--protocol',
protocol,
]
)
p = Pod(args)
assert p.args.uses == expected
@pytest.mark.parametrize(
'protocol, expected',
[
('grpc', 'GRPCGateway'),
('websocket', 'WebSocketGateway'),
('http', 'HTTPGateway'),
],
)
def test_gateway_runtimes(protocol, expected):
args = set_gateway_parser().parse_args(
[
'--graph-description',
'{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}',
'--deployments-addresses',
'{"pod0": ["0.0.0.0:1234"]}',
'--protocol',
protocol,
]
)
with Pod(args) as p:
assert p.args.uses == expected
class RaisingExecutor(BaseExecutor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
raise RuntimeError('intentional error')
def test_failing_executor():
args = _generate_pod_args(
[
'--uses',
'RaisingExecutor',
]
)
with pytest.raises(RuntimeFailToStart):
with Pod(args):
pass
@pytest.mark.parametrize(
'protocol, expected',
[
('grpc', 'GRPCGateway'),
('websocket', 'WebSocketGateway'),
('http', 'HTTPGateway'),
],
)
def test_failing_gateway_runtimes(protocol, expected):
args = set_gateway_parser().parse_args(
[
'--graph-description',
'{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}',
'--deployments-addresses',
'{_INVALIDJSONINTENTIONALLY_pod0": ["0.0.0.0:1234"]}',
'--protocol',
protocol,
]
)
with pytest.raises(RuntimeFailToStart):
with Pod(args):
pass
@pytest.mark.timeout(4)
def test_close_before_start(monkeypatch):
class SlowFakeRuntime:
def __init__(self, *args, **kwargs):
time.sleep(5.0)
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def run_forever(self):
pass
monkeypatch.setattr(
runtime_asyncio,
'AsyncNewLoopRuntime',
SlowFakeRuntime,
)
pod = Pod(_generate_pod_args(['--noblock-on-start']))
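    # `close()` must return promptly even though the patched runtime blocks for 5 seconds,
    # otherwise the 4-second `pytest.mark.timeout` above fails the test.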
pod.start()
pod.is_signal_handlers_installed.set()
pod.close()
@pytest.mark.timeout(4)
def test_close_before_start_slow_enter(monkeypatch):
class SlowFakeRuntime:
def __init__(self, *args, **kwargs):
pass
def __enter__(self):
time.sleep(5.0)
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def run_forever(self):
pass
monkeypatch.setattr(
runtime_asyncio,
'AsyncNewLoopRuntime',
SlowFakeRuntime,
)
pod = Pod(_generate_pod_args(['--noblock-on-start']))
pod.start()
pod.is_signal_handlers_installed.set()
pod.close()
|
import os
import time
import pytest
from jina.excepts import RuntimeFailToStart
from jina.orchestrate.pods import Pod
from jina.parsers import set_gateway_parser
from jina.serve.runtimes import asyncio as runtime_asyncio
from jina.serve.executors import BaseExecutor
from tests.helper import _generate_pod_args
@pytest.fixture()
def fake_env():
os.environ['key_parent'] = 'value3'
yield
os.environ.pop('key_parent', None)
class EnvChecker1(BaseExecutor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# pod/pod-specific
assert os.environ['key1'] == 'value1'
assert os.environ['key2'] == 'value2'
# inherit from parent process
assert os.environ['key_parent'] == 'value3'
def test_pod_runtime_env_setting(fake_env):
args = _generate_pod_args(
[
'--uses',
'EnvChecker1',
'--env',
'key1=value1',
'--env',
'key2=value2',
]
)
with Pod(args):
pass
# should not affect the main process
assert 'key1' not in os.environ
assert 'key2' not in os.environ
assert 'key_parent' in os.environ
@pytest.mark.parametrize(
'protocol, expected',
[
('grpc', 'GRPCGateway'),
('websocket', 'WebSocketGateway'),
('http', 'HTTPGateway'),
],
)
def test_gateway_args(protocol, expected):
args = set_gateway_parser().parse_args(
[
'--host',
'jina-custom-gateway',
'--port',
'23456',
'--protocol',
protocol,
]
)
p = Pod(args)
assert p.args.uses == expected
@pytest.mark.parametrize(
'protocol, expected',
[
('grpc', 'GRPCGateway'),
('websocket', 'WebSocketGateway'),
('http', 'HTTPGateway'),
],
)
def test_gateway_runtimes(protocol, expected):
args = set_gateway_parser().parse_args(
[
'--graph-description',
'{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}',
'--deployments-addresses',
'{"pod0": ["0.0.0.0:1234"]}',
'--protocol',
protocol,
]
)
with Pod(args) as p:
assert p.args.uses == expected
class RaisingExecutor(BaseExecutor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
raise RuntimeError('intentional error')
def test_failing_executor():
args = _generate_pod_args(
[
'--uses',
'RaisingExecutor',
]
)
with pytest.raises(RuntimeFailToStart):
with Pod(args):
pass
@pytest.mark.parametrize(
'protocol, expected',
[
('grpc', 'GRPCGateway'),
('websocket', 'WebSocketGateway'),
('http', 'HTTPGateway'),
],
)
def test_failing_gateway_runtimes(protocol, expected):
args = set_gateway_parser().parse_args(
[
'--graph-description',
'{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}',
'--deployments-addresses',
'{_INVALIDJSONINTENTIONALLY_pod0": ["0.0.0.0:1234"]}',
'--protocol',
protocol,
]
)
with pytest.raises(RuntimeFailToStart):
with Pod(args):
pass
@pytest.mark.timeout(4)
def test_close_before_start(monkeypatch):
class SlowFakeRuntime:
def __init__(self, *args, **kwargs):
time.sleep(5.0)
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def run_forever(self):
pass
monkeypatch.setattr(
runtime_asyncio,
'AsyncNewLoopRuntime',
SlowFakeRuntime,
)
pod = Pod(_generate_pod_args(['--noblock-on-start']))
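    # `close()` must return promptly even though the patched runtime blocks for 5 seconds,
    # otherwise the 4-second `pytest.mark.timeout` above fails the test.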
pod.start()
pod.close()
@pytest.mark.timeout(4)
def test_close_before_start_slow_enter(monkeypatch):
class SlowFakeRuntime:
def __init__(self, *args, **kwargs):
pass
def __enter__(self):
time.sleep(5.0)
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def run_forever(self):
pass
monkeypatch.setattr(
runtime_asyncio,
'AsyncNewLoopRuntime',
SlowFakeRuntime,
)
pod = Pod(_generate_pod_args(['--noblock-on-start']))
pod.start()
pod.close()
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.models.cloning import clone_model as clone_model
from keras.src.models.model import Model as Model
from keras.src.models.model import model_from_json as model_from_json
from keras.src.models.sequential import Sequential as Sequential
from keras.src.saving.saving_api import load_model as load_model
from keras.src.saving.saving_api import save_model as save_model
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.models.cloning import clone_model
from keras.src.models.model import Model
from keras.src.models.model import model_from_json
from keras.src.models.sequential import Sequential
from keras.src.saving.saving_api import load_model
from keras.src.saving.saving_api import save_model
|
# Copyright (c) OpenMMLab. All rights reserved.
from .interpolation import InterpolateTracklets
from .kalman_filter import KalmanFilter
from .similarity import embed_similarity
__all__ = ['KalmanFilter', 'InterpolateTracklets', 'embed_similarity']
|
# Copyright (c) OpenMMLab. All rights reserved.
from .interpolation import InterpolateTracklets
from .kalman_filter import KalmanFilter
__all__ = ['KalmanFilter', 'InterpolateTracklets']
|
from typing import TYPE_CHECKING, Any, Generic, Type, TypeVar, Union
import numpy as np
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.ndarray import NdArray
from docarray.utils._internal.misc import is_tf_available, is_torch_available # noqa
torch_available = is_torch_available()
if torch_available:
import torch
from docarray.typing.tensor.torch_tensor import TorchTensor # noqa: F401
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor # noqa: F401
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
# Below is a hack to keep the type checker happy: `AnyTensor` is defined as a class, but it has the same
# underlying behavior as `Union[TorchTensor, TensorFlowTensor, NdArray]`, so it is safe to use `AnyTensor`
# as the type of the `tensor` field in a `BaseDoc` class.
AnyTensor = Union[NdArray]
if torch_available and tf_available:
AnyTensor = Union[NdArray, TorchTensor, TensorFlowTensor] # type: ignore
elif torch_available:
AnyTensor = Union[NdArray, TorchTensor] # type: ignore
elif tf_available:
AnyTensor = Union[NdArray, TensorFlowTensor] # type: ignore
else:
T = TypeVar("T", bound="AnyTensor")
ShapeT = TypeVar('ShapeT')
class AnyTensor(AbstractTensor, Generic[ShapeT]):
"""
    Represents a tensor object that can hold a TensorFlow, PyTorch, or NumPy tensor.
---
    ```python
from docarray import BaseDoc
from docarray.typing import AnyTensor
class MyTensorDoc(BaseDoc):
tensor: AnyTensor
# Example usage with TensorFlow:
import tensorflow as tf
    doc = MyTensorDoc(tensor=tf.zeros((1000, 2)))
# Example usage with PyTorch:
import torch
doc = MyTensorDoc(tensor=torch.zeros(1000, 2))
# Example usage with NumPy:
import numpy as np
doc = MyTensorDoc(tensor=np.zeros((1000, 2)))
    ```
---
Returns:
Union[TorchTensor, TensorFlowTensor, NdArray]: The validated and converted tensor.
Raises:
TypeError: If the input value is not a compatible type (torch.Tensor, tensorflow.Tensor, numpy.ndarray).
"""
def __getitem__(self: T, item):
pass
def __setitem__(self, index, value):
pass
def __iter__(self):
pass
def __len__(self):
pass
@classmethod
def _docarray_from_native(cls: Type[T], value: Any):
raise RuntimeError(f'This method should not be called on {cls}.')
@staticmethod
def get_comp_backend():
raise RuntimeError('This method should not be called on AnyTensor.')
def to_protobuf(self):
raise RuntimeError(f'This method should not be called on {self.__class__}.')
def _docarray_to_json_compatible(self):
raise RuntimeError(f'This method should not be called on {self.__class__}.')
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T):
raise RuntimeError(f'This method should not be called on {cls}.')
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
field: "ModelField",
config: "BaseConfig",
):
# Check for TorchTensor first, then TensorFlowTensor, then NdArray
if torch_available:
if isinstance(value, TorchTensor):
return value
elif isinstance(value, torch.Tensor):
return TorchTensor._docarray_from_native(value) # noqa
if tf_available:
if isinstance(value, TensorFlowTensor):
return value
elif isinstance(value, tf.Tensor):
return TensorFlowTensor._docarray_from_native(value) # noqa
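        # Finally, fall back to NumPy validation.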
try:
return NdArray.validate(value, field, config)
except Exception as e: # noqa
print(e)
pass
raise TypeError(
f"Expected one of [torch.Tensor, tensorflow.Tensor, numpy.ndarray] "
f"compatible type, got {type(value)}"
)
|
from typing import Union
from docarray.typing.tensor.ndarray import NdArray
from docarray.utils._internal.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.torch_tensor import TorchTensor # noqa: F401
tf_available = is_tf_available()
if tf_available:
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor # noqa: F401
AnyTensor = Union[NdArray]
if torch_available and tf_available:
AnyTensor = Union[NdArray, TorchTensor, TensorFlowTensor] # type: ignore
elif torch_available:
AnyTensor = Union[NdArray, TorchTensor] # type: ignore
elif tf_available:
AnyTensor = Union[NdArray, TensorFlowTensor] # type: ignore
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_yolos import *
from .feature_extraction_yolos import *
from .image_processing_yolos import *
from .image_processing_yolos_fast import *
from .modeling_yolos import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_yolos import *
from .feature_extraction_yolos import *
from .image_processing_yolos import *
from .modeling_yolos import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
import os
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
from PIL import Image
from .utils import check_integrity, download_and_extract_archive, download_url
from .vision import VisionDataset
class SBU(VisionDataset):
"""`SBU Captioned Photo <http://www.cs.virginia.edu/~vicente/sbucaptions/>`_ Dataset.
Args:
root (str or ``pathlib.Path``): Root directory of dataset where tarball
``SBUCaptionedPhotoDataset.tar.gz`` exists.
transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g., ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
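    Example (illustrative sketch; assumes the archive has already been downloaded into ``./data``):
        >>> from torchvision import transforms
        >>> dataset = SBU(root="./data", transform=transforms.ToTensor(), download=False)
        >>> img, caption = dataset[0]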
"""
url = "https://www.cs.rice.edu/~vo9/sbucaptions/SBUCaptionedPhotoDataset.tar.gz"
filename = "SBUCaptionedPhotoDataset.tar.gz"
md5_checksum = "9aec147b3488753cf758b4d493422285"
def __init__(
self,
root: Union[str, Path],
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = True,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
# Read the caption for each photo
self.photos = []
self.captions = []
file1 = os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_urls.txt")
file2 = os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_captions.txt")
for line1, line2 in zip(open(file1), open(file2)):
url = line1.rstrip()
photo = os.path.basename(url)
filename = os.path.join(self.root, "dataset", photo)
if os.path.exists(filename):
caption = line2.rstrip()
self.photos.append(photo)
self.captions.append(caption)
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is a caption for the photo.
"""
filename = os.path.join(self.root, "dataset", self.photos[index])
img = Image.open(filename).convert("RGB")
if self.transform is not None:
img = self.transform(img)
target = self.captions[index]
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self) -> int:
"""The number of photos in the dataset."""
return len(self.photos)
def _check_integrity(self) -> bool:
"""Check the md5 checksum of the downloaded tarball."""
root = self.root
fpath = os.path.join(root, self.filename)
if not check_integrity(fpath, self.md5_checksum):
return False
return True
def download(self) -> None:
"""Download and extract the tarball, and download each individual photo."""
if self._check_integrity():
print("Files already downloaded and verified")
return
download_and_extract_archive(self.url, self.root, self.root, self.filename, self.md5_checksum)
# Download individual photos
with open(os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_urls.txt")) as fh:
for line in fh:
url = line.rstrip()
try:
download_url(url, os.path.join(self.root, "dataset"))
except OSError:
# The images point to public images on Flickr.
# Note: Images might be removed by users at anytime.
pass
|
import os
from typing import Any, Callable, Optional, Tuple
from PIL import Image
from .utils import check_integrity, download_and_extract_archive, download_url
from .vision import VisionDataset
class SBU(VisionDataset):
"""`SBU Captioned Photo <http://www.cs.virginia.edu/~vicente/sbucaptions/>`_ Dataset.
Args:
root (string): Root directory of dataset where tarball
``SBUCaptionedPhotoDataset.tar.gz`` exists.
transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g., ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
url = "https://www.cs.rice.edu/~vo9/sbucaptions/SBUCaptionedPhotoDataset.tar.gz"
filename = "SBUCaptionedPhotoDataset.tar.gz"
md5_checksum = "9aec147b3488753cf758b4d493422285"
def __init__(
self,
root: str,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = True,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
# Read the caption for each photo
self.photos = []
self.captions = []
file1 = os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_urls.txt")
file2 = os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_captions.txt")
for line1, line2 in zip(open(file1), open(file2)):
url = line1.rstrip()
photo = os.path.basename(url)
filename = os.path.join(self.root, "dataset", photo)
if os.path.exists(filename):
caption = line2.rstrip()
self.photos.append(photo)
self.captions.append(caption)
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is a caption for the photo.
"""
filename = os.path.join(self.root, "dataset", self.photos[index])
img = Image.open(filename).convert("RGB")
if self.transform is not None:
img = self.transform(img)
target = self.captions[index]
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self) -> int:
"""The number of photos in the dataset."""
return len(self.photos)
def _check_integrity(self) -> bool:
"""Check the md5 checksum of the downloaded tarball."""
root = self.root
fpath = os.path.join(root, self.filename)
if not check_integrity(fpath, self.md5_checksum):
return False
return True
def download(self) -> None:
"""Download and extract the tarball, and download each individual photo."""
if self._check_integrity():
print("Files already downloaded and verified")
return
download_and_extract_archive(self.url, self.root, self.root, self.filename, self.md5_checksum)
# Download individual photos
with open(os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_urls.txt")) as fh:
for line in fh:
url = line.rstrip()
try:
download_url(url, os.path.join(self.root, "dataset"))
except OSError:
# The images point to public images on Flickr.
# Note: Images might be removed by users at anytime.
pass
|
from typing import TYPE_CHECKING, Optional, Type
from langchain_core.callbacks import (
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
if TYPE_CHECKING:
# This is for linting and IDE typehints
import multion
else:
try:
# We do this so pydantic can resolve the types when instantiating
import multion
except ImportError:
pass
class CreateSessionSchema(BaseModel):
"""Input for CreateSessionTool."""
query: str = Field(
...,
description="The query to run in multion agent.",
)
url: str = Field(
"https://www.google.com/",
description="""The Url to run the agent at. Note: accepts only secure \
links having https://""",
)
class MultionCreateSession(BaseTool):
"""Tool that creates a new Multion Browser Window with provided fields.
Attributes:
name: The name of the tool. Default: "create_multion_session"
description: The description of the tool.
args_schema: The schema for the tool's arguments.
"""
name: str = "create_multion_session"
description: str = """
Create a new web browsing session based on a user's command or request. \
The command should include the full info required for the session. \
    Also include a URL (defaults to google.com if no better option) \
to start the session. \
Use this tool to create a new Browser Window with provided fields. \
Always the first step to run any activities that can be done using browser.
"""
args_schema: Type[CreateSessionSchema] = CreateSessionSchema
def _run(
self,
query: str,
url: Optional[str] = "https://www.google.com/",
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> dict:
try:
response = multion.new_session({"input": query, "url": url})
return {
"sessionId": response["session_id"],
"Response": response["message"],
}
except Exception as e:
raise Exception(f"An error occurred: {e}")
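# Hedged usage sketch: it assumes the `multion` package is installed and already
# authenticated (e.g. via multion.login() elsewhere); the query text is illustrative.
if __name__ == "__main__":
    tool = MultionCreateSession()
    result = tool.run({"query": "find the weather in Berlin", "url": "https://www.google.com/"})
    print(result)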
|
from typing import TYPE_CHECKING, Optional, Type
from langchain_core.callbacks import (
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
if TYPE_CHECKING:
# This is for linting and IDE typehints
import multion
else:
try:
# We do this so pydantic can resolve the types when instantiating
import multion
except ImportError:
pass
class CreateSessionSchema(BaseModel):
"""Input for CreateSessionTool."""
query: str = Field(
...,
description="The query to run in multion agent.",
)
url: str = Field(
"https://www.google.com/",
description="""The Url to run the agent at. Note: accepts only secure \
links having https://""",
)
class MultionCreateSession(BaseTool): # type: ignore[override]
"""Tool that creates a new Multion Browser Window with provided fields.
Attributes:
name: The name of the tool. Default: "create_multion_session"
description: The description of the tool.
args_schema: The schema for the tool's arguments.
"""
name: str = "create_multion_session"
description: str = """
Create a new web browsing session based on a user's command or request. \
The command should include the full info required for the session. \
    Also include a URL (defaults to google.com if no better option) \
to start the session. \
Use this tool to create a new Browser Window with provided fields. \
Always the first step to run any activities that can be done using browser.
"""
args_schema: Type[CreateSessionSchema] = CreateSessionSchema
def _run(
self,
query: str,
url: Optional[str] = "https://www.google.com/",
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> dict:
try:
response = multion.new_session({"input": query, "url": url})
return {
"sessionId": response["session_id"],
"Response": response["message"],
}
except Exception as e:
raise Exception(f"An error occurred: {e}")
|
from typing import Any
from langchain_core.memory import BaseMemory
class SimpleMemory(BaseMemory):
"""Simple memory for storing context or other information that shouldn't
ever change between prompts.
"""
memories: dict[str, Any] = dict()
@property
def memory_variables(self) -> list[str]:
return list(self.memories.keys())
def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]:
return self.memories
def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
"""Nothing should be saved or changed, my memory is set in stone."""
def clear(self) -> None:
"""Nothing to clear, got a memory like a vault."""
|
from typing import Any
from langchain_core.memory import BaseMemory
class SimpleMemory(BaseMemory):
"""Simple memory for storing context or other information that shouldn't
ever change between prompts.
"""
memories: dict[str, Any] = dict()
@property
def memory_variables(self) -> list[str]:
return list(self.memories.keys())
def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]:
return self.memories
def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
"""Nothing should be saved or changed, my memory is set in stone."""
pass
def clear(self) -> None:
"""Nothing to clear, got a memory like a vault."""
pass
|
_base_ = './cascade-mask-rcnn_r50_fpn_20e_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
_base_ = './cascade_mask_rcnn_r50_fpn_20e_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
_base_ = 'ssd300_coco.py'
# model settings
input_size = 512
model = dict(
neck=dict(
out_channels=(512, 1024, 512, 256, 256, 256, 256),
level_strides=(2, 2, 2, 2, 1),
level_paddings=(1, 1, 1, 1, 1),
last_kernel_size=4),
bbox_head=dict(
in_channels=(512, 1024, 512, 256, 256, 256, 256),
anchor_generator=dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=input_size,
basesize_ratio_range=(0.1, 0.9),
strides=[8, 16, 32, 64, 128, 256, 512],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]])))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean={{_base_.model.data_preprocessor.mean}},
to_rgb={{_base_.model.data_preprocessor.bgr_to_rgb}},
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = 'ssd300_coco.py'
# model settings
input_size = 512
model = dict(
neck=dict(
out_channels=(512, 1024, 512, 256, 256, 256, 256),
level_strides=(2, 2, 2, 2, 1),
level_paddings=(1, 1, 1, 1, 1),
last_kernel_size=4),
bbox_head=dict(
in_channels=(512, 1024, 512, 256, 256, 256, 256),
anchor_generator=dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=input_size,
basesize_ratio_range=(0.1, 0.9),
strides=[8, 16, 32, 64, 128, 256, 512],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]])))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean={{_base_.model.data_preprocessor.mean}},
to_rgb={{_base_.model.data_preprocessor.bgr_to_rgb}},
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
"""
This script contains an example of how to perform semantic search with Elasticsearch.
You need Elasticsearch up and running locally:
https://www.elastic.co/guide/en/elasticsearch/reference/current/run-elasticsearch-locally.html
Further, you need the Python Elasticsearch Client installed: https://elasticsearch-py.readthedocs.io/, e.g.:
```
pip install elasticsearch
```
This script was created for `elasticsearch` v8.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_elasticsearch
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
sparse_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# 4. Encode the corpus
corpus_embeddings = sparse_model.encode(corpus, convert_to_sparse_tensor=True, batch_size=32, show_progress_bar=True)
corpus_index = None
while True:
# 5. Encode the queries using the full precision
start_time = time.time()
query_embeddings = sparse_model.encode(queries, convert_to_sparse_tensor=True)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 6. Perform semantic search using Elasticsearch
results, search_time, corpus_index = semantic_search_elasticsearch(
query_embeddings,
corpus_index=corpus_index,
corpus_embeddings=corpus_embeddings if corpus_index is None else None,
top_k=5,
output_index=True,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
"""
This script contains an example of how to perform semantic search with Elasticsearch.
You need Elasticsearch up and running locally:
https://www.elastic.co/guide/en/elasticsearch/reference/current/run-elasticsearch-locally.html
Further, you need the Python Elasticsearch Client installed: https://elasticsearch-py.readthedocs.io/, e.g.:
```
pip install elasticsearch
```
This script was created for `elasticsearch` v8.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_elasticsearch
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
sparse_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# 4. Encode the corpus
corpus_embeddings = sparse_model.encode(corpus, convert_to_sparse_tensor=True, batch_size=32, show_progress_bar=True)
corpus_index = None
while True:
    # 5. Encode the queries using the full precision
start_time = time.time()
query_embeddings = sparse_model.encode(queries, convert_to_sparse_tensor=True)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
    # 6. Perform semantic search using Elasticsearch
results, search_time, corpus_index = semantic_search_elasticsearch(
query_embeddings,
corpus_index=corpus_index,
corpus_embeddings=corpus_embeddings if corpus_index is None else None,
top_k=5,
output_index=True,
)
    # 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
    # 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
import numpy as np
import torch
from docarray import Document
from docarray.document import AnyDocument
from docarray.typing import AnyUrl, Embedding, ImageUrl, NdArray, TextUrl, TorchTensor
def test_proto_all_types():
class Mymmdoc(Document):
tensor: NdArray
torch_tensor: TorchTensor
embedding: Embedding
any_url: AnyUrl
image_url: ImageUrl
text_url: TextUrl
doc = Mymmdoc(
tensor=np.zeros((3, 224, 224)),
torch_tensor=torch.zeros((3, 224, 224)),
embedding=np.zeros((100, 1)),
any_url='http://jina.ai',
image_url='http://jina.ai/bla.jpg',
text_url='http://jina.ai',
)
new_doc = AnyDocument.from_protobuf(doc.to_protobuf())
for field, value in new_doc:
if field == 'embedding':
# embedding is a Union type, not supported by isinstance
assert isinstance(value, np.ndarray) or isinstance(value, torch.Tensor)
else:
assert isinstance(value, doc._get_nested_document_class(field))
|
import numpy as np
import torch
from docarray import Document
from docarray.document import AnyDocument
from docarray.typing import AnyUrl, Embedding, ImageUrl, NdArray, TextUrl, TorchTensor
def test_proto_all_types():
class Mymmdoc(Document):
tensor: NdArray
torch_tensor: TorchTensor
embedding: Embedding
any_url: AnyUrl
image_url: ImageUrl
text_url: TextUrl
doc = Mymmdoc(
tensor=np.zeros((3, 224, 224)),
torch_tensor=torch.zeros((3, 224, 224)),
embedding=np.zeros((100, 1)),
any_url='http://jina.ai',
image_url='http://jina.ai/bla.jpg',
text_url='http://jina.ai',
)
new_doc = AnyDocument.from_protobuf(doc.to_protobuf())
for field, value in new_doc:
assert isinstance(value, doc._get_nested_document_class(field))
|
"""
This file is part of the private API. Please do not use directly these classes as they will be modified on
future versions without warning. The classes should be accessed only via the transforms argument of Weights.
"""
from typing import Optional, Union
import PIL.Image
import torch
from torch import Tensor
from torchvision.transforms.v2 import functional as F, InterpolationMode
from torchvision.transforms.v2.functional._geometry import _check_interpolation
__all__ = ["StereoMatching"]
class StereoMatching(torch.nn.Module):
def __init__(
self,
*,
use_gray_scale: bool = False,
resize_size: Optional[tuple[int, ...]],
mean: tuple[float, ...] = (0.5, 0.5, 0.5),
std: tuple[float, ...] = (0.5, 0.5, 0.5),
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
) -> None:
super().__init__()
# pacify mypy
self.resize_size: Union[None, list]
if resize_size is not None:
self.resize_size = list(resize_size)
else:
self.resize_size = None
self.mean = list(mean)
self.std = list(std)
self.interpolation = _check_interpolation(interpolation)
self.use_gray_scale = use_gray_scale
def forward(self, left_image: Tensor, right_image: Tensor) -> tuple[Tensor, Tensor]:
def _process_image(img: PIL.Image.Image) -> Tensor:
if not isinstance(img, Tensor):
img = F.pil_to_tensor(img)
if self.resize_size is not None:
# We hard-code antialias=False to preserve results after we changed
# its default from None to True (see
# https://github.com/pytorch/vision/pull/7160)
# TODO: we could re-train the stereo models with antialias=True?
img = F.resize(img, self.resize_size, interpolation=self.interpolation, antialias=False)
if self.use_gray_scale is True:
img = F.rgb_to_grayscale(img)
img = F.convert_image_dtype(img, torch.float)
img = F.normalize(img, mean=self.mean, std=self.std)
img = img.contiguous()
return img
left_image = _process_image(left_image)
right_image = _process_image(right_image)
return left_image, right_image
def __repr__(self) -> str:
format_string = self.__class__.__name__ + "("
format_string += f"\n resize_size={self.resize_size}"
format_string += f"\n mean={self.mean}"
format_string += f"\n std={self.std}"
format_string += f"\n interpolation={self.interpolation}"
format_string += "\n)"
return format_string
def describe(self) -> str:
return (
"Accepts ``PIL.Image``, batched ``(B, C, H, W)`` and single ``(C, H, W)`` image ``torch.Tensor`` objects. "
f"The images are resized to ``resize_size={self.resize_size}`` using ``interpolation={self.interpolation}``. "
f"Finally the values are first rescaled to ``[0.0, 1.0]`` and then normalized using ``mean={self.mean}`` and "
f"``std={self.std}``."
)
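# Minimal sketch of applying the preset to a stereo pair (assumption:
# torchvision with transforms.v2 is available; the random tensors stand in for
# real left/right frames).
if __name__ == "__main__":
    preset = StereoMatching(resize_size=(384, 768))
    left, right = preset(torch.rand(3, 480, 640), torch.rand(3, 480, 640))
    print(left.shape, right.shape)  # torch.Size([3, 384, 768]) for both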
|
"""
This file is part of the private API. Please do not use directly these classes as they will be modified on
future versions without warning. The classes should be accessed only via the transforms argument of Weights.
"""
from typing import List, Optional, Tuple, Union
import PIL.Image
import torch
from torch import Tensor
from torchvision.transforms.v2 import functional as F, InterpolationMode
from torchvision.transforms.v2.functional._geometry import _check_interpolation
__all__ = ["StereoMatching"]
class StereoMatching(torch.nn.Module):
def __init__(
self,
*,
use_gray_scale: bool = False,
resize_size: Optional[Tuple[int, ...]],
mean: Tuple[float, ...] = (0.5, 0.5, 0.5),
std: Tuple[float, ...] = (0.5, 0.5, 0.5),
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
) -> None:
super().__init__()
# pacify mypy
self.resize_size: Union[None, List]
if resize_size is not None:
self.resize_size = list(resize_size)
else:
self.resize_size = None
self.mean = list(mean)
self.std = list(std)
self.interpolation = _check_interpolation(interpolation)
self.use_gray_scale = use_gray_scale
def forward(self, left_image: Tensor, right_image: Tensor) -> Tuple[Tensor, Tensor]:
def _process_image(img: PIL.Image.Image) -> Tensor:
if not isinstance(img, Tensor):
img = F.pil_to_tensor(img)
if self.resize_size is not None:
# We hard-code antialias=False to preserve results after we changed
# its default from None to True (see
# https://github.com/pytorch/vision/pull/7160)
# TODO: we could re-train the stereo models with antialias=True?
img = F.resize(img, self.resize_size, interpolation=self.interpolation, antialias=False)
if self.use_gray_scale is True:
img = F.rgb_to_grayscale(img)
img = F.convert_image_dtype(img, torch.float)
img = F.normalize(img, mean=self.mean, std=self.std)
img = img.contiguous()
return img
left_image = _process_image(left_image)
right_image = _process_image(right_image)
return left_image, right_image
def __repr__(self) -> str:
format_string = self.__class__.__name__ + "("
format_string += f"\n resize_size={self.resize_size}"
format_string += f"\n mean={self.mean}"
format_string += f"\n std={self.std}"
format_string += f"\n interpolation={self.interpolation}"
format_string += "\n)"
return format_string
def describe(self) -> str:
return (
"Accepts ``PIL.Image``, batched ``(B, C, H, W)`` and single ``(C, H, W)`` image ``torch.Tensor`` objects. "
f"The images are resized to ``resize_size={self.resize_size}`` using ``interpolation={self.interpolation}``. "
f"Finally the values are first rescaled to ``[0.0, 1.0]`` and then normalized using ``mean={self.mean}`` and "
f"``std={self.std}``."
)
|
from typing import Any, Dict, Optional, Type
from jina.jaml.parsers.base import BaseLegacyParser
from jina.serve.gateway import BaseGateway
class GatewayLegacyParser(BaseLegacyParser):
"""Legacy parser for gateway."""
def parse(
self,
cls: Type['BaseGateway'],
data: Dict,
runtime_args: Optional[Dict[str, Any]] = None,
) -> 'BaseGateway':
"""
:param cls: target class type to parse into, must be a :class:`JAMLCompatible` type
:param data: gateway yaml file loaded as python dict
:param runtime_args: Optional runtime_args to be directly passed without being parsed into a yaml config
        :return: the parsed Gateway object
"""
from jina.logging.predefined import default_logger
data['metas'] = {}
cls._init_from_yaml = True
# tmp_p = {kk: expand_env_var(vv) for kk, vv in data.get('with', {}).items()}
for key in {
'name',
'port',
'protocol',
'host',
'tracing',
'graph_description',
'graph_conditions',
'deployments_addresses',
'deployments_metadata',
'deployments_no_reduce',
'timeout_send',
'retries',
'compression',
'runtime_name',
'prefetch',
'meter',
'log_config',
}:
if runtime_args and not runtime_args.get(key) and data.get(key):
runtime_args[key] = data.get(key)
if runtime_args.get('default_port'):
yaml_port = data.get('port')
if isinstance(yaml_port, int):
yaml_port = [yaml_port]
runtime_args['port'] = yaml_port or runtime_args.get('port')
obj = cls(
**data.get('with', {}),
metas=data.get('metas', {}),
requests=data.get('requests', {}),
runtime_args=runtime_args,
)
cls._init_from_yaml = False
obj.is_updated = False
return obj
def dump(self, data: 'BaseGateway') -> Dict:
"""
:param data: versioned gateway object
:return: the dictionary given a versioned gateway object
"""
a = {k: v for k, v in data._init_kwargs_dict.items()}
r = {}
if a:
r['with'] = a
return r
|
from typing import Any, Dict, Optional, Type
from jina.jaml.parsers.base import BaseLegacyParser
from jina.serve.gateway import BaseGateway
class GatewayLegacyParser(BaseLegacyParser):
"""Legacy parser for gateway."""
def parse(
self,
cls: Type['BaseGateway'],
data: Dict,
runtime_args: Optional[Dict[str, Any]] = None,
) -> 'BaseGateway':
"""
:param cls: target class type to parse into, must be a :class:`JAMLCompatible` type
:param data: gateway yaml file loaded as python dict
:param runtime_args: Optional runtime_args to be directly passed without being parsed into a yaml config
        :return: the parsed Gateway object
"""
from jina.logging.predefined import default_logger
data['metas'] = {}
cls._init_from_yaml = True
# tmp_p = {kk: expand_env_var(vv) for kk, vv in data.get('with', {}).items()}
obj = cls(
**data.get('with', {}),
metas=data.get('metas', {}),
requests=data.get('requests', {}),
runtime_args=runtime_args,
)
cls._init_from_yaml = False
obj.is_updated = False
return obj
def dump(self, data: 'BaseGateway') -> Dict:
"""
:param data: versioned gateway object
:return: the dictionary given a versioned gateway object
"""
a = {k: v for k, v in data._init_kwargs_dict.items()}
r = {}
if a:
r['with'] = a
return r
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from jina import Flow, Document
from ...laser_encoder import LaserEncoder
def data_generator(num_docs):
for i in range(num_docs):
doc = Document(
text='it is a good day! the dog sits on the floor.')
yield doc
def test_use_in_flow():
with Flow.load_config('flow.yml') as flow:
resp = flow.post(on='/encode', inputs=data_generator(5), return_results=True)
docs = resp[0].docs
assert len(docs) == 5
for doc in docs:
assert doc.embedding.shape == (1024,)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from jina import Flow, Document
from jinahub.encoder.laser_encoder import LaserEncoder
def data_generator(num_docs):
for i in range(num_docs):
doc = Document(
text='it is a good day! the dog sits on the floor.')
yield doc
def test_use_in_flow():
with Flow.load_config('flow.yml') as flow:
resp = flow.post(on='/encode', inputs=data_generator(5), return_results=True)
docs = resp[0].docs
assert len(docs) == 5
for doc in docs:
assert doc.embedding.shape == (1024,)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.llms.loading import load_llm, load_llm_from_config
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"load_llm_from_config": "langchain_community.llms.loading",
"load_llm": "langchain_community.llms.loading",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"load_llm",
"load_llm_from_config",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.llms.loading import load_llm, load_llm_from_config
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"load_llm_from_config": "langchain_community.llms.loading",
"load_llm": "langchain_community.llms.loading",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"load_llm_from_config",
"load_llm",
]
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_donut_swin import *
from .feature_extraction_donut import *
from .image_processing_donut import *
from .image_processing_donut_fast import *
from .modeling_donut_swin import *
from .processing_donut import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_donut_swin import *
from .feature_extraction_donut import *
from .image_processing_donut import *
from .modeling_donut_swin import *
from .processing_donut import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
"""Schema for Blobs and Blob Loaders.
The goal is to facilitate decoupling of content loading from content parsing code.
In addition, content loading code should provide a lazy loading interface by default.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING
# Re-export Blob and PathLike for backwards compatibility
from langchain_core.documents.base import Blob, PathLike
if TYPE_CHECKING:
from collections.abc import Iterable
class BlobLoader(ABC):
"""Abstract interface for blob loaders implementation.
Implementer should be able to load raw content from a storage system according
to some criteria and return the raw content lazily as a stream of blobs.
"""
@abstractmethod
def yield_blobs(
self,
) -> Iterable[Blob]:
"""A lazy loader for raw data represented by LangChain's Blob object.
Returns:
A generator over blobs
"""
# Re-export Blob and Pathlike for backwards compatibility
__all__ = ["Blob", "BlobLoader", "PathLike"]
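# A minimal concrete loader, sketched for illustration only (it is not part of
# the library): it wraps an in-memory list of strings as text/plain blobs.
class _InMemoryBlobLoader(BlobLoader):
    """Toy loader that yields one Blob per in-memory string."""
    def __init__(self, texts: list[str]) -> None:
        self.texts = texts
    def yield_blobs(self) -> Iterable[Blob]:
        for text in self.texts:
            yield Blob.from_data(text, mime_type="text/plain")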
|
"""Schema for Blobs and Blob Loaders.
The goal is to facilitate decoupling of content loading from content parsing code.
In addition, content loading code should provide a lazy loading interface by default.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING
# Re-export Blob and PathLike for backwards compatibility
from langchain_core.documents.base import Blob as Blob
from langchain_core.documents.base import PathLike as PathLike
if TYPE_CHECKING:
from collections.abc import Iterable
class BlobLoader(ABC):
"""Abstract interface for blob loaders implementation.
Implementer should be able to load raw content from a storage system according
to some criteria and return the raw content lazily as a stream of blobs.
"""
@abstractmethod
def yield_blobs(
self,
) -> Iterable[Blob]:
"""A lazy loader for raw data represented by LangChain's Blob object.
Returns:
A generator over blobs
"""
# Re-export Blob and Pathlike for backwards compatibility
__all__ = ["Blob", "BlobLoader", "PathLike"]
|
from langchain_core.runnables.configurable import (
DynamicRunnable,
RunnableConfigurableAlternatives,
RunnableConfigurableFields,
StrEnum,
make_options_spec,
)
__all__ = [
"DynamicRunnable",
"RunnableConfigurableAlternatives",
"RunnableConfigurableFields",
"StrEnum",
"make_options_spec",
]
|
from langchain_core.runnables.configurable import (
DynamicRunnable,
RunnableConfigurableAlternatives,
RunnableConfigurableFields,
StrEnum,
make_options_spec,
)
__all__ = [
"DynamicRunnable",
"RunnableConfigurableFields",
"StrEnum",
"RunnableConfigurableAlternatives",
"make_options_spec",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple
from torch import Tensor
from mmdet.registry import MODELS
from .standard_roi_head import StandardRoIHead
@MODELS.register_module()
class DoubleHeadRoIHead(StandardRoIHead):
"""RoI head for `Double Head RCNN <https://arxiv.org/abs/1904.06493>`_.
Args:
reg_roi_scale_factor (float): The scale factor to extend the rois
used to extract the regression features.
"""
def __init__(self, reg_roi_scale_factor: float, **kwargs):
super().__init__(**kwargs)
self.reg_roi_scale_factor = reg_roi_scale_factor
def _bbox_forward(self, x: Tuple[Tensor], rois: Tensor) -> dict:
"""Box head forward function used in both training and testing.
Args:
x (tuple[Tensor]): List of multi-level img features.
rois (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
Returns:
dict[str, Tensor]: Usually returns a dictionary with keys:
- `cls_score` (Tensor): Classification scores.
- `bbox_pred` (Tensor): Box energies / deltas.
- `bbox_feats` (Tensor): Extract bbox RoI features.
"""
bbox_cls_feats = self.bbox_roi_extractor(
x[:self.bbox_roi_extractor.num_inputs], rois)
bbox_reg_feats = self.bbox_roi_extractor(
x[:self.bbox_roi_extractor.num_inputs],
rois,
roi_scale_factor=self.reg_roi_scale_factor)
if self.with_shared_head:
bbox_cls_feats = self.shared_head(bbox_cls_feats)
bbox_reg_feats = self.shared_head(bbox_reg_feats)
cls_score, bbox_pred = self.bbox_head(bbox_cls_feats, bbox_reg_feats)
bbox_results = dict(
cls_score=cls_score,
bbox_pred=bbox_pred,
bbox_feats=bbox_cls_feats)
return bbox_results
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .standard_roi_head import StandardRoIHead
@MODELS.register_module()
class DoubleHeadRoIHead(StandardRoIHead):
"""RoI head for Double Head RCNN.
https://arxiv.org/abs/1904.06493
"""
def __init__(self, reg_roi_scale_factor, **kwargs):
super(DoubleHeadRoIHead, self).__init__(**kwargs)
self.reg_roi_scale_factor = reg_roi_scale_factor
def _bbox_forward(self, x, rois):
"""Box head forward function used in both training and testing time."""
bbox_cls_feats = self.bbox_roi_extractor(
x[:self.bbox_roi_extractor.num_inputs], rois)
bbox_reg_feats = self.bbox_roi_extractor(
x[:self.bbox_roi_extractor.num_inputs],
rois,
roi_scale_factor=self.reg_roi_scale_factor)
if self.with_shared_head:
bbox_cls_feats = self.shared_head(bbox_cls_feats)
bbox_reg_feats = self.shared_head(bbox_reg_feats)
cls_score, bbox_pred = self.bbox_head(bbox_cls_feats, bbox_reg_feats)
bbox_results = dict(
cls_score=cls_score,
bbox_pred=bbox_pred,
bbox_feats=bbox_cls_feats)
return bbox_results
|
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any
from langchain_core.runnables.config import run_in_executor
if TYPE_CHECKING:
from collections.abc import Sequence
from langchain_core.documents import Document
class BaseDocumentTransformer(ABC):
"""Abstract base class for document transformation.
A document transformation takes a sequence of Documents and returns a
sequence of transformed Documents.
Example:
.. code-block:: python
class EmbeddingsRedundantFilter(BaseDocumentTransformer, BaseModel):
embeddings: Embeddings
similarity_fn: Callable = cosine_similarity
similarity_threshold: float = 0.95
class Config:
arbitrary_types_allowed = True
def transform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
stateful_documents = get_stateful_documents(documents)
embedded_documents = _get_embeddings_from_stateful_docs(
self.embeddings, stateful_documents
)
included_idxs = _filter_similar_embeddings(
embedded_documents, self.similarity_fn, self.similarity_threshold
)
return [stateful_documents[i] for i in sorted(included_idxs)]
async def atransform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
raise NotImplementedError
""" # noqa: E501
@abstractmethod
def transform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
"""Transform a list of documents.
Args:
documents: A sequence of Documents to be transformed.
Returns:
A sequence of transformed Documents.
"""
async def atransform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
"""Asynchronously transform a list of documents.
Args:
documents: A sequence of Documents to be transformed.
Returns:
A sequence of transformed Documents.
"""
return await run_in_executor(
None, self.transform_documents, documents, **kwargs
)
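# A minimal concrete transformer, sketched for illustration (not part of the
# library): it strips surrounding whitespace from each page_content.
class _WhitespaceStripper(BaseDocumentTransformer):
    def transform_documents(
        self, documents: Sequence[Document], **kwargs: Any
    ) -> Sequence[Document]:
        from langchain_core.documents import Document  # runtime import; the one above is TYPE_CHECKING-only
        return [
            Document(page_content=doc.page_content.strip(), metadata=doc.metadata)
            for doc in documents
        ]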
|
from __future__ import annotations
from abc import ABC, abstractmethod
from collections.abc import Sequence
from typing import TYPE_CHECKING, Any
from langchain_core.runnables.config import run_in_executor
if TYPE_CHECKING:
from langchain_core.documents import Document
class BaseDocumentTransformer(ABC):
"""Abstract base class for document transformation.
A document transformation takes a sequence of Documents and returns a
sequence of transformed Documents.
Example:
.. code-block:: python
class EmbeddingsRedundantFilter(BaseDocumentTransformer, BaseModel):
embeddings: Embeddings
similarity_fn: Callable = cosine_similarity
similarity_threshold: float = 0.95
class Config:
arbitrary_types_allowed = True
def transform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
stateful_documents = get_stateful_documents(documents)
embedded_documents = _get_embeddings_from_stateful_docs(
self.embeddings, stateful_documents
)
included_idxs = _filter_similar_embeddings(
embedded_documents, self.similarity_fn, self.similarity_threshold
)
return [stateful_documents[i] for i in sorted(included_idxs)]
async def atransform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
raise NotImplementedError
""" # noqa: E501
@abstractmethod
def transform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
"""Transform a list of documents.
Args:
documents: A sequence of Documents to be transformed.
Returns:
A sequence of transformed Documents.
"""
async def atransform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
"""Asynchronously transform a list of documents.
Args:
documents: A sequence of Documents to be transformed.
Returns:
A sequence of transformed Documents.
"""
return await run_in_executor(
None, self.transform_documents, documents, **kwargs
)
|
"""``langchain-core`` defines the base abstractions for the LangChain ecosystem.
The interfaces for core components like chat models, LLMs, vector stores, retrievers,
and more are defined here. The universal invocation protocol (Runnables) along with
a syntax for combining components (LangChain Expression Language) are also defined here.
No third-party integrations are defined here. The dependencies are kept purposefully
very lightweight.
"""
from langchain_core._api import (
surface_langchain_beta_warnings,
surface_langchain_deprecation_warnings,
)
from langchain_core.version import VERSION
__version__ = VERSION
surface_langchain_deprecation_warnings()
surface_langchain_beta_warnings()
|
"""``langchain-core`` defines the base abstractions for the LangChain ecosystem.
The interfaces for core components like chat models, LLMs, vector stores, retrievers,
and more are defined here. The universal invocation protocol (Runnables) along with
a syntax for combining components (LangChain Expression Language) are also defined here.
No third-party integrations are defined here. The dependencies are kept purposefully
very lightweight.
"""
from importlib import metadata
from langchain_core._api import (
surface_langchain_beta_warnings,
surface_langchain_deprecation_warnings,
)
try:
__version__ = metadata.version(__package__)
except metadata.PackageNotFoundError:
# Case where package metadata is not available.
__version__ = ""
surface_langchain_deprecation_warnings()
surface_langchain_beta_warnings()
|
from typing import (
TYPE_CHECKING,
Iterable,
)
from docarray.array.memory import DocumentArrayInMemory
if TYPE_CHECKING:
from docarray.document import Document
class MatchArray(DocumentArrayInMemory):
"""
:class:`MatchArray` inherits from :class:`DocumentArray`.
It's a subset of Documents that represents the matches
:param docs: Set of matches of the `reference_doc`
:param reference_doc: Reference :class:`Document` for the sub-documents
"""
def __init__(self, docs, reference_doc: 'Document'):
self._ref_doc = reference_doc
super().__init__(docs)
if isinstance(docs, Iterable) and self._ref_doc is not None:
for d in docs:
d.adjacency = self._ref_doc.adjacency + 1
def append(self, document: 'Document'):
"""Add a matched document to the current Document.
:param document: Sub-document to be added
"""
document.adjacency = self._ref_doc.adjacency + 1
super().append(document)
@property
def reference_doc(self) -> 'Document':
"""Get the document that this :class:`MatchArray` referring to.
:return: the document the match refers to
"""
return self._ref_doc
@property
def granularity(self) -> int:
"""Get granularity of all document in this array.
        :return: the granularity of the documents of which these are matches
"""
return self._ref_doc.granularity
@property
def adjacency(self) -> int:
"""Get the adjacency of all document in this array.
:return: the adjacency of the array of matches
"""
return self._ref_doc.adjacency + 1
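# Minimal usage sketch (assumption: the pre-0.30 docarray `Document` API):
if __name__ == "__main__":
    from docarray import Document
    ref = Document(text='query')
    matches = MatchArray([Document(text='hit')], reference_doc=ref)
    print(matches.adjacency, matches[0].adjacency)  # both ref.adjacency + 1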
|
from typing import (
TYPE_CHECKING,
Iterable,
)
from .memory import DocumentArrayInMemory
if TYPE_CHECKING:
from ..document import Document
class MatchArray(DocumentArrayInMemory):
"""
:class:`MatchArray` inherits from :class:`DocumentArray`.
It's a subset of Documents that represents the matches
:param docs: Set of matches of the `reference_doc`
:param reference_doc: Reference :class:`Document` for the sub-documents
"""
def __init__(self, docs, reference_doc: 'Document'):
self._ref_doc = reference_doc
super().__init__(docs)
if isinstance(docs, Iterable) and self._ref_doc is not None:
for d in docs:
d.adjacency = self._ref_doc.adjacency + 1
def append(self, document: 'Document'):
"""Add a matched document to the current Document.
:param document: Sub-document to be added
"""
document.adjacency = self._ref_doc.adjacency + 1
super().append(document)
@property
def reference_doc(self) -> 'Document':
"""Get the document that this :class:`MatchArray` referring to.
:return: the document the match refers to
"""
return self._ref_doc
@property
def granularity(self) -> int:
"""Get granularity of all document in this array.
        :return: the granularity of the documents of which these are matches
"""
return self._ref_doc.granularity
@property
def adjacency(self) -> int:
"""Get the adjacency of all document in this array.
:return: the adjacency of the array of matches
"""
return self._ref_doc.adjacency + 1
|
# Copyright (c) OpenMMLab. All rights reserved.
from .data_preprocessor import (BatchFixedSizePad, BatchResize,
BatchSyncRandomResize, BoxInstDataPreprocessor,
DetDataPreprocessor,
MultiBranchDataPreprocessor)
from .track_data_preprocessor import TrackDataPreprocessor
__all__ = [
'DetDataPreprocessor', 'BatchSyncRandomResize', 'BatchFixedSizePad',
'MultiBranchDataPreprocessor', 'BatchResize', 'BoxInstDataPreprocessor',
'TrackDataPreprocessor'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .data_preprocessor import (BatchFixedSizePad, BatchResize,
BatchSyncRandomResize, BoxInstDataPreprocessor,
DetDataPreprocessor,
MultiBranchDataPreprocessor)
__all__ = [
'DetDataPreprocessor', 'BatchSyncRandomResize', 'BatchFixedSizePad',
'MultiBranchDataPreprocessor', 'BatchResize', 'BoxInstDataPreprocessor'
]
|
import os
from pathlib import Path
from torchaudio.datasets import gtzan
from torchaudio_unittest.common_utils import (
get_whitenoise,
normalize_wav,
save_wav,
TempDirMixin,
TorchaudioTestCase,
)
def get_mock_dataset(root_dir):
"""
root_dir: directory to the mocked dataset
"""
mocked_samples = []
mocked_training = []
mocked_validation = []
mocked_testing = []
sample_rate = 22050
seed = 0
for genre in gtzan.gtzan_genres:
base_dir = os.path.join(root_dir, "genres", genre)
os.makedirs(base_dir, exist_ok=True)
for i in range(100):
filename = f"{genre}.{i:05d}"
path = os.path.join(base_dir, f"{filename}.wav")
data = get_whitenoise(sample_rate=sample_rate, duration=0.01, n_channels=1, dtype="int16", seed=seed)
save_wav(path, data, sample_rate)
sample = (normalize_wav(data), sample_rate, genre)
mocked_samples.append(sample)
if filename in gtzan.filtered_test:
mocked_testing.append(sample)
if filename in gtzan.filtered_train:
mocked_training.append(sample)
if filename in gtzan.filtered_valid:
mocked_validation.append(sample)
seed += 1
return (mocked_samples, mocked_training, mocked_validation, mocked_testing)
class TestGTZAN(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
samples = []
training = []
validation = []
testing = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
mocked_data = get_mock_dataset(cls.root_dir)
cls.samples = mocked_data[0]
cls.training = mocked_data[1]
cls.validation = mocked_data[2]
cls.testing = mocked_data[3]
def test_no_subset(self):
dataset = gtzan.GTZAN(self.root_dir)
n_ite = 0
for i, (waveform, sample_rate, label) in enumerate(dataset):
self.assertEqual(waveform, self.samples[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.samples[i][1]
assert label == self.samples[i][2]
n_ite += 1
assert n_ite == len(self.samples)
def _test_training(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, label) in enumerate(dataset):
self.assertEqual(waveform, self.training[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.training[i][1]
assert label == self.training[i][2]
n_ite += 1
assert n_ite == len(self.training)
def _test_validation(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, label) in enumerate(dataset):
self.assertEqual(waveform, self.validation[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.validation[i][1]
assert label == self.validation[i][2]
n_ite += 1
assert n_ite == len(self.validation)
def _test_testing(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, label) in enumerate(dataset):
self.assertEqual(waveform, self.testing[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.testing[i][1]
assert label == self.testing[i][2]
n_ite += 1
assert n_ite == len(self.testing)
def test_training_str(self):
train_dataset = gtzan.GTZAN(self.root_dir, subset="training")
self._test_training(train_dataset)
def test_validation_str(self):
val_dataset = gtzan.GTZAN(self.root_dir, subset="validation")
self._test_validation(val_dataset)
def test_testing_str(self):
test_dataset = gtzan.GTZAN(self.root_dir, subset="testing")
self._test_testing(test_dataset)
def test_training_path(self):
root_dir = Path(self.root_dir)
train_dataset = gtzan.GTZAN(root_dir, subset="training")
self._test_training(train_dataset)
def test_validation_path(self):
root_dir = Path(self.root_dir)
val_dataset = gtzan.GTZAN(root_dir, subset="validation")
self._test_validation(val_dataset)
def test_testing_path(self):
root_dir = Path(self.root_dir)
test_dataset = gtzan.GTZAN(root_dir, subset="testing")
self._test_testing(test_dataset)
|
import os
from pathlib import Path
from torchaudio.datasets import gtzan
from torchaudio_unittest.common_utils import (
TempDirMixin,
TorchaudioTestCase,
get_whitenoise,
save_wav,
normalize_wav,
)
def get_mock_dataset(root_dir):
"""
root_dir: directory to the mocked dataset
"""
mocked_samples = []
mocked_training = []
mocked_validation = []
mocked_testing = []
sample_rate = 22050
seed = 0
for genre in gtzan.gtzan_genres:
base_dir = os.path.join(root_dir, "genres", genre)
os.makedirs(base_dir, exist_ok=True)
for i in range(100):
filename = f"{genre}.{i:05d}"
path = os.path.join(base_dir, f"{filename}.wav")
data = get_whitenoise(sample_rate=sample_rate, duration=0.01, n_channels=1, dtype="int16", seed=seed)
save_wav(path, data, sample_rate)
sample = (normalize_wav(data), sample_rate, genre)
mocked_samples.append(sample)
if filename in gtzan.filtered_test:
mocked_testing.append(sample)
if filename in gtzan.filtered_train:
mocked_training.append(sample)
if filename in gtzan.filtered_valid:
mocked_validation.append(sample)
seed += 1
return (mocked_samples, mocked_training, mocked_validation, mocked_testing)
class TestGTZAN(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
samples = []
training = []
validation = []
testing = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
mocked_data = get_mock_dataset(cls.root_dir)
cls.samples = mocked_data[0]
cls.training = mocked_data[1]
cls.validation = mocked_data[2]
cls.testing = mocked_data[3]
def test_no_subset(self):
dataset = gtzan.GTZAN(self.root_dir)
n_ite = 0
for i, (waveform, sample_rate, label) in enumerate(dataset):
self.assertEqual(waveform, self.samples[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.samples[i][1]
assert label == self.samples[i][2]
n_ite += 1
assert n_ite == len(self.samples)
def _test_training(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, label) in enumerate(dataset):
self.assertEqual(waveform, self.training[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.training[i][1]
assert label == self.training[i][2]
n_ite += 1
assert n_ite == len(self.training)
def _test_validation(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, label) in enumerate(dataset):
self.assertEqual(waveform, self.validation[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.validation[i][1]
assert label == self.validation[i][2]
n_ite += 1
assert n_ite == len(self.validation)
def _test_testing(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, label) in enumerate(dataset):
self.assertEqual(waveform, self.testing[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.testing[i][1]
assert label == self.testing[i][2]
n_ite += 1
assert n_ite == len(self.testing)
def test_training_str(self):
train_dataset = gtzan.GTZAN(self.root_dir, subset="training")
self._test_training(train_dataset)
def test_validation_str(self):
val_dataset = gtzan.GTZAN(self.root_dir, subset="validation")
self._test_validation(val_dataset)
def test_testing_str(self):
test_dataset = gtzan.GTZAN(self.root_dir, subset="testing")
self._test_testing(test_dataset)
def test_training_path(self):
root_dir = Path(self.root_dir)
train_dataset = gtzan.GTZAN(root_dir, subset="training")
self._test_training(train_dataset)
def test_validation_path(self):
root_dir = Path(self.root_dir)
val_dataset = gtzan.GTZAN(root_dir, subset="validation")
self._test_validation(val_dataset)
def test_testing_path(self):
root_dir = Path(self.root_dir)
test_dataset = gtzan.GTZAN(root_dir, subset="testing")
self._test_testing(test_dataset)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centripetal_head import CentripetalHead
from .corner_head import CornerHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet, YOLACTSegmHead
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTSegmHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead',
'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead',
'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead', 'CascadeRPNHead',
'AutoAssignHead', 'DETRHead', 'YOLOFHead', 'DeformableDETRHead',
'SOLOHead', 'DecoupledSOLOHead', 'CenterNetHead', 'YOLOXHead',
'DecoupledSOLOLightHead', 'LADHead', 'TOODHead'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centripetal_head import CentripetalHead
from .corner_head import CornerHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .ssd_head import SSDHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet, YOLACTSegmHead
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTSegmHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead',
'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead',
'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead', 'CascadeRPNHead',
'AutoAssignHead', 'DETRHead', 'YOLOFHead', 'DeformableDETRHead',
'SOLOHead', 'DecoupledSOLOHead', 'CenterNetHead', 'YOLOXHead',
'DecoupledSOLOLightHead', 'LADHead'
]
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import TranslationEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseTranslationEvaluator(TranslationEvaluator):
def __init__(
self,
source_sentences: list[str],
target_sentences: list[str],
show_progress_bar: bool = False,
batch_size: int = 16,
name: str = "",
print_wrong_matches: bool = False,
write_csv: bool = True,
truncate_dim: int | None = None,
):
return super().__init__(
source_sentences,
target_sentences,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
name=name,
print_wrong_matches=print_wrong_matches,
write_csv=write_csv,
truncate_dim=truncate_dim,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model, output_path=output_path, epoch=epoch, steps=steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> list[Tensor]:
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_tensor=False,
convert_to_sparse_tensor=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
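# Hedged usage sketch: assumes sentence-transformers with sparse-encoder support
# is installed and the SPLADE checkpoint can be downloaded; the sentence pairs
# are illustrative.
if __name__ == "__main__":
    from sentence_transformers import SparseEncoder
    model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
    evaluator = SparseTranslationEvaluator(
        source_sentences=["Hello world", "How are you?"],
        target_sentences=["Hallo Welt", "Wie geht es dir?"],
        name="en-de-demo",
    )
    print(evaluator(model))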
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import TranslationEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseTranslationEvaluator(TranslationEvaluator):
def __init__(
self,
source_sentences: list[str],
target_sentences: list[str],
show_progress_bar: bool = False,
batch_size: int = 16,
name: str = "",
print_wrong_matches: bool = False,
write_csv: bool = True,
truncate_dim: int | None = None,
):
return super().__init__(
source_sentences,
target_sentences,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
name=name,
print_wrong_matches=print_wrong_matches,
write_csv=write_csv,
truncate_dim=truncate_dim,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model, output_path=output_path, epoch=epoch, steps=steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> list[Tensor]:
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Dict, Optional, Tuple
import numpy as np
import paddlehub as hub
from jina import DocumentArray, Executor, requests
from jina_commons.batching import get_docs_batch_generator
class TextPaddleEncoder(Executor):
"""
    Encode an array of strings of size `B` into an ndarray of embeddings of size `B x D`.
    Internally, :class:`TextPaddleEncoder` wraps the Ernie module from paddlehub.
https://github.com/PaddlePaddle/PaddleHub
For models' details refer to
https://www.paddlepaddle.org.cn/hublist?filter=en_category&value=SemanticModel
:param model_name: the name of the model. Supported models include
``ernie``, ``ernie_tiny``, ``ernie_v2_eng_base``, ``ernie_v2_eng_large``,
``bert_chinese_L-12_H-768_A-12``, ``bert_multi_cased_L-12_H-768_A-12``,
``bert_multi_uncased_L-12_H-768_A-12``, ``bert_uncased_L-12_H-768_A-12``,
``bert_uncased_L-24_H-1024_A-16``, ``chinese-bert-wwm``,
``chinese-bert-wwm-ext``, ``chinese-electra-base``,
``chinese-electra-small``, ``chinese-roberta-wwm-ext``,
``chinese-roberta-wwm-ext-large``, ``rbt3``, ``rbtl3``
    :param on_gpu: Whether to use the GPU to compute the output.
    :param default_batch_size: fallback batch size in case no batch size is sent in the request
    :param default_traversal_paths: fallback traversal paths in case no traversal paths are sent in the request
:param args: Additional positional arguments
:param kwargs: Additional keyword arguments
"""
def __init__(
self,
model_name: Optional[str] = 'ernie_tiny',
on_gpu: bool = False,
default_batch_size: int = 32,
default_traversal_paths: Tuple[str] = ('r',),
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.on_gpu = on_gpu
self.model = hub.Module(name=model_name)
self.default_batch_size = default_batch_size
self.default_traversal_paths = default_traversal_paths
@requests
def encode(self, docs: DocumentArray, parameters: Dict, **kwargs):
"""Encode doc content into vector representation.
:param docs: `DocumentArray` passed from the previous ``Executor``.
:param parameters: dictionary to define the `traversal_paths` and the `batch_size`. For example,
`parameters={'traversal_paths': ['r'], 'batch_size': 10}`.
:param kwargs: Additional key value arguments.
"""
if docs:
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='text',
)
for batch_of_docs in document_batches_generator:
pooled_features = []
contents = [[doc.content] for doc in batch_of_docs]
results = self.model.get_embedding(contents, use_gpu=self.on_gpu)
for pooled_feature, _ in results:
pooled_features.append(pooled_feature)
for doc, feature in zip(batch_of_docs, pooled_features):
doc.embedding = np.asarray(feature)
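# Hedged usage sketch (not part of the original executor): the encoder is meant
# to run inside a Jina Flow, where it fills `doc.embedding` for documents that
# carry text. The paddlehub model is downloaded on first instantiation.
#
#   from jina import Document, DocumentArray, Flow
#
#   f = Flow().add(uses=TextPaddleEncoder, uses_with={'model_name': 'ernie_tiny'})
#   with f:
#       resp = f.post(
#           on='/index',
#           inputs=DocumentArray([Document(text='hello world')]),
#           return_results=True,
#       )
#   # each returned document now carries a dense `embedding` vector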
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Optional, List, Any, Dict, Tuple
import numpy as np
import paddlehub as hub
from jina import Executor, DocumentArray, requests
from jina_commons.batching import get_docs_batch_generator
class TextPaddleEncoder(Executor):
"""
    Encode an array of strings of size `B` into an ndarray of embeddings of size `B x D`.
    Internally, :class:`TextPaddleEncoder` wraps the Ernie module from paddlehub.
https://github.com/PaddlePaddle/PaddleHub
For models' details refer to
https://www.paddlepaddle.org.cn/hublist?filter=en_category&value=SemanticModel
:param model_name: the name of the model. Supported models include
``ernie``, ``ernie_tiny``, ``ernie_v2_eng_base``, ``ernie_v2_eng_large``,
``bert_chinese_L-12_H-768_A-12``, ``bert_multi_cased_L-12_H-768_A-12``,
``bert_multi_uncased_L-12_H-768_A-12``, ``bert_uncased_L-12_H-768_A-12``,
``bert_uncased_L-24_H-1024_A-16``, ``chinese-bert-wwm``,
``chinese-bert-wwm-ext``, ``chinese-electra-base``,
``chinese-electra-small``, ``chinese-roberta-wwm-ext``,
``chinese-roberta-wwm-ext-large``, ``rbt3``, ``rbtl3``
    :param on_gpu: Whether to use the GPU to compute the output.
    :param default_batch_size: fallback batch size in case no batch size is sent in the request
    :param default_traversal_paths: fallback traversal paths in case no traversal paths are sent in the request
:param args: Additional positional arguments
:param kwargs: Additional keyword arguments
"""
def __init__(
self,
model_name: Optional[str] = 'ernie_tiny',
on_gpu: bool = False,
default_batch_size: int = 32,
default_traversal_paths: Tuple[str] = ('r', ),
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.on_gpu = on_gpu
self.model = hub.Module(name=model_name)
self.default_batch_size = default_batch_size
self.default_traversal_paths = default_traversal_paths
@requests
def encode(self, docs: DocumentArray, parameters: Dict, **kwargs):
"""Encode doc content into vector representation.
:param docs: `DocumentArray` passed from the previous ``Executor``.
:param parameters: dictionary to define the `traversal_paths` and the `batch_size`. For example,
`parameters={'traversal_paths': ['r'], 'batch_size': 10}`.
:param kwargs: Additional key value arguments.
"""
if docs:
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get('traversal_paths', self.default_traversal_paths),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='text'
)
for batch_of_docs in document_batches_generator:
pooled_features = []
contents = [[doc.content] for doc in batch_of_docs]
results = self.model.get_embedding(contents, use_gpu=self.on_gpu)
for pooled_feature, _ in results:
pooled_features.append(pooled_feature)
for doc, feature in zip(batch_of_docs, pooled_features):
doc.embedding = np.asarray(feature)
|
import os
from abc import abstractmethod
from typing import Union
from unittest import mock
import pytest
from langchain_core.tools import BaseTool
from pydantic import SecretStr
from langchain_tests.base import BaseStandardTests
class ToolsTests(BaseStandardTests):
"""
:private:
Base class for testing tools. This won't show in the documentation, but
the docstrings will be inherited by subclasses.
"""
@property
@abstractmethod
def tool_constructor(self) -> Union[type[BaseTool], BaseTool]:
"""
Returns a class or instance of a tool to be tested.
"""
...
@property
def tool_constructor_params(self) -> dict:
"""
Returns a dictionary of parameters to pass to the tool constructor.
"""
return {}
@property
def tool_invoke_params_example(self) -> dict:
"""
Returns a dictionary representing the "args" of an example tool call.
This should NOT be a ToolCall dict - it should not
have {"name", "id", "args"} keys.
"""
return {}
@pytest.fixture
def tool(self) -> BaseTool:
"""
:private:
"""
if isinstance(self.tool_constructor, BaseTool):
if self.tool_constructor_params != {}:
msg = (
"If tool_constructor is an instance of BaseTool, "
"tool_constructor_params must be empty"
)
raise ValueError(msg)
return self.tool_constructor
return self.tool_constructor(**self.tool_constructor_params)
class ToolsUnitTests(ToolsTests):
"""
Base class for tools unit tests.
"""
@property
def init_from_env_params(self) -> tuple[dict, dict, dict]:
"""Return env vars, init args, and expected instance attrs for initializing
from env vars."""
return {}, {}, {}
def test_init(self) -> None:
"""
Test that the tool can be initialized with :attr:`tool_constructor` and
:attr:`tool_constructor_params`. If this fails, check that the
keyword args defined in :attr:`tool_constructor_params` are valid.
"""
if isinstance(self.tool_constructor, BaseTool):
tool = self.tool_constructor
else:
tool = self.tool_constructor(**self.tool_constructor_params)
assert tool is not None
def test_init_from_env(self) -> None:
env_params, tools_params, expected_attrs = self.init_from_env_params
if env_params:
with mock.patch.dict(os.environ, env_params):
tool = self.tool_constructor(**tools_params)
assert tool is not None
for k, expected in expected_attrs.items():
actual = getattr(tool, k)
if isinstance(actual, SecretStr):
actual = actual.get_secret_value()
assert actual == expected
def test_has_name(self, tool: BaseTool) -> None:
"""
Tests that the tool has a name attribute to pass to chat models.
If this fails, add a `name` parameter to your tool.
"""
assert tool.name
def test_has_input_schema(self, tool: BaseTool) -> None:
"""
Tests that the tool has an input schema.
If this fails, add an `args_schema` to your tool.
See
`this guide <https://python.langchain.com/docs/how_to/custom_tools/#subclass-basetool>`_
and see how `CalculatorInput` is configured in the
`CustomCalculatorTool.args_schema` attribute
"""
assert tool.get_input_schema()
def test_input_schema_matches_invoke_params(self, tool: BaseTool) -> None:
"""
Tests that the provided example params match the declared input schema.
If this fails, update the `tool_invoke_params_example` attribute to match
the input schema (`args_schema`) of the tool.
"""
# this will be a pydantic object
input_schema = tool.get_input_schema()
assert input_schema(**self.tool_invoke_params_example)
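# Hedged usage sketch (not part of the original test harness): a hypothetical
# "ParrotTool" shows how a concrete test class wires the abstract properties
# together. All names below are illustrative and not shipped with langchain-tests.
#
#   class ParrotTool(BaseTool):
#       name: str = "parrot"
#       description: str = "Repeats the input text back to the caller."
#
#       def _run(self, text: str) -> str:
#           return text
#
#   class TestParrotToolUnit(ToolsUnitTests):
#       @property
#       def tool_constructor(self) -> type[BaseTool]:
#           return ParrotTool
#
#       @property
#       def tool_invoke_params_example(self) -> dict:
#           return {"text": "hello"}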
|
import os
from abc import abstractmethod
from typing import Tuple, Type, Union
from unittest import mock
import pytest
from langchain_core.tools import BaseTool
from pydantic import SecretStr
from langchain_tests.base import BaseStandardTests
class ToolsTests(BaseStandardTests):
"""
:private:
Base class for testing tools. This won't show in the documentation, but
the docstrings will be inherited by subclasses.
"""
@property
@abstractmethod
def tool_constructor(self) -> Union[Type[BaseTool], BaseTool]:
"""
Returns a class or instance of a tool to be tested.
"""
...
@property
def tool_constructor_params(self) -> dict:
"""
Returns a dictionary of parameters to pass to the tool constructor.
"""
return {}
@property
def tool_invoke_params_example(self) -> dict:
"""
Returns a dictionary representing the "args" of an example tool call.
This should NOT be a ToolCall dict - it should not
have {"name", "id", "args"} keys.
"""
return {}
@pytest.fixture
def tool(self) -> BaseTool:
"""
:private:
"""
if isinstance(self.tool_constructor, BaseTool):
if self.tool_constructor_params != {}:
msg = (
"If tool_constructor is an instance of BaseTool, "
"tool_constructor_params must be empty"
)
raise ValueError(msg)
return self.tool_constructor
return self.tool_constructor(**self.tool_constructor_params)
class ToolsUnitTests(ToolsTests):
"""
Base class for tools unit tests.
"""
@property
def init_from_env_params(self) -> Tuple[dict, dict, dict]:
"""Return env vars, init args, and expected instance attrs for initializing
from env vars."""
return {}, {}, {}
def test_init(self) -> None:
"""
Test that the tool can be initialized with :attr:`tool_constructor` and
:attr:`tool_constructor_params`. If this fails, check that the
keyword args defined in :attr:`tool_constructor_params` are valid.
"""
if isinstance(self.tool_constructor, BaseTool):
tool = self.tool_constructor
else:
tool = self.tool_constructor(**self.tool_constructor_params)
assert tool is not None
def test_init_from_env(self) -> None:
env_params, tools_params, expected_attrs = self.init_from_env_params
if env_params:
with mock.patch.dict(os.environ, env_params):
tool = self.tool_constructor(**tools_params)
assert tool is not None
for k, expected in expected_attrs.items():
actual = getattr(tool, k)
if isinstance(actual, SecretStr):
actual = actual.get_secret_value()
assert actual == expected
def test_has_name(self, tool: BaseTool) -> None:
"""
Tests that the tool has a name attribute to pass to chat models.
If this fails, add a `name` parameter to your tool.
"""
assert tool.name
def test_has_input_schema(self, tool: BaseTool) -> None:
"""
Tests that the tool has an input schema.
If this fails, add an `args_schema` to your tool.
See
`this guide <https://python.langchain.com/docs/how_to/custom_tools/#subclass-basetool>`_
and see how `CalculatorInput` is configured in the
`CustomCalculatorTool.args_schema` attribute
"""
assert tool.get_input_schema()
def test_input_schema_matches_invoke_params(self, tool: BaseTool) -> None:
"""
Tests that the provided example params match the declared input schema.
If this fails, update the `tool_invoke_params_example` attribute to match
the input schema (`args_schema`) of the tool.
"""
# this will be a pydantic object
input_schema = tool.get_input_schema()
assert input_schema(**self.tool_invoke_params_example)
|
"""
Borrowed from Langchain's Neo4j graph utility functions.
https://github.com/langchain-ai/langchain/blob/95c3e5f85f8ed8026a11e351b57bfae488d654c4/libs/community/langchain_community/graphs/neo4j_graph.py
"""
from typing import Any
LIST_LIMIT = 128
def clean_string_values(text: str) -> str:
return text.replace("\n", " ").replace("\r", " ")
def value_sanitize(d: Any) -> Any:
"""
Sanitize the input dictionary or list.
    Sanitizes the input by removing embedding-like values
    (lists with more than 128 elements), which are mostly irrelevant for
    generating answers in an LLM context. These properties, if left in
results, can occupy significant context space and detract from
the LLM's performance by introducing unnecessary noise and cost.
"""
if isinstance(d, dict):
new_dict = {}
for key, value in d.items():
if isinstance(value, dict):
sanitized_value = value_sanitize(value)
if (
sanitized_value is not None
): # Check if the sanitized value is not None
new_dict[key] = sanitized_value
elif isinstance(value, list):
if len(value) < LIST_LIMIT:
sanitized_value = value_sanitize(value)
if (
sanitized_value is not None
): # Check if the sanitized value is not None
new_dict[key] = sanitized_value
# Do not include the key if the list is oversized
else:
new_dict[key] = value
return new_dict
elif isinstance(d, list):
if len(d) < LIST_LIMIT:
return [
value_sanitize(item) for item in d if value_sanitize(item) is not None
]
else:
return None
else:
return d
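# Hedged usage sketch (not in the original utility module): shows how an
# oversized, embedding-like list is stripped while small values and nested
# dicts survive, and how newlines/carriage returns are flattened to spaces.
if __name__ == "__main__":
    record = {
        "name": "node-1",
        "embedding": list(range(LIST_LIMIT + 1)),  # longer than LIST_LIMIT, gets dropped
        "meta": {"tags": ["a", "b"]},
    }
    print(value_sanitize(record))          # {'name': 'node-1', 'meta': {'tags': ['a', 'b']}}
    print(clean_string_values("a\nb\rc"))  # 'a b c'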
|
"""Borrowed from Langchain's Neo4j graph utility functions.
https://github.com/langchain-ai/langchain/blob/95c3e5f85f8ed8026a11e351b57bfae488d654c4/libs/community/langchain_community/graphs/neo4j_graph.py
"""
from typing import Any
LIST_LIMIT = 128
def clean_string_values(text: str) -> str:
return text.replace("\n", " ").replace("\r", " ")
def value_sanitize(d: Any) -> Any:
"""Sanitize the input dictionary or list.
    Sanitizes the input by removing embedding-like values
    (lists with more than 128 elements), which are mostly irrelevant for
    generating answers in an LLM context. These properties, if left in
results, can occupy significant context space and detract from
the LLM's performance by introducing unnecessary noise and cost.
"""
if isinstance(d, dict):
new_dict = {}
for key, value in d.items():
if isinstance(value, dict):
sanitized_value = value_sanitize(value)
if (
sanitized_value is not None
): # Check if the sanitized value is not None
new_dict[key] = sanitized_value
elif isinstance(value, list):
if len(value) < LIST_LIMIT:
sanitized_value = value_sanitize(value)
if (
sanitized_value is not None
): # Check if the sanitized value is not None
new_dict[key] = sanitized_value
# Do not include the key if the list is oversized
else:
new_dict[key] = value
return new_dict
elif isinstance(d, list):
if len(d) < LIST_LIMIT:
return [
value_sanitize(item) for item in d if value_sanitize(item) is not None
]
else:
return None
else:
return d
|
# Copyright (c) OpenMMLab. All rights reserved.
import pickle
from .base import BaseFileHandler
class PickleHandler(BaseFileHandler):
str_like = False
def load_from_fileobj(self, file, **kwargs):
return pickle.load(file, **kwargs)
def load_from_path(self, filepath, **kwargs):
return super().load_from_path(filepath, mode='rb', **kwargs)
def dump_to_str(self, obj, **kwargs):
kwargs.setdefault('protocol', 2)
return pickle.dumps(obj, **kwargs)
def dump_to_fileobj(self, obj, file, **kwargs):
kwargs.setdefault('protocol', 2)
pickle.dump(obj, file, **kwargs)
def dump_to_path(self, obj, filepath, **kwargs):
super().dump_to_path(obj, filepath, mode='wb', **kwargs)
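# Hedged usage sketch (not in the original handler module): round-trip an
# object through the in-memory APIs; `protocol=2` is the default set above.
#
#   import io
#   handler = PickleHandler()
#   payload = {'a': 1, 'b': [1, 2, 3]}
#   blob = handler.dump_to_str(payload)  # bytes, despite the name (str_like = False)
#   assert handler.load_from_fileobj(io.BytesIO(blob)) == payload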
|
# Copyright (c) OpenMMLab. All rights reserved.
import pickle
from .base import BaseFileHandler
class PickleHandler(BaseFileHandler):
str_like = False
def load_from_fileobj(self, file, **kwargs):
return pickle.load(file, **kwargs)
def load_from_path(self, filepath, **kwargs):
return super(PickleHandler, self).load_from_path(
filepath, mode='rb', **kwargs)
def dump_to_str(self, obj, **kwargs):
kwargs.setdefault('protocol', 2)
return pickle.dumps(obj, **kwargs)
def dump_to_fileobj(self, obj, file, **kwargs):
kwargs.setdefault('protocol', 2)
pickle.dump(obj, file, **kwargs)
def dump_to_path(self, obj, filepath, **kwargs):
super(PickleHandler, self).dump_to_path(
obj, filepath, mode='wb', **kwargs)
|
# dataset settings
dataset_type = 'Objects365V2Dataset'
data_root = 'data/Objects365/Obj365_v2/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (does not support LMDB and Memcache yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
# If you don't have a gt annotation, delete the pipeline
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/zhiyuan_objv2_train.json',
data_prefix=dict(img='train/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/zhiyuan_objv2_val.json',
data_prefix=dict(img='val/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/zhiyuan_objv2_val.json',
metric='bbox',
format_only=False,
backend_args=backend_args)
test_evaluator = val_evaluator
|
# dataset settings
dataset_type = 'Objects365V2Dataset'
data_root = 'data/Objects365/Obj365_v2/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
# If you don't have a gt annotation, delete the pipeline
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/zhiyuan_objv2_train.json',
data_prefix=dict(img='train/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/zhiyuan_objv2_val.json',
data_prefix=dict(img='val/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/zhiyuan_objv2_val.json',
metric='bbox',
format_only=False)
test_evaluator = val_evaluator
|
_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py'
# model settings
model = dict(
type='FSAF',
bbox_head=dict(
type='FSAFHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
reg_decoded_bbox=True,
# Only anchor-free branch is implemented. The anchor generator only
# generates 1 anchor at each feature point, as a substitute of the
# grid of features.
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=1,
scales_per_octave=1,
ratios=[1.0],
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(_delete_=True, type='TBLRBBoxCoder', normalizer=4.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0,
reduction='none'),
loss_bbox=dict(
_delete_=True,
type='IoULoss',
eps=1e-6,
loss_weight=1.0,
reduction='none')),
# training and testing settings
train_cfg=dict(
assigner=dict(
_delete_=True,
type='CenterRegionAssigner',
pos_scale=0.2,
neg_scale=0.2,
min_pos_iof=0.01),
allowed_border=-1,
pos_weight=-1,
debug=False))
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
default_hooks = dict(
optimizer=dict(
_delete_=True,
type='OptimizerHook',
grad_clip=dict(max_norm=10, norm_type=2)))
|
_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py'
# model settings
model = dict(
type='FSAF',
bbox_head=dict(
type='FSAFHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
reg_decoded_bbox=True,
# Only anchor-free branch is implemented. The anchor generator only
# generates 1 anchor at each feature point, as a substitute of the
# grid of features.
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=1,
scales_per_octave=1,
ratios=[1.0],
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(_delete_=True, type='TBLRBBoxCoder', normalizer=4.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0,
reduction='none'),
loss_bbox=dict(
_delete_=True,
type='IoULoss',
eps=1e-6,
loss_weight=1.0,
reduction='none')),
# training and testing settings
train_cfg=dict(
assigner=dict(
_delete_=True,
type='CenterRegionAssigner',
pos_scale=0.2,
neg_scale=0.2,
min_pos_iof=0.01),
allowed_border=-1,
pos_weight=-1,
debug=False))
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=10, norm_type=2))
|
import torch
from docarray import BaseDocument
from docarray.typing import TorchEmbedding, TorchTensor
def test_set_torch_tensor():
class MyDocument(BaseDocument):
tensor: TorchTensor
d = MyDocument(tensor=torch.zeros((3, 224, 224)))
assert isinstance(d.tensor, TorchTensor)
assert isinstance(d.tensor, torch.Tensor)
assert (d.tensor == torch.zeros((3, 224, 224))).all()
def test_set_torch_embedding():
class MyDocument(BaseDocument):
embedding: TorchEmbedding
d = MyDocument(embedding=torch.zeros((128,)))
assert isinstance(d.embedding, TorchTensor)
assert isinstance(d.embedding, TorchEmbedding)
assert isinstance(d.embedding, torch.Tensor)
assert (d.embedding == torch.zeros((128,))).all()
|
import torch
from docarray import Document
from docarray.typing import TorchEmbedding, TorchTensor
def test_set_torch_tensor():
class MyDocument(Document):
tensor: TorchTensor
d = MyDocument(tensor=torch.zeros((3, 224, 224)))
assert isinstance(d.tensor, TorchTensor)
assert isinstance(d.tensor, torch.Tensor)
assert (d.tensor == torch.zeros((3, 224, 224))).all()
def test_set_torch_embedding():
class MyDocument(Document):
embedding: TorchEmbedding
d = MyDocument(embedding=torch.zeros((128,)))
assert isinstance(d.embedding, TorchTensor)
assert isinstance(d.embedding, TorchEmbedding)
assert isinstance(d.embedding, torch.Tensor)
assert (d.embedding == torch.zeros((128,))).all()
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import pytest
from jina import Document, Flow
from ...torch_object_detection_segmenter import TorchObjectDetectionSegmenter
def test_exec():
f = Flow().add(uses=TorchObjectDetectionSegmenter)
with f:
resp = f.post(on='/test', inputs=Document(), return_results=True)
assert resp is not None
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'pea',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
],
timeout=30,
check=True,
)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from jina import Document, Flow
from ...torch_object_detection_segmenter import TorchObjectDetectionSegmenter
def test_exec():
f = Flow().add(uses=TorchObjectDetectionSegmenter)
with f:
resp = f.post(on='/test', inputs=Document(), return_results=True)
assert resp is not None
|
import os
import tempfile
import httpx
import pytest
from PIL import Image
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.embeddings.cohere import CohereEmbedding
from llama_index.embeddings.cohere.base import VALID_MODEL_INPUT_TYPES
def test_embedding_class():
emb = CohereEmbedding(api_key="token")
assert isinstance(emb, BaseEmbedding)
@pytest.mark.skipif(
os.environ.get("CO_API_KEY") is None, reason="Cohere API key required"
)
def test_sync_embedding():
emb = CohereEmbedding(
api_key=os.environ["CO_API_KEY"],
model_name="embed-english-v3.0",
input_type="clustering",
embedding_type="float",
httpx_client=httpx.Client(),
)
emb.get_query_embedding("I love Cohere!")
@pytest.mark.skipif(
os.environ.get("CO_API_KEY") is None, reason="Cohere API key required"
)
@pytest.mark.asyncio
async def test_async_embedding():
emb = CohereEmbedding(
api_key=os.environ["CO_API_KEY"],
model_name="embed-english-v3.0",
input_type="clustering",
embedding_type="float",
httpx_async_client=httpx.AsyncClient(),
)
await emb.aget_query_embedding("I love Cohere!")
@pytest.mark.skipif(
os.environ.get("CO_API_KEY") is None, reason="Cohere API key required"
)
@pytest.mark.asyncio
async def test_v4_embedding():
emb = CohereEmbedding(
api_key=os.environ["CO_API_KEY"],
model_name="embed-v4.0",
)
embeddings = await emb.aget_text_embedding("I love Cohere!")
assert len(embeddings) > 0
embeddings2 = emb.get_text_embedding("I love Cohere!")
assert len(embeddings2) > 0
@pytest.mark.skipif(
os.environ.get("CO_API_KEY") is None, reason="Cohere API key required"
)
@pytest.mark.asyncio
async def test_embed_batch():
emb = CohereEmbedding(
api_key=os.environ["CO_API_KEY"],
model_name="embed-v4.0",
)
embeddings = await emb.aget_text_embedding_batch(["I love Cohere!", "I love Cohere!"])
assert len(embeddings) == 2
assert len(embeddings[0]) > 0
assert len(embeddings[1]) > 0
@pytest.mark.skipif(
os.environ.get("CO_API_KEY") is None, reason="Cohere API key required"
)
@pytest.mark.asyncio
async def test_embed_image():
emb = CohereEmbedding(
api_key=os.environ["CO_API_KEY"],
model_name="embed-v4.0",
)
# create a test image in a temp file
image = Image.new("RGB", (100, 100), color="red")
with tempfile.NamedTemporaryFile(suffix=".png") as f:
image.save(f.name)
embedding = await emb.aget_image_embedding(f.name)
embedding2 = emb.get_image_embedding(f.name)
assert len(embedding) > 0
assert len(embedding2) > 0
@pytest.mark.skipif(
os.environ.get("CO_API_KEY") is None, reason="Cohere API key required"
)
@pytest.mark.asyncio
async def test_embed_image_batch():
emb = CohereEmbedding(
api_key=os.environ["CO_API_KEY"],
model_name="embed-v4.0",
)
# create a test image in a temp file
image = Image.new("RGB", (100, 100), color="red")
with tempfile.NamedTemporaryFile(suffix=".png") as f:
image.save(f.name)
embeddings = await emb.aget_image_embedding_batch([f.name, f.name])
embeddings2 = emb.get_image_embedding_batch([f.name, f.name])
assert len(embeddings) == 2
assert len(embeddings[0]) > 0
assert len(embeddings[1]) > 0
assert len(embeddings2) == 2
assert len(embeddings2[0]) > 0
assert len(embeddings2[1]) > 0
@pytest.mark.skipif(
os.environ.get("CO_API_KEY") is None, reason="Cohere API key required"
)
def test_all_model_names():
for model_name in VALID_MODEL_INPUT_TYPES:
emb = CohereEmbedding(
api_key=os.environ["CO_API_KEY"],
model_name=model_name,
)
embedding = emb.get_text_embedding("Hello, world!")
assert len(embedding) > 0
def test_cohere_embeddings_custom_endpoint_multiprocessing():
"""
When used in multiprocessing, the CohereEmbedding instance will be serialized and deserialized. This test
    verifies that custom base_urls are retained in the spawned processes.
"""
# Arrange: Create a CohereEmbeddings instance with a custom base_url
custom_base_url = "test_endpoint"
api_key = "test_api_key"
embeddings = CohereEmbedding(api_key=api_key, base_url=custom_base_url)
# Act: Simulate serialization and deserialization
serialized_data = embeddings.__getstate__()
deserialized_embeddings = CohereEmbedding.__new__(CohereEmbedding)
deserialized_embeddings.__setstate__(serialized_data)
# Assert: Verify that the deserialized instance retains the correct base_url
assert deserialized_embeddings.base_url == custom_base_url
|
import os
import httpx
import pytest
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.embeddings.cohere import CohereEmbedding
def test_embedding_class():
emb = CohereEmbedding(api_key="token")
assert isinstance(emb, BaseEmbedding)
@pytest.mark.skipif(
os.environ.get("CO_API_KEY") is None, reason="Cohere API key required"
)
def test_sync_embedding():
emb = CohereEmbedding(
api_key=os.environ["CO_API_KEY"],
model_name="embed-english-v3.0",
input_type="clustering",
embedding_type="float",
httpx_client=httpx.Client(),
)
emb.get_query_embedding("I love Cohere!")
@pytest.mark.skipif(
os.environ.get("CO_API_KEY") is None, reason="Cohere API key required"
)
@pytest.mark.asyncio
async def test_async_embedding():
emb = CohereEmbedding(
api_key=os.environ["CO_API_KEY"],
model_name="embed-english-v3.0",
input_type="clustering",
embedding_type="float",
httpx_async_client=httpx.AsyncClient(),
)
await emb.aget_query_embedding("I love Cohere!")
def test_cohere_embeddings_custom_endpoint_multiprocessing():
"""
When used in multiprocessing, the CohereEmbedding instance will be serialized and deserialized. This test
    verifies that custom base_urls are retained in the spawned processes.
"""
# Arrange: Create a CohereEmbeddings instance with a custom base_url
custom_base_url = "test_endpoint"
api_key = "test_api_key"
embeddings = CohereEmbedding(api_key=api_key, base_url=custom_base_url)
# Act: Simulate serialization and deserialization
serialized_data = embeddings.__getstate__()
deserialized_embeddings = CohereEmbedding.__new__(CohereEmbedding)
deserialized_embeddings.__setstate__(serialized_data)
# Assert: Verify that the deserialized instance retains the correct base_url
assert deserialized_embeddings.base_url == custom_base_url
|
import json
import multiprocessing
import os
import time
import pytest
from jina.helper import random_port
from jina.parsers import set_gateway_parser, set_pod_parser
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.worker import WorkerRuntime
from tests.helper import (
_validate_custom_gateway_process,
_validate_dummy_custom_gateway_response,
)
from tests.unit.yaml.dummy_gateway import DummyGateway
cur_dir = os.path.dirname(os.path.abspath(__file__))
_dummy_gateway_yaml_path = os.path.join(cur_dir, '../../yaml/test-custom-gateway.yml')
def _create_gateway_runtime(port, uses, uses_with, worker_port):
graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
pod_addresses = f'{{"pod0": ["0.0.0.0:{worker_port}"]}}'
deployments_metadata = '{"pod0": {"key1": "value1", "key2": "value2"}}'
with GatewayRuntime(
set_gateway_parser().parse_args(
[
'--port',
str(port),
'--uses',
uses,
'--uses-with',
json.dumps(uses_with),
'--graph-description',
graph_description,
'--deployments-addresses',
pod_addresses,
'--deployments-metadata',
deployments_metadata,
]
)
) as runtime:
runtime.run_forever()
def _start_gateway_runtime(uses, uses_with, worker_port):
port = random_port()
p = multiprocessing.Process(
target=_create_gateway_runtime,
args=(port, uses, uses_with, worker_port),
daemon=True,
)
p.start()
time.sleep(1)
return port, p
def _create_worker_runtime(port, uses):
args = set_pod_parser().parse_args(['--uses', uses, '--port', str(port)])
with WorkerRuntime(args) as runtime:
runtime.run_forever()
def _start_worker_runtime(uses):
port = random_port()
p = multiprocessing.Process(
target=_create_worker_runtime,
args=(port, uses),
daemon=True,
)
p.start()
time.sleep(1)
return port, p
@pytest.mark.parametrize(
'uses,uses_with,expected',
[
('DummyGateway', {}, {'arg1': None, 'arg2': None, 'arg3': 'default-arg3'}),
(
_dummy_gateway_yaml_path,
{},
{'arg1': 'hello', 'arg2': 'world', 'arg3': 'default-arg3'},
),
(
'DummyGateway',
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
_dummy_gateway_yaml_path,
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
'DummyGateway',
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': None, 'arg3': 'default-arg3'},
),
(
_dummy_gateway_yaml_path,
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': 'world', 'arg3': 'default-arg3'},
),
],
)
def test_custom_gateway_no_executors(uses, uses_with, expected):
worker_port, worker_process = _start_worker_runtime('ProcessExecutor')
gateway_port, gateway_process = _start_gateway_runtime(uses, uses_with, worker_port)
_validate_dummy_custom_gateway_response(gateway_port, expected)
_validate_custom_gateway_process(
gateway_port, 'hello', {'text': 'helloworld', 'tags': {'processed': True}}
)
gateway_process.terminate()
gateway_process.join()
worker_process.terminate()
worker_process.join()
assert gateway_process.exitcode == 0
assert worker_process.exitcode == 0
|
import json
import multiprocessing
import os
import time
import pytest
from jina.helper import random_port
from jina.parsers import set_gateway_parser, set_pod_parser
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.worker import WorkerRuntime
from tests.helper import (
_validate_custom_gateway_process,
_validate_dummy_custom_gateway_response,
)
from tests.unit.yaml.dummy_gateway import DummyGateway
cur_dir = os.path.dirname(os.path.abspath(__file__))
_dummy_gateway_yaml_path = os.path.join(cur_dir, '../../yaml/test-custom-gateway.yml')
def _create_gateway_runtime(port, uses, uses_with, worker_port):
graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
pod_addresses = f'{{"pod0": ["0.0.0.0:{worker_port}"]}}'
deployments_metadata = '{"pod0": {"key1": "value1", "key2": "value2"}}'
with GatewayRuntime(
set_gateway_parser().parse_args(
[
'--port',
str(port),
'--uses',
uses,
'--uses-with',
json.dumps(uses_with),
'--graph-description',
graph_description,
'--deployments-addresses',
pod_addresses,
'--deployments-metadata',
deployments_metadata,
]
)
) as runtime:
runtime.run_forever()
def _start_gateway_runtime(uses, uses_with, worker_port):
port = random_port()
p = multiprocessing.Process(
target=_create_gateway_runtime,
args=(port, uses, uses_with, worker_port),
daemon=True,
)
p.start()
time.sleep(1)
return port, p
def _create_worker_runtime(port, uses):
args = set_pod_parser().parse_args(['--uses', uses, '--port', str(port)])
with WorkerRuntime(args) as runtime:
runtime.run_forever()
def _start_worker_runtime(uses):
port = random_port()
p = multiprocessing.Process(
target=_create_worker_runtime,
args=(port, uses),
daemon=True,
)
p.start()
time.sleep(1)
return port, p
@pytest.mark.parametrize(
'uses,uses_with,expected',
[
('DummyGateway', {}, {'arg1': None, 'arg2': None, 'arg3': 'default-arg3'}),
(
_dummy_gateway_yaml_path,
{},
{'arg1': 'hello', 'arg2': 'world', 'arg3': 'default-arg3'},
),
(
'DummyGateway',
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
_dummy_gateway_yaml_path,
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
'DummyGateway',
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': None, 'arg3': 'default-arg3'},
),
(
_dummy_gateway_yaml_path,
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': 'world', 'arg3': 'default-arg3'},
),
],
)
def test_custom_gateway_no_executors(uses, uses_with, expected):
worker_port, worker_process = _start_worker_runtime('ProcessExecutor')
gateway_port, gateway_process = _start_gateway_runtime(uses, uses_with, worker_port)
_validate_dummy_custom_gateway_response(gateway_port, expected)
_validate_custom_gateway_process(
gateway_port, 'hello', {'text': 'helloworld', 'tags': {'processed': True}}
)
gateway_process.terminate()
gateway_process.join()
worker_process.terminate()
worker_process.join()
assert gateway_process.exitcode == 0
assert worker_process.exitcode == 0
|
_base_ = './vfnet_r50_fpn_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 480), (1333, 960)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# learning policy
max_epochs = 24
param_scheduler = [
dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
|
_base_ = './vfnet_r50_fpn_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 480), (1333, 960)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# learning policy
max_epochs = 24
param_scheduler = [
dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
def mask_matrix_nms(masks,
labels,
scores,
filter_thr=-1,
nms_pre=-1,
max_num=-1,
kernel='gaussian',
sigma=2.0,
mask_area=None):
"""Matrix NMS for multi-class masks.
Args:
masks (Tensor): Has shape (num_instances, h, w)
labels (Tensor): Labels of corresponding masks,
has shape (num_instances,).
scores (Tensor): Mask scores of corresponding masks,
has shape (num_instances).
filter_thr (float): Score threshold to filter the masks
after matrix nms. Default: -1, which means do not
use filter_thr.
nms_pre (int): The max number of instances to do the matrix nms.
Default: -1, which means do not use nms_pre.
max_num (int, optional): If there are more than max_num masks after
matrix, only top max_num will be kept. Default: -1, which means
do not use max_num.
kernel (str): 'linear' or 'gaussian'.
sigma (float): std in gaussian method.
mask_area (Tensor): The sum of seg_masks.
Returns:
tuple(Tensor): Processed mask results.
- scores (Tensor): Updated scores, has shape (n,).
- labels (Tensor): Remained labels, has shape (n,).
- masks (Tensor): Remained masks, has shape (n, w, h).
- keep_inds (Tensor): The indices number of
the remaining mask in the input mask, has shape (n,).
"""
assert len(labels) == len(masks) == len(scores)
if len(labels) == 0:
return scores.new_zeros(0), labels.new_zeros(0), masks.new_zeros(
0, *masks.shape[-2:]), labels.new_zeros(0)
if mask_area is None:
mask_area = masks.sum((1, 2)).float()
else:
assert len(masks) == len(mask_area)
# sort and keep top nms_pre
scores, sort_inds = torch.sort(scores, descending=True)
keep_inds = sort_inds
if nms_pre > 0 and len(sort_inds) > nms_pre:
sort_inds = sort_inds[:nms_pre]
keep_inds = keep_inds[:nms_pre]
scores = scores[:nms_pre]
masks = masks[sort_inds]
mask_area = mask_area[sort_inds]
labels = labels[sort_inds]
num_masks = len(labels)
flatten_masks = masks.reshape(num_masks, -1).float()
# inter.
inter_matrix = torch.mm(flatten_masks, flatten_masks.transpose(1, 0))
expanded_mask_area = mask_area.expand(num_masks, num_masks)
# Upper triangle iou matrix.
iou_matrix = (inter_matrix /
(expanded_mask_area + expanded_mask_area.transpose(1, 0) -
inter_matrix)).triu(diagonal=1)
# label_specific matrix.
expanded_labels = labels.expand(num_masks, num_masks)
# Upper triangle label matrix.
label_matrix = (expanded_labels == expanded_labels.transpose(
1, 0)).triu(diagonal=1)
# IoU compensation
compensate_iou, _ = (iou_matrix * label_matrix).max(0)
compensate_iou = compensate_iou.expand(num_masks,
num_masks).transpose(1, 0)
# IoU decay
decay_iou = iou_matrix * label_matrix
# Calculate the decay_coefficient
if kernel == 'gaussian':
decay_matrix = torch.exp(-1 * sigma * (decay_iou**2))
compensate_matrix = torch.exp(-1 * sigma * (compensate_iou**2))
decay_coefficient, _ = (decay_matrix / compensate_matrix).min(0)
elif kernel == 'linear':
decay_matrix = (1 - decay_iou) / (1 - compensate_iou)
decay_coefficient, _ = decay_matrix.min(0)
else:
raise NotImplementedError(
f'{kernel} kernel is not supported in matrix nms!')
# update the score.
scores = scores * decay_coefficient
if filter_thr > 0:
keep = scores >= filter_thr
keep_inds = keep_inds[keep]
if not keep.any():
return scores.new_zeros(0), labels.new_zeros(0), masks.new_zeros(
0, *masks.shape[-2:]), labels.new_zeros(0)
masks = masks[keep]
scores = scores[keep]
labels = labels[keep]
# sort and keep top max_num
scores, sort_inds = torch.sort(scores, descending=True)
keep_inds = keep_inds[sort_inds]
if max_num > 0 and len(sort_inds) > max_num:
sort_inds = sort_inds[:max_num]
keep_inds = keep_inds[:max_num]
scores = scores[:max_num]
masks = masks[sort_inds]
labels = labels[sort_inds]
return scores, labels, masks, keep_inds
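# Hedged usage sketch (not part of the original module): two overlapping masks
# of the same class; matrix NMS decays the lower-scoring one in proportion to
# its IoU with the higher-scoring one instead of discarding it outright.
if __name__ == "__main__":
    masks = torch.zeros(2, 8, 8, dtype=torch.bool)
    masks[0, :6, :6] = True
    masks[1, 2:, 2:] = True
    labels = torch.tensor([0, 0])
    scores = torch.tensor([0.9, 0.6])
    new_scores, new_labels, new_masks, keep_inds = mask_matrix_nms(
        masks, labels, scores, kernel='gaussian', sigma=2.0)
    print(new_scores)  # first score unchanged, second score decayed towards 0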
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
def mask_matrix_nms(masks,
labels,
scores,
filter_thr=-1,
nms_pre=-1,
max_num=-1,
kernel='gaussian',
sigma=2.0,
mask_area=None):
"""Matrix NMS for multi-class masks.
Args:
masks (Tensor): Has shape (num_instances, h, w)
labels (Tensor): Labels of corresponding masks,
has shape (num_instances,).
scores (Tensor): Mask scores of corresponding masks,
has shape (num_instances).
filter_thr (float): Score threshold to filter the masks
after matrix nms. Default: -1, which means do not
use filter_thr.
nms_pre (int): The max number of instances to do the matrix nms.
Default: -1, which means do not use nms_pre.
max_num (int, optional): If there are more than max_num masks after
matrix, only top max_num will be kept. Default: -1, which means
do not use max_num.
kernel (str): 'linear' or 'gaussian'.
sigma (float): std in gaussian method.
mask_area (Tensor): The sum of seg_masks.
Returns:
tuple(Tensor): Processed mask results.
- scores (Tensor): Updated scores, has shape (n,).
- labels (Tensor): Remained labels, has shape (n,).
- masks (Tensor): Remained masks, has shape (n, w, h).
            - keep_inds (Tensor): The indices number of
the remaining mask in the input mask, has shape (n,).
"""
assert len(labels) == len(masks) == len(scores)
if len(labels) == 0:
return scores.new_zeros(0), labels.new_zeros(0), masks.new_zeros(
0, *masks.shape[-2:]), labels.new_zeros(0)
if mask_area is None:
mask_area = masks.sum((1, 2)).float()
else:
assert len(masks) == len(mask_area)
# sort and keep top nms_pre
scores, sort_inds = torch.sort(scores, descending=True)
keep_inds = sort_inds
if nms_pre > 0 and len(sort_inds) > nms_pre:
sort_inds = sort_inds[:nms_pre]
keep_inds = keep_inds[:nms_pre]
scores = scores[:nms_pre]
masks = masks[sort_inds]
mask_area = mask_area[sort_inds]
labels = labels[sort_inds]
num_masks = len(labels)
flatten_masks = masks.reshape(num_masks, -1).float()
# inter.
inter_matrix = torch.mm(flatten_masks, flatten_masks.transpose(1, 0))
expanded_mask_area = mask_area.expand(num_masks, num_masks)
# Upper triangle iou matrix.
iou_matrix = (inter_matrix /
(expanded_mask_area + expanded_mask_area.transpose(1, 0) -
inter_matrix)).triu(diagonal=1)
# label_specific matrix.
expanded_labels = labels.expand(num_masks, num_masks)
# Upper triangle label matrix.
label_matrix = (expanded_labels == expanded_labels.transpose(
1, 0)).triu(diagonal=1)
# IoU compensation
compensate_iou, _ = (iou_matrix * label_matrix).max(0)
compensate_iou = compensate_iou.expand(num_masks,
num_masks).transpose(1, 0)
# IoU decay
decay_iou = iou_matrix * label_matrix
# Calculate the decay_coefficient
if kernel == 'gaussian':
decay_matrix = torch.exp(-1 * sigma * (decay_iou**2))
compensate_matrix = torch.exp(-1 * sigma * (compensate_iou**2))
decay_coefficient, _ = (decay_matrix / compensate_matrix).min(0)
elif kernel == 'linear':
decay_matrix = (1 - decay_iou) / (1 - compensate_iou)
decay_coefficient, _ = decay_matrix.min(0)
else:
raise NotImplementedError(
f'{kernel} kernel is not supported in matrix nms!')
# update the score.
scores = scores * decay_coefficient
if filter_thr > 0:
keep = scores >= filter_thr
keep_inds = keep_inds[keep]
if not keep.any():
return scores.new_zeros(0), labels.new_zeros(0), masks.new_zeros(
0, *masks.shape[-2:]), labels.new_zeros(0)
masks = masks[keep]
scores = scores[keep]
labels = labels[keep]
# sort and keep top max_num
scores, sort_inds = torch.sort(scores, descending=True)
keep_inds = keep_inds[sort_inds]
if max_num > 0 and len(sort_inds) > max_num:
sort_inds = sort_inds[:max_num]
keep_inds = keep_inds[:max_num]
scores = scores[:max_num]
masks = masks[sort_inds]
labels = labels[sort_inds]
return scores, labels, masks, keep_inds
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import BBOX_CODERS
from ..transforms import bbox2distance, distance2bbox
from .base_bbox_coder import BaseBBoxCoder
@BBOX_CODERS.register_module()
class DistancePointBBoxCoder(BaseBBoxCoder):
"""Distance Point BBox coder.
This coder encodes gt bboxes (x1, y1, x2, y2) into (top, bottom, left,
    right) and decodes them back to the original.
Args:
clip_border (bool, optional): Whether clip the objects outside the
border of the image. Defaults to True.
"""
def __init__(self, clip_border=True):
super(BaseBBoxCoder, self).__init__()
self.clip_border = clip_border
def encode(self, points, gt_bboxes, max_dis=None, eps=0.1):
"""Encode bounding box to distances.
Args:
points (Tensor): Shape (N, 2), The format is [x, y].
gt_bboxes (Tensor): Shape (N, 4), The format is "xyxy"
max_dis (float): Upper bound of the distance. Default None.
            eps (float): a small value to ensure target < max_dis, instead of <=.
Default 0.1.
Returns:
Tensor: Box transformation deltas. The shape is (N, 4).
"""
assert points.size(0) == gt_bboxes.size(0)
assert points.size(-1) == 2
assert gt_bboxes.size(-1) == 4
return bbox2distance(points, gt_bboxes, max_dis, eps)
def decode(self, points, pred_bboxes, max_shape=None):
"""Decode distance prediction to bounding box.
Args:
points (Tensor): Shape (B, N, 2) or (N, 2).
pred_bboxes (Tensor): Distance from the given point to 4
boundaries (left, top, right, bottom). Shape (B, N, 4)
or (N, 4)
max_shape (Sequence[int] or torch.Tensor or Sequence[
Sequence[int]],optional): Maximum bounds for boxes, specifies
(H, W, C) or (H, W). If priors shape is (B, N, 4), then
the max_shape should be a Sequence[Sequence[int]],
and the length of max_shape should also be B.
Default None.
Returns:
Tensor: Boxes with shape (N, 4) or (B, N, 4)
"""
assert points.size(0) == pred_bboxes.size(0)
assert points.size(-1) == 2
assert pred_bboxes.size(-1) == 4
if self.clip_border is False:
max_shape = None
return distance2bbox(points, pred_bboxes, max_shape)
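# Hedged usage sketch (not part of the original module): round-trip one point
# and its ground-truth box through encode/decode; `torch` is assumed available.
#
#   import torch
#   coder = DistancePointBBoxCoder()
#   points = torch.tensor([[10., 10.]])
#   gt_bboxes = torch.tensor([[2., 4., 16., 18.]])
#   deltas = coder.encode(points, gt_bboxes)  # per-point distances to the 4 box sides
#   boxes = coder.decode(points, deltas)      # restores tensor([[2., 4., 16., 18.]])
#   assert torch.allclose(boxes, gt_bboxes)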
|
from ..builder import BBOX_CODERS
from ..transforms import bbox2distance, distance2bbox
from .base_bbox_coder import BaseBBoxCoder
@BBOX_CODERS.register_module()
class DistancePointBBoxCoder(BaseBBoxCoder):
"""Distance Point BBox coder.
This coder encodes gt bboxes (x1, y1, x2, y2) into (top, bottom, left,
    right) and decodes them back to the original.
Args:
clip_border (bool, optional): Whether clip the objects outside the
border of the image. Defaults to True.
"""
def __init__(self, clip_border=True):
super(BaseBBoxCoder, self).__init__()
self.clip_border = clip_border
def encode(self, points, gt_bboxes, max_dis=None, eps=0.1):
"""Encode bounding box to distances.
Args:
points (Tensor): Shape (N, 2), The format is [x, y].
gt_bboxes (Tensor): Shape (N, 4), The format is "xyxy"
max_dis (float): Upper bound of the distance. Default None.
            eps (float): a small value to ensure target < max_dis, instead of <=.
Default 0.1.
Returns:
Tensor: Box transformation deltas. The shape is (N, 4).
"""
assert points.size(0) == gt_bboxes.size(0)
assert points.size(-1) == 2
assert gt_bboxes.size(-1) == 4
return bbox2distance(points, gt_bboxes, max_dis, eps)
def decode(self, points, pred_bboxes, max_shape=None):
"""Decode distance prediction to bounding box.
Args:
points (Tensor): Shape (B, N, 2) or (N, 2).
pred_bboxes (Tensor): Distance from the given point to 4
boundaries (left, top, right, bottom). Shape (B, N, 4)
or (N, 4)
max_shape (Sequence[int] or torch.Tensor or Sequence[
Sequence[int]],optional): Maximum bounds for boxes, specifies
(H, W, C) or (H, W). If priors shape is (B, N, 4), then
the max_shape should be a Sequence[Sequence[int]],
and the length of max_shape should also be B.
Default None.
Returns:
Tensor: Boxes with shape (N, 4) or (B, N, 4)
"""
assert points.size(0) == pred_bboxes.size(0)
assert points.size(-1) == 2
assert pred_bboxes.size(-1) == 4
if self.clip_border is False:
max_shape = None
return distance2bbox(points, pred_bboxes, max_shape)
|
_base_ = './retinanet_r50-caffe_fpn_ms-1x_coco.py'
# training schedule for 2x
train_cfg = dict(max_epochs=36)
# learning rate policy
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=36,
by_epoch=True,
milestones=[28, 34],
gamma=0.1)
]
|
_base_ = './retinanet_r50_caffe_fpn_mstrain_1x_coco.py'
# training schedule for 2x
train_cfg = dict(max_epochs=36)
# learning rate policy
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=36,
by_epoch=True,
milestones=[28, 34],
gamma=0.1)
]
|
from .cmuarctic import CMUARCTIC
from .cmudict import CMUDict
from .commonvoice import COMMONVOICE
from .dr_vctk import DR_VCTK
from .gtzan import GTZAN
from .librimix import LibriMix
from .librispeech import LIBRISPEECH
from .libritts import LIBRITTS
from .ljspeech import LJSPEECH
from .quesst14 import QUESST14
from .speechcommands import SPEECHCOMMANDS
from .tedlium import TEDLIUM
from .vctk import VCTK_092
from .yesno import YESNO
__all__ = [
"COMMONVOICE",
"LIBRISPEECH",
"SPEECHCOMMANDS",
"VCTK_092",
"DR_VCTK",
"YESNO",
"LJSPEECH",
"GTZAN",
"CMUARCTIC",
"CMUDict",
"LibriMix",
"LIBRITTS",
"TEDLIUM",
"QUESST14",
]
|
from .cmuarctic import CMUARCTIC
from .cmudict import CMUDict
from .commonvoice import COMMONVOICE
from .dr_vctk import DR_VCTK
from .gtzan import GTZAN
from .librimix import LibriMix
from .librispeech import LIBRISPEECH
from .libritts import LIBRITTS
from .ljspeech import LJSPEECH
from .speechcommands import SPEECHCOMMANDS
from .tedlium import TEDLIUM
from .vctk import VCTK_092
from .yesno import YESNO
__all__ = [
"COMMONVOICE",
"LIBRISPEECH",
"SPEECHCOMMANDS",
"VCTK_092",
"DR_VCTK",
"YESNO",
"LJSPEECH",
"GTZAN",
"CMUARCTIC",
"CMUDict",
"LibriMix",
"LIBRITTS",
"TEDLIUM",
]
|
from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_pure_tensor, register_kernel # usort: skip
from ._meta import (
clamp_bounding_boxes,
convert_bounding_box_format,
get_dimensions_image,
get_dimensions_video,
get_dimensions,
get_num_frames_video,
get_num_frames,
get_image_num_channels,
get_num_channels_image,
get_num_channels_video,
get_num_channels,
get_size_bounding_boxes,
get_size_image,
get_size_mask,
get_size_video,
get_size,
) # usort: skip
from ._augment import erase, erase_image, erase_video, jpeg, jpeg_image, jpeg_video
from ._color import (
adjust_brightness,
adjust_brightness_image,
adjust_brightness_video,
adjust_contrast,
adjust_contrast_image,
adjust_contrast_video,
adjust_gamma,
adjust_gamma_image,
adjust_gamma_video,
adjust_hue,
adjust_hue_image,
adjust_hue_video,
adjust_saturation,
adjust_saturation_image,
adjust_saturation_video,
adjust_sharpness,
adjust_sharpness_image,
adjust_sharpness_video,
autocontrast,
autocontrast_image,
autocontrast_video,
equalize,
equalize_image,
equalize_video,
grayscale_to_rgb,
grayscale_to_rgb_image,
invert,
invert_image,
invert_video,
permute_channels,
permute_channels_image,
permute_channels_video,
posterize,
posterize_image,
posterize_video,
rgb_to_grayscale,
rgb_to_grayscale_image,
solarize,
solarize_image,
solarize_video,
to_grayscale,
)
from ._geometry import (
affine,
affine_bounding_boxes,
affine_image,
affine_mask,
affine_video,
center_crop,
center_crop_bounding_boxes,
center_crop_image,
center_crop_mask,
center_crop_video,
crop,
crop_bounding_boxes,
crop_image,
crop_mask,
crop_video,
elastic,
elastic_bounding_boxes,
elastic_image,
elastic_mask,
elastic_transform,
elastic_video,
five_crop,
five_crop_image,
five_crop_video,
hflip, # TODO: Consider moving all pure alias definitions at the bottom of the file
horizontal_flip,
horizontal_flip_bounding_boxes,
horizontal_flip_image,
horizontal_flip_mask,
horizontal_flip_video,
pad,
pad_bounding_boxes,
pad_image,
pad_mask,
pad_video,
perspective,
perspective_bounding_boxes,
perspective_image,
perspective_mask,
perspective_video,
resize,
resize_bounding_boxes,
resize_image,
resize_mask,
resize_video,
resized_crop,
resized_crop_bounding_boxes,
resized_crop_image,
resized_crop_mask,
resized_crop_video,
rotate,
rotate_bounding_boxes,
rotate_image,
rotate_mask,
rotate_video,
ten_crop,
ten_crop_image,
ten_crop_video,
vertical_flip,
vertical_flip_bounding_boxes,
vertical_flip_image,
vertical_flip_mask,
vertical_flip_video,
vflip,
)
from ._misc import (
convert_image_dtype,
gaussian_blur,
gaussian_blur_image,
gaussian_blur_video,
normalize,
normalize_image,
normalize_video,
sanitize_bounding_boxes,
to_dtype,
to_dtype_image,
to_dtype_video,
)
from ._temporal import uniform_temporal_subsample, uniform_temporal_subsample_video
from ._type_conversion import pil_to_tensor, to_image, to_pil_image
from ._deprecated import get_image_size, to_tensor # usort: skip
|
from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_pure_tensor, register_kernel # usort: skip
from ._meta import (
clamp_bounding_boxes,
convert_bounding_box_format,
get_dimensions_image,
_get_dimensions_image_pil,
get_dimensions_video,
get_dimensions,
get_num_frames_video,
get_num_frames,
get_image_num_channels,
get_num_channels_image,
_get_num_channels_image_pil,
get_num_channels_video,
get_num_channels,
get_size_bounding_boxes,
get_size_image,
_get_size_image_pil,
get_size_mask,
get_size_video,
get_size,
) # usort: skip
from ._augment import _erase_image_pil, _jpeg_image_pil, erase, erase_image, erase_video, jpeg, jpeg_image, jpeg_video
from ._color import (
_adjust_brightness_image_pil,
_adjust_contrast_image_pil,
_adjust_gamma_image_pil,
_adjust_hue_image_pil,
_adjust_saturation_image_pil,
_adjust_sharpness_image_pil,
_autocontrast_image_pil,
_equalize_image_pil,
_invert_image_pil,
_permute_channels_image_pil,
_posterize_image_pil,
_rgb_to_grayscale_image_pil,
_solarize_image_pil,
adjust_brightness,
adjust_brightness_image,
adjust_brightness_video,
adjust_contrast,
adjust_contrast_image,
adjust_contrast_video,
adjust_gamma,
adjust_gamma_image,
adjust_gamma_video,
adjust_hue,
adjust_hue_image,
adjust_hue_video,
adjust_saturation,
adjust_saturation_image,
adjust_saturation_video,
adjust_sharpness,
adjust_sharpness_image,
adjust_sharpness_video,
autocontrast,
autocontrast_image,
autocontrast_video,
equalize,
equalize_image,
equalize_video,
grayscale_to_rgb,
grayscale_to_rgb_image,
invert,
invert_image,
invert_video,
permute_channels,
permute_channels_image,
permute_channels_video,
posterize,
posterize_image,
posterize_video,
rgb_to_grayscale,
rgb_to_grayscale_image,
solarize,
solarize_image,
solarize_video,
to_grayscale,
)
from ._geometry import (
_affine_image_pil,
_center_crop_image_pil,
_crop_image_pil,
_elastic_image_pil,
_five_crop_image_pil,
_horizontal_flip_image_pil,
_pad_image_pil,
_perspective_image_pil,
_resize_image_pil,
_resized_crop_image_pil,
_rotate_image_pil,
_ten_crop_image_pil,
_vertical_flip_image_pil,
affine,
affine_bounding_boxes,
affine_image,
affine_mask,
affine_video,
center_crop,
center_crop_bounding_boxes,
center_crop_image,
center_crop_mask,
center_crop_video,
crop,
crop_bounding_boxes,
crop_image,
crop_mask,
crop_video,
elastic,
elastic_bounding_boxes,
elastic_image,
elastic_mask,
elastic_transform,
elastic_video,
five_crop,
five_crop_image,
five_crop_video,
    hflip,  # TODO: Consider moving all pure alias definitions to the bottom of the file
horizontal_flip,
horizontal_flip_bounding_boxes,
horizontal_flip_image,
horizontal_flip_mask,
horizontal_flip_video,
pad,
pad_bounding_boxes,
pad_image,
pad_mask,
pad_video,
perspective,
perspective_bounding_boxes,
perspective_image,
perspective_mask,
perspective_video,
resize,
resize_bounding_boxes,
resize_image,
resize_mask,
resize_video,
resized_crop,
resized_crop_bounding_boxes,
resized_crop_image,
resized_crop_mask,
resized_crop_video,
rotate,
rotate_bounding_boxes,
rotate_image,
rotate_mask,
rotate_video,
ten_crop,
ten_crop_image,
ten_crop_video,
vertical_flip,
vertical_flip_bounding_boxes,
vertical_flip_image,
vertical_flip_mask,
vertical_flip_video,
vflip,
)
from ._misc import (
_gaussian_blur_image_pil,
convert_image_dtype,
gaussian_blur,
gaussian_blur_image,
gaussian_blur_video,
normalize,
normalize_image,
normalize_video,
sanitize_bounding_boxes,
to_dtype,
to_dtype_image,
to_dtype_video,
)
from ._temporal import uniform_temporal_subsample, uniform_temporal_subsample_video
from ._type_conversion import pil_to_tensor, to_image, to_pil_image
from ._deprecated import get_image_size, to_tensor # usort: skip
|
from jina import DocumentArray, Executor, Flow, requests
def test_gateway_metric_labels(monkeypatch_metric_exporter):
collect_metrics, read_metrics = monkeypatch_metric_exporter
class FirstExec(Executor):
@requests()
def meow(self, docs, **kwargs):
return DocumentArray.empty(3)
class SecondExec(Executor):
@requests()
def meow(self, docs, **kwargs):
return DocumentArray.empty(3)
with Flow(
tracing=False,
metrics=True,
metrics_exporter_host='http://localhost',
metrics_exporter_port=4317,
port=12345,
).add(name='first_exec', uses=FirstExec).add(
name="second_exec", uses=SecondExec
) as f:
f.post('/')
collect_metrics()
metrics = read_metrics()
gateway_metrics = metrics['gateway/rep-0'][0]['resource_metrics'][0][
'scope_metrics'
][0]['metrics']
gateway_metric_data_point = {
i['name']: i['data']['data_points'] for i in gateway_metrics
}
assert (
'address'
in gateway_metric_data_point['jina_sending_request_seconds'][0]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_sent_request_bytes'][0]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_received_response_bytes'][0]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_sending_request_seconds'][1]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_sent_request_bytes'][1]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_received_response_bytes'][1]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_sending_request_seconds'][0]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_sent_request_bytes'][0]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_received_response_bytes'][0]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_sending_request_seconds'][1]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_sent_request_bytes'][1]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_received_response_bytes'][1]['attributes']
)
assert {'first_exec', 'second_exec'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_received_response_bytes']
}
assert {'first_exec', 'second_exec'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_sent_request_bytes']
}
assert {'first_exec', 'second_exec'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_sending_request_seconds']
}
def test_merge_with_no_reduce(monkeypatch_metric_exporter):
collect_metrics, read_metrics = monkeypatch_metric_exporter
f = (
Flow(
tracing=False,
metrics=True,
metrics_exporter_host='http://localhost',
metrics_exporter_port=4317,
port=12345,
)
.add(name='name1')
.add(name='name2', needs=['gateway'])
.add(name='name3', needs=['name1', 'name2'], disable_reduce=True)
)
with f:
f.post('/')
collect_metrics()
metrics = read_metrics()
gateway_metrics = metrics['gateway/rep-0'][0]['resource_metrics'][0][
'scope_metrics'
][0]['metrics']
gateway_metric_data_point = {
i['name']: i['data']['data_points'] for i in gateway_metrics
}
assert {'name1', 'name2', 'name3'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_received_response_bytes']
}
assert {'name1', 'name2', 'name3'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_sent_request_bytes']
}
assert {'name1', 'name2', 'name3'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_sending_request_seconds']
}
|
from jina import DocumentArray, Executor, Flow, requests
def test_gateway_metric_labels(monkeypatch_metric_exporter):
collect_metrics, read_metrics = monkeypatch_metric_exporter
class FirstExec(Executor):
@requests()
def meow(self, docs, **kwargs):
return DocumentArray.empty(3)
class SecondExec(Executor):
@requests()
def meow(self, docs, **kwargs):
return DocumentArray.empty(3)
with Flow(
tracing=False,
metrics=True,
metrics_exporter_host='localhost',
metrics_exporter_port=4317,
port=12345,
).add(name='first_exec', uses=FirstExec).add(
name="second_exec", uses=SecondExec
) as f:
f.post('/')
collect_metrics()
metrics = read_metrics()
gateway_metrics = metrics['gateway/rep-0'][0]['resource_metrics'][0][
'scope_metrics'
][0]['metrics']
gateway_metric_data_point = {
i['name']: i['data']['data_points'] for i in gateway_metrics
}
assert (
'address'
in gateway_metric_data_point['jina_sending_request_seconds'][0]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_sent_request_bytes'][0]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_received_response_bytes'][0]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_sending_request_seconds'][1]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_sent_request_bytes'][1]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_received_response_bytes'][1]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_sending_request_seconds'][0]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_sent_request_bytes'][0]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_received_response_bytes'][0]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_sending_request_seconds'][1]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_sent_request_bytes'][1]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_received_response_bytes'][1]['attributes']
)
assert {'first_exec', 'second_exec'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_received_response_bytes']
}
assert {'first_exec', 'second_exec'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_sent_request_bytes']
}
assert {'first_exec', 'second_exec'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_sending_request_seconds']
}
def test_merge_with_no_reduce(monkeypatch_metric_exporter):
collect_metrics, read_metrics = monkeypatch_metric_exporter
f = (
Flow(
tracing=False,
metrics=True,
metrics_exporter_host='localhost',
metrics_exporter_port=4317,
port=12345,
)
.add(name='name1')
.add(name='name2', needs=['gateway'])
.add(name='name3', needs=['name1', 'name2'], disable_reduce=True)
)
with f:
f.post('/')
collect_metrics()
metrics = read_metrics()
gateway_metrics = metrics['gateway/rep-0'][0]['resource_metrics'][0][
'scope_metrics'
][0]['metrics']
gateway_metric_data_point = {
i['name']: i['data']['data_points'] for i in gateway_metrics
}
assert {'name1', 'name2', 'name3'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_received_response_bytes']
}
assert {'name1', 'name2', 'name3'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_sent_request_bytes']
}
assert {'name1', 'name2', 'name3'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_sending_request_seconds']
}
|
import os
import warnings
from modulefinder import Module
import torch
from torchvision import datasets, io, models, ops, transforms, utils
from .extension import _HAS_OPS
try:
from .version import __version__ # noqa: F401
except ImportError:
pass
# Check if torchvision is being imported within the root folder
if not _HAS_OPS and os.path.dirname(os.path.realpath(__file__)) == os.path.join(
os.path.realpath(os.getcwd()), "torchvision"
):
message = (
"You are importing torchvision within its own root folder ({}). "
"This is not expected to work and may give errors. Please exit the "
"torchvision project source and relaunch your python interpreter."
)
warnings.warn(message.format(os.getcwd()))
_image_backend = "PIL"
_video_backend = "pyav"
def set_image_backend(backend):
"""
Specifies the package used to load images.
Args:
backend (string): Name of the image backend. one of {'PIL', 'accimage'}.
The :mod:`accimage` package uses the Intel IPP library. It is
generally faster than PIL, but does not support as many operations.
"""
global _image_backend
if backend not in ["PIL", "accimage"]:
raise ValueError(f"Invalid backend '{backend}'. Options are 'PIL' and 'accimage'")
_image_backend = backend
def get_image_backend():
"""
Gets the name of the package used to load images
"""
return _image_backend
def set_video_backend(backend):
"""
Specifies the package used to decode videos.
Args:
backend (string): Name of the video backend. one of {'pyav', 'video_reader'}.
The :mod:`pyav` package uses the 3rd party PyAv library. It is a Pythonic
binding for the FFmpeg libraries.
The :mod:`video_reader` package includes a native C++ implementation on
top of FFMPEG libraries, and a python API of TorchScript custom operator.
It generally decodes faster than :mod:`pyav`, but is perhaps less robust.
.. note::
Building with FFMPEG is disabled by default in the latest `main`. If you want to use the 'video_reader'
backend, please compile torchvision from source.
"""
global _video_backend
if backend not in ["pyav", "video_reader", "cuda"]:
raise ValueError("Invalid video backend '%s'. Options are 'pyav', 'video_reader' and 'cuda'" % backend)
if backend == "video_reader" and not io._HAS_VIDEO_OPT:
# TODO: better messages
message = "video_reader video backend is not available. Please compile torchvision from source and try again"
raise RuntimeError(message)
elif backend == "cuda" and not io._HAS_GPU_VIDEO_DECODER:
# TODO: better messages
message = "cuda video backend is not available."
raise RuntimeError(message)
else:
_video_backend = backend
def get_video_backend():
"""
Returns the currently active video backend used to decode videos.
Returns:
str: Name of the video backend. one of {'pyav', 'video_reader'}.
"""
return _video_backend
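# Usage sketch (editor's note, not part of the original module): both setters
# validate their argument and raise ValueError on an unknown backend name.
#
#     import torchvision
#     torchvision.set_image_backend("PIL")
#     assert torchvision.get_image_backend() == "PIL"
#     torchvision.set_video_backend("pyav")  # 'video_reader' requires a source build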
def _is_tracing():
return torch._C._get_tracing_state()
_WARN_ABOUT_BETA_TRANSFORMS = True
_BETA_TRANSFORMS_WARNING = (
"The torchvision.datapoints and torchvision.transforms.v2 namespaces are still Beta. "
"While we do not expect major breaking changes, some APIs may still change "
"according to user feedback. Please submit any feedback you may have in "
"this issue: https://github.com/pytorch/vision/issues/6753, and you can also "
"check out https://github.com/pytorch/vision/issues/7319 to learn more about "
"the APIs that we suspect might involve future changes. "
"You can silence this warning by calling torchvision.disable_beta_transforms_warning()."
)
def disable_beta_transforms_warning():
global _WARN_ABOUT_BETA_TRANSFORMS
_WARN_ABOUT_BETA_TRANSFORMS = False
|
import os
import warnings
from modulefinder import Module
import torch
from torchvision import datasets, io, models, ops, transforms, utils
from .extension import _HAS_OPS
try:
from .version import __version__ # noqa: F401
except ImportError:
pass
# Check if torchvision is being imported within the root folder
if not _HAS_OPS and os.path.dirname(os.path.realpath(__file__)) == os.path.join(
os.path.realpath(os.getcwd()), "torchvision"
):
message = (
"You are importing torchvision within its own root folder ({}). "
"This is not expected to work and may give errors. Please exit the "
"torchvision project source and relaunch your python interpreter."
)
warnings.warn(message.format(os.getcwd()))
_image_backend = "PIL"
_video_backend = "pyav"
def set_image_backend(backend):
"""
Specifies the package used to load images.
Args:
backend (string): Name of the image backend. one of {'PIL', 'accimage'}.
The :mod:`accimage` package uses the Intel IPP library. It is
generally faster than PIL, but does not support as many operations.
"""
global _image_backend
if backend not in ["PIL", "accimage"]:
raise ValueError(f"Invalid backend '{backend}'. Options are 'PIL' and 'accimage'")
_image_backend = backend
def get_image_backend():
"""
Gets the name of the package used to load images
"""
return _image_backend
def set_video_backend(backend):
"""
Specifies the package used to decode videos.
Args:
backend (string): Name of the video backend. one of {'pyav', 'video_reader'}.
The :mod:`pyav` package uses the 3rd party PyAv library. It is a Pythonic
binding for the FFmpeg libraries.
The :mod:`video_reader` package includes a native C++ implementation on
top of FFMPEG libraries, and a python API of TorchScript custom operator.
It generally decodes faster than :mod:`pyav`, but is perhaps less robust.
.. note::
Building with FFMPEG is disabled by default in the latest `main`. If you want to use the 'video_reader'
backend, please compile torchvision from source.
"""
global _video_backend
if backend not in ["pyav", "video_reader", "cuda"]:
raise ValueError("Invalid video backend '%s'. Options are 'pyav', 'video_reader' and 'cuda'" % backend)
if backend == "video_reader" and not io._HAS_VIDEO_OPT:
# TODO: better messages
message = "video_reader video backend is not available. Please compile torchvision from source and try again"
raise RuntimeError(message)
elif backend == "cuda" and not io._HAS_GPU_VIDEO_DECODER:
# TODO: better messages
message = "cuda video backend is not available."
raise RuntimeError(message)
else:
_video_backend = backend
def get_video_backend():
"""
Returns the currently active video backend used to decode videos.
Returns:
str: Name of the video backend. one of {'pyav', 'video_reader'}.
"""
return _video_backend
def _is_tracing():
return torch._C._get_tracing_state()
_WARN_ABOUT_BETA_TRANSFORMS = True
_BETA_TRANSFORMS_WARNING = (
"The torchvision.datapoints and torchvision.transforms.v2 namespaces are still Beta. "
"While we do not expect major breaking changes, some APIs may still change "
"according to user feedback. Please submit any feedback you may have in "
"this issue: https://github.com/pytorch/vision/issues/6753, and you can also "
"check out https://github.com/pytorch/vision/issues/7319 to learn more about "
"the APIs that we suspect might involve future changes. "
"You can silence this warning by calling torchvision.disable_beta_transform_warning()."
)
def disable_beta_transforms_warning():
global _WARN_ABOUT_BETA_TRANSFORMS
_WARN_ABOUT_BETA_TRANSFORMS = False
|
# Copyright (c) OpenMMLab. All rights reserved.
from .fileio import (FileClient, dict_from_file, dump, list_from_file, load,
register_handler)
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
has_method, import_modules_from_strings, is_list_of,
is_method_overridden, is_seq_of, is_str, is_tuple_of,
iter_cast, list_cast, requires_executable, requires_package,
slice_list, to_1tuple, to_2tuple, to_3tuple, to_4tuple,
to_ntuple, tuple_cast)
from .path import (check_file_exist, fopen, is_filepath, mkdir_or_exist,
scandir, symlink)
__all__ = [
'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of',
'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'deprecated_api_warning', 'import_modules_from_strings',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method', 'dict_from_file', 'list_from_file',
'register_handler', 'dump', 'load', 'FileClient'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
# type: ignore
from .fileio import (FileClient, dict_from_file, dump, list_from_file, load,
register_handler)
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
has_method, import_modules_from_strings, is_list_of,
is_method_overridden, is_seq_of, is_str, is_tuple_of,
iter_cast, list_cast, requires_executable, requires_package,
slice_list, to_1tuple, to_2tuple, to_3tuple, to_4tuple,
to_ntuple, tuple_cast)
from .path import (check_file_exist, fopen, is_filepath, mkdir_or_exist,
scandir, symlink)
__all__ = [
'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of',
'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'deprecated_api_warning', 'import_modules_from_strings',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method', 'dict_from_file', 'list_from_file',
'register_handler', 'dump', 'load', 'FileClient'
]
|
import numpy as np
import pytest
from keras.src import backend
from keras.src import initializers
from keras.src import layers
from keras.src import models
from keras.src import testing
class SpectralNormalizationTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_basic_spectralnorm(self):
self.run_layer_test(
layers.SpectralNormalization,
init_kwargs={"layer": layers.Dense(2)},
input_data=np.random.uniform(size=(10, 3, 4)),
expected_output_shape=(10, 3, 2),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=1,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
self.run_layer_test(
layers.SpectralNormalization,
init_kwargs={"layer": layers.Embedding(10, 4)},
input_data=np.random.randint(10, size=(10,)).astype("float32"),
expected_output_shape=(10, 4),
expected_num_trainable_weights=1,
expected_num_non_trainable_weights=1,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
run_training_check=False,
)
@pytest.mark.requires_trainable_backend
def test_spectralnorm_higher_dim(self):
self.run_layer_test(
layers.SpectralNormalization,
init_kwargs={"layer": layers.Dense(2)},
input_data=np.random.uniform(size=(10, 3, 4, 5)),
expected_output_shape=(10, 3, 4, 2),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=1,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
def test_invalid_power_iterations(self):
with self.assertRaisesRegex(
ValueError, "`power_iterations` should be greater than zero."
):
layers.SpectralNormalization(layers.Dense(2), power_iterations=0)
def test_invalid_layer(self):
layer = layers.SpectralNormalization(layers.ReLU())
inputs = np.ones(shape=(4, 2))
with self.assertRaisesRegex(
ValueError, "object has no attribute 'kernel' nor 'embeddings'"
):
layer(inputs)
def test_apply_layer(self):
if backend.config.image_data_format() == "channels_last":
images = np.ones((1, 2, 2, 1))
else:
images = np.ones((1, 1, 2, 2))
sn_wrapper = layers.SpectralNormalization(
layers.Conv2D(
1, (2, 2), kernel_initializer=initializers.Constant(value=1)
),
power_iterations=8,
)
result = sn_wrapper(images, training=False)
result_train = sn_wrapper(images, training=True)
expected_output = np.array([[[[4.0]]]], dtype=np.float32)
self.assertAllClose(result, expected_output)
# max eigen value of 2x2 matrix of ones is 2
self.assertAllClose(result_train, expected_output / 2)
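        # Worked check (editor's note): the constant-1 Conv2D kernel flattens to a
        # (4, 1) matrix of ones whose largest singular value is sqrt(4) = 2, so
        # spectral normalization scales the 4.0 output down to 2.0 during training.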
@pytest.mark.requires_trainable_backend
def test_end_to_end(self):
sn_wrapper = layers.SpectralNormalization(
layers.Conv2D(
3, (2, 2), padding="same", data_format="channels_last"
),
power_iterations=2,
)
model = models.Sequential([sn_wrapper])
model.compile("rmsprop", loss="mse")
x = np.random.random((4, 8, 8, 3))
y = np.random.random((4, 8, 8, 3))
model.fit(x, y)
|
import numpy as np
import pytest
from keras.src import backend
from keras.src import initializers
from keras.src import layers
from keras.src import models
from keras.src import testing
class SpectralNormalizationTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_basic_spectralnorm(self):
self.run_layer_test(
layers.SpectralNormalization,
init_kwargs={"layer": layers.Dense(2)},
input_data=np.random.uniform(size=(10, 3, 4)),
expected_output_shape=(10, 3, 2),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=1,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
self.run_layer_test(
layers.SpectralNormalization,
init_kwargs={"layer": layers.Embedding(10, 4)},
input_data=np.random.randint(10, size=(10,)).astype("float32"),
expected_output_shape=(10, 4),
expected_num_trainable_weights=1,
expected_num_non_trainable_weights=1,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
run_training_check=False,
)
def test_invalid_power_iterations(self):
with self.assertRaisesRegex(
ValueError, "`power_iterations` should be greater than zero."
):
layers.SpectralNormalization(layers.Dense(2), power_iterations=0)
def test_invalid_layer(self):
layer = layers.SpectralNormalization(layers.ReLU())
inputs = np.ones(shape=(4, 2))
with self.assertRaisesRegex(
ValueError, "object has no attribute 'kernel' nor 'embeddings'"
):
layer(inputs)
def test_apply_layer(self):
if backend.config.image_data_format() == "channels_last":
images = np.ones((1, 2, 2, 1))
else:
images = np.ones((1, 1, 2, 2))
sn_wrapper = layers.SpectralNormalization(
layers.Conv2D(
1, (2, 2), kernel_initializer=initializers.Constant(value=1)
),
power_iterations=8,
)
result = sn_wrapper(images, training=False)
result_train = sn_wrapper(images, training=True)
expected_output = np.array([[[[4.0]]]], dtype=np.float32)
self.assertAllClose(result, expected_output)
# max eigen value of 2x2 matrix of ones is 2
self.assertAllClose(result_train, expected_output / 2)
@pytest.mark.requires_trainable_backend
def test_end_to_end(self):
sn_wrapper = layers.SpectralNormalization(
layers.Conv2D(
3, (2, 2), padding="same", data_format="channels_last"
),
power_iterations=2,
)
model = models.Sequential([sn_wrapper])
model.compile("rmsprop", loss="mse")
x = np.random.random((4, 8, 8, 3))
y = np.random.random((4, 8, 8, 3))
model.fit(x, y)
|
import os
from unittest import TestCase
import cv2
import numpy as np
import torch
from mmengine.data import InstanceData, PixelData
from mmdet.evaluation import INSTANCE_OFFSET
from mmdet.structures import DetDataSample
from mmdet.visualization import DetLocalVisualizer
def _rand_bboxes(num_boxes, h, w):
cx, cy, bw, bh = torch.rand(num_boxes, 4).T
tl_x = ((cx * w) - (w * bw / 2)).clamp(0, w)
tl_y = ((cy * h) - (h * bh / 2)).clamp(0, h)
br_x = ((cx * w) + (w * bw / 2)).clamp(0, w)
br_y = ((cy * h) + (h * bh / 2)).clamp(0, h)
bboxes = torch.vstack([tl_x, tl_y, br_x, br_y]).T
return bboxes
def _create_panoptic_data(num_boxes, h, w):
sem_seg = np.zeros((h, w), dtype=np.int64) + 2
bboxes = _rand_bboxes(num_boxes, h, w).int()
labels = torch.randint(2, (num_boxes, ))
for i in range(num_boxes):
x, y, w, h = bboxes[i]
sem_seg[y:y + h, x:x + w] = (i + 1) * INSTANCE_OFFSET + labels[i]
return sem_seg[None]
class TestDetLocalVisualizer(TestCase):
def test_add_datasample(self):
h = 12
w = 10
num_class = 3
num_bboxes = 5
out_file = 'out_file.jpg'
image = np.random.randint(0, 256, size=(h, w, 3)).astype('uint8')
# test gt_instances
gt_instances = InstanceData()
gt_instances.bboxes = _rand_bboxes(num_bboxes, h, w)
gt_instances.labels = torch.randint(0, num_class, (num_bboxes, ))
gt_det_data_sample = DetDataSample()
gt_det_data_sample.gt_instances = gt_instances
#
det_local_visualizer = DetLocalVisualizer()
det_local_visualizer.add_datasample('image', image, gt_det_data_sample)
# test out_file
det_local_visualizer.add_datasample(
'image', image, gt_det_data_sample, out_file=out_file)
assert os.path.exists(out_file)
drawn_img = cv2.imread(out_file)
assert drawn_img.shape == (h, w, 3)
os.remove(out_file)
# test gt_instances and pred_instances
pred_instances = InstanceData()
pred_instances.bboxes = _rand_bboxes(num_bboxes, h, w)
pred_instances.labels = torch.randint(0, num_class, (num_bboxes, ))
pred_instances.scores = torch.rand((num_bboxes, ))
pred_det_data_sample = DetDataSample()
pred_det_data_sample.pred_instances = pred_instances
det_local_visualizer.add_datasample(
'image',
image,
gt_det_data_sample,
pred_det_data_sample,
out_file=out_file)
self._assert_image_and_shape(out_file, (h, w * 2, 3))
det_local_visualizer.add_datasample(
'image',
image,
gt_det_data_sample,
pred_det_data_sample,
draw_gt=False,
out_file=out_file)
self._assert_image_and_shape(out_file, (h, w, 3))
det_local_visualizer.add_datasample(
'image',
image,
gt_det_data_sample,
pred_det_data_sample,
draw_pred=False,
out_file=out_file)
self._assert_image_and_shape(out_file, (h, w, 3))
# test gt_panoptic_seg and pred_panoptic_seg
det_local_visualizer.dataset_meta = dict(CLASSES=('1', '2'))
gt_sem_seg = _create_panoptic_data(num_bboxes, h, w)
panoptic_seg = PixelData(sem_seg=gt_sem_seg)
gt_det_data_sample = DetDataSample()
gt_det_data_sample.gt_panoptic_seg = panoptic_seg
pred_sem_seg = _create_panoptic_data(num_bboxes, h, w)
panoptic_seg = PixelData(sem_seg=pred_sem_seg)
pred_det_data_sample = DetDataSample()
pred_det_data_sample.pred_panoptic_seg = panoptic_seg
det_local_visualizer.add_datasample(
'image',
image,
gt_det_data_sample,
pred_det_data_sample,
out_file=out_file)
self._assert_image_and_shape(out_file, (h, w * 2, 3))
# class information must be provided
det_local_visualizer.dataset_meta = {}
with self.assertRaises(AssertionError):
det_local_visualizer.add_datasample(
'image',
image,
gt_det_data_sample,
pred_det_data_sample,
out_file=out_file)
def _assert_image_and_shape(self, out_file, out_shape):
assert os.path.exists(out_file)
drawn_img = cv2.imread(out_file)
assert drawn_img.shape == out_shape
os.remove(out_file)
|
import os
from unittest import TestCase
import cv2
import numpy as np
import torch
from mmengine.data import InstanceData, PixelData
from mmdet.evaluation import INSTANCE_OFFSET
from mmdet.structures import DetDataSample
from mmdet.visualization import DetLocalVisualizer
def _rand_bboxes(num_boxes, h, w):
cx, cy, bw, bh = torch.rand(num_boxes, 4).T
tl_x = ((cx * w) - (w * bw / 2)).clip(0, w)
tl_y = ((cy * h) - (h * bh / 2)).clip(0, h)
br_x = ((cx * w) + (w * bw / 2)).clip(0, w)
br_y = ((cy * h) + (h * bh / 2)).clip(0, h)
bboxes = torch.vstack([tl_x, tl_y, br_x, br_y]).T
return bboxes
def _create_panoptic_data(num_boxes, h, w):
sem_seg = np.zeros((h, w), dtype=np.int64) + 2
bboxes = _rand_bboxes(num_boxes, h, w).int()
labels = torch.randint(2, (num_boxes, ))
for i in range(num_boxes):
x, y, w, h = bboxes[i]
sem_seg[y:y + h, x:x + w] = (i + 1) * INSTANCE_OFFSET + labels[i]
return sem_seg[None]
class TestDetLocalVisualizer(TestCase):
def test_add_datasample(self):
h = 12
w = 10
num_class = 3
num_bboxes = 5
out_file = 'out_file.jpg'
image = np.random.randint(0, 256, size=(h, w, 3)).astype('uint8')
# test gt_instances
gt_instances = InstanceData()
gt_instances.bboxes = _rand_bboxes(num_bboxes, h, w)
gt_instances.labels = torch.randint(0, num_class, (num_bboxes, ))
gt_det_data_sample = DetDataSample()
gt_det_data_sample.gt_instances = gt_instances
#
det_local_visualizer = DetLocalVisualizer()
det_local_visualizer.add_datasample('image', image, gt_det_data_sample)
# test out_file
det_local_visualizer.add_datasample(
'image', image, gt_det_data_sample, out_file=out_file)
assert os.path.exists(out_file)
drawn_img = cv2.imread(out_file)
assert drawn_img.shape == (h, w, 3)
os.remove(out_file)
# test gt_instances and pred_instances
pred_instances = InstanceData()
pred_instances.bboxes = _rand_bboxes(num_bboxes, h, w)
pred_instances.labels = torch.randint(0, num_class, (num_bboxes, ))
pred_instances.scores = torch.rand((num_bboxes, ))
pred_det_data_sample = DetDataSample()
pred_det_data_sample.pred_instances = pred_instances
det_local_visualizer.add_datasample(
'image',
image,
gt_det_data_sample,
pred_det_data_sample,
out_file=out_file)
self._assert_image_and_shape(out_file, (h, w * 2, 3))
det_local_visualizer.add_datasample(
'image',
image,
gt_det_data_sample,
pred_det_data_sample,
draw_gt=False,
out_file=out_file)
self._assert_image_and_shape(out_file, (h, w, 3))
det_local_visualizer.add_datasample(
'image',
image,
gt_det_data_sample,
pred_det_data_sample,
draw_pred=False,
out_file=out_file)
self._assert_image_and_shape(out_file, (h, w, 3))
# test gt_panoptic_seg and pred_panoptic_seg
det_local_visualizer.dataset_meta = dict(CLASSES=('1', '2'))
gt_sem_seg = _create_panoptic_data(num_bboxes, h, w)
panoptic_seg = PixelData(sem_seg=gt_sem_seg)
gt_det_data_sample = DetDataSample()
gt_det_data_sample.gt_panoptic_seg = panoptic_seg
pred_sem_seg = _create_panoptic_data(num_bboxes, h, w)
panoptic_seg = PixelData(sem_seg=pred_sem_seg)
pred_det_data_sample = DetDataSample()
pred_det_data_sample.pred_panoptic_seg = panoptic_seg
det_local_visualizer.add_datasample(
'image',
image,
gt_det_data_sample,
pred_det_data_sample,
out_file=out_file)
self._assert_image_and_shape(out_file, (h, w * 2, 3))
# class information must be provided
det_local_visualizer.dataset_meta = {}
with self.assertRaises(AssertionError):
det_local_visualizer.add_datasample(
'image',
image,
gt_det_data_sample,
pred_det_data_sample,
out_file=out_file)
def _assert_image_and_shape(self, out_file, out_shape):
assert os.path.exists(out_file)
drawn_img = cv2.imread(out_file)
assert drawn_img.shape == out_shape
os.remove(out_file)
|
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
T = TypeVar('T', bound='ImageTorchTensor')
@_register_proto(proto_type_name='image_torch_tensor')
class ImageTorchTensor(AbstractImageTensor, TorchTensor, metaclass=metaTorchAndNode):
"""
Subclass of TorchTensor, to represent an image tensor.
Adds image-specific features to the tensor.
    For instance the ability to convert the tensor back to image bytes, which are
    optimized to send over the wire.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import ImageTorchTensor, ImageUrl
class MyImageDoc(BaseDoc):
title: str
tensor: Optional[ImageTorchTensor]
url: Optional[ImageUrl]
bytes: Optional[bytes]
doc = MyImageDoc(
title='my_second_image_doc',
url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg",
)
doc.tensor = doc.url.load()
doc.bytes = doc.tensor.to_bytes()
"""
...
|
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
T = TypeVar('T', bound='ImageTorchTensor')
@_register_proto(proto_type_name='image_torch_tensor')
class ImageTorchTensor(AbstractImageTensor, TorchTensor, metaclass=metaTorchAndNode):
"""
Subclass of TorchTensor, to represent an image tensor.
Adds image-specific features to the tensor.
    For instance the ability to convert the tensor back to image bytes, which are
    optimized to send over the wire.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
from docarray import BaseDocument
from docarray.typing import ImageTorchTensor, ImageUrl
class MyImageDoc(BaseDocument):
title: str
tensor: Optional[ImageTorchTensor]
url: Optional[ImageUrl]
bytes: Optional[bytes]
doc = MyImageDoc(
title='my_second_image_doc',
url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg",
)
doc.tensor = doc.url.load()
doc.bytes = doc.tensor.to_bytes()
"""
...
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.evaluation.SparseBinaryClassificationEvaluator import (
SparseBinaryClassificationEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseEmbeddingSimilarityEvaluator import (
SparseEmbeddingSimilarityEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseInformationRetrievalEvaluator import (
SparseInformationRetrievalEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseMSEEvaluator import (
SparseMSEEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseNanoBEIREvaluator import (
SparseNanoBEIREvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseTripletEvaluator import (
SparseTripletEvaluator,
)
__all__ = [
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseBinaryClassificationEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTripletEvaluator",
]
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.evaluation.SparseEmbeddingSimilarityEvaluator import (
SparseEmbeddingSimilarityEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseInformationRetrievalEvaluator import (
SparseInformationRetrievalEvaluator,
)
__all__ = [
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
]
|
_base_ = './cascade-mask-rcnn_r50_fpn_ms-3x_coco.py'
model = dict(
# ResNeXt-101-32x8d model trained with Caffe2 at FB,
# so the mean and std need to be changed.
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[57.375, 57.120, 58.395],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=8,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
style='pytorch',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnext101_32x8d')))
|
_base_ = './cascade-mask-rcnn_r50_fpn_ms-3x_coco.py'
model = dict(
# ResNeXt-101-32x8d model trained with Caffe2 at FB,
# so the mean and std need to be changed.
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[57.375, 57.120, 58.395],
to_rgb=False,
pad_size_divisor=32),
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=8,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
style='pytorch',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnext101_32x8d')))
|
from keras.src.backend.config import backend
if backend() == "torch":
# When using the torch backend,
# torch needs to be imported first, otherwise it will segfault
# upon import.
import torch
from keras.src.api_export import keras_export
from keras.src.backend.common.dtypes import result_type
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.keras_tensor import any_symbolic_tensors
from keras.src.backend.common.keras_tensor import is_keras_tensor
from keras.src.backend.common.masking import get_keras_mask
from keras.src.backend.common.masking import set_keras_mask
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.common.stateless_scope import get_stateless_scope
from keras.src.backend.common.stateless_scope import in_stateless_scope
from keras.src.backend.common.symbolic_scope import SymbolicScope
from keras.src.backend.common.symbolic_scope import in_symbolic_scope
from keras.src.backend.common.variables import AutocastScope
from keras.src.backend.common.variables import Variable
from keras.src.backend.common.variables import get_autocast_scope
from keras.src.backend.common.variables import is_float_dtype
from keras.src.backend.common.variables import is_int_dtype
from keras.src.backend.common.variables import standardize_dtype
from keras.src.backend.common.variables import standardize_shape
from keras.src.backend.config import epsilon
from keras.src.backend.config import floatx
from keras.src.backend.config import image_data_format
from keras.src.backend.config import set_epsilon
from keras.src.backend.config import set_floatx
from keras.src.backend.config import set_image_data_format
from keras.src.backend.config import standardize_data_format
# Import backend functions.
if backend() == "tensorflow":
from keras.src.backend.tensorflow import * # noqa: F403
from keras.src.backend.tensorflow.core import Variable as BackendVariable
elif backend() == "jax":
from keras.src.backend.jax import * # noqa: F403
from keras.src.backend.jax.core import Variable as BackendVariable
elif backend() == "torch":
from keras.src.backend.torch import * # noqa: F403
from keras.src.backend.torch.core import Variable as BackendVariable
distribution_lib = None
elif backend() == "numpy":
from keras.src.backend.numpy import * # noqa: F403
from keras.src.backend.numpy.core import Variable as BackendVariable
distribution_lib = None
elif backend() == "openvino":
from keras.src.backend.openvino import * # noqa: F403
from keras.src.backend.openvino.core import Variable as BackendVariable
distribution_lib = None
else:
raise ValueError(f"Unable to import backend : {backend()}")
@keras_export("keras.Variable")
class Variable(BackendVariable): # noqa: F811
pass
backend_name_scope = name_scope # noqa: F405
@keras_export("keras.name_scope")
class name_scope(backend_name_scope):
pass
@keras_export("keras.device")
def device(device_name):
return device_scope(device_name) # noqa: F405
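# Usage sketch (editor's note, not part of the original module): `keras.device`
# wraps the backend device scope and is typically used as a context manager,
# e.g. (assuming a CPU device is available):
#
#     import keras
#     with keras.device("cpu:0"):
#         v = keras.Variable(initializer="zeros", shape=(2, 2))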
|
from keras.src.backend.config import backend
if backend() == "torch":
# When using the torch backend,
# torch needs to be imported first, otherwise it will segfault
# upon import.
import torch
from keras.src.api_export import keras_export
from keras.src.backend.common.dtypes import result_type
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.keras_tensor import any_symbolic_tensors
from keras.src.backend.common.keras_tensor import is_keras_tensor
from keras.src.backend.common.masking import get_keras_mask
from keras.src.backend.common.masking import set_keras_mask
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.common.stateless_scope import get_stateless_scope
from keras.src.backend.common.stateless_scope import in_stateless_scope
from keras.src.backend.common.symbolic_scope import SymbolicScope
from keras.src.backend.common.symbolic_scope import in_symbolic_scope
from keras.src.backend.common.variables import AutocastScope
from keras.src.backend.common.variables import Variable
from keras.src.backend.common.variables import get_autocast_scope
from keras.src.backend.common.variables import is_float_dtype
from keras.src.backend.common.variables import is_int_dtype
from keras.src.backend.common.variables import standardize_dtype
from keras.src.backend.common.variables import standardize_shape
from keras.src.backend.config import epsilon
from keras.src.backend.config import floatx
from keras.src.backend.config import image_data_format
from keras.src.backend.config import set_epsilon
from keras.src.backend.config import set_floatx
from keras.src.backend.config import set_image_data_format
from keras.src.backend.config import standardize_data_format
# Import backend functions.
if backend() == "tensorflow":
from keras.src.backend.tensorflow import * # noqa: F403
from keras.src.backend.tensorflow.core import Variable as BackendVariable
elif backend() == "jax":
from keras.src.backend.jax import * # noqa: F403
from keras.src.backend.jax.core import Variable as BackendVariable
elif backend() == "torch":
from keras.src.backend.torch import * # noqa: F403
from keras.src.backend.torch.core import Variable as BackendVariable
distribution_lib = None
elif backend() == "numpy":
from keras.src.backend.numpy import * # noqa: F403
from keras.src.backend.numpy.core import Variable as BackendVariable
distribution_lib = None
else:
raise ValueError(f"Unable to import backend : {backend()}")
@keras_export("keras.Variable")
class Variable(BackendVariable): # noqa: F811
pass
backend_name_scope = name_scope # noqa: F405
@keras_export("keras.name_scope")
class name_scope(backend_name_scope):
pass
@keras_export("keras.device")
def device(device_name):
return device_scope(device_name) # noqa: F405
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, auto_fp16, force_fp32
from mmdet.models.builder import HEADS, build_loss
@HEADS.register_module()
class FusedSemanticHead(BaseModule):
r"""Multi-level fused semantic segmentation head.
.. code-block:: none
in_1 -> 1x1 conv ---
|
in_2 -> 1x1 conv -- |
||
in_3 -> 1x1 conv - ||
||| /-> 1x1 conv (mask prediction)
in_4 -> 1x1 conv -----> 3x3 convs (*4)
| \-> 1x1 conv (feature)
in_5 -> 1x1 conv ---
""" # noqa: W605
def __init__(self,
num_ins,
fusion_level,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=183,
conv_cfg=None,
norm_cfg=None,
ignore_label=None,
loss_weight=None,
loss_seg=dict(
type='CrossEntropyLoss',
ignore_index=255,
loss_weight=0.2),
init_cfg=dict(
type='Kaiming', override=dict(name='conv_logits'))):
super(FusedSemanticHead, self).__init__(init_cfg)
self.num_ins = num_ins
self.fusion_level = fusion_level
self.num_convs = num_convs
self.in_channels = in_channels
self.conv_out_channels = conv_out_channels
self.num_classes = num_classes
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self.lateral_convs = nn.ModuleList()
for i in range(self.num_ins):
self.lateral_convs.append(
ConvModule(
self.in_channels,
self.in_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
inplace=False))
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = self.in_channels if i == 0 else conv_out_channels
self.convs.append(
ConvModule(
in_channels,
conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.conv_embedding = ConvModule(
conv_out_channels,
conv_out_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1)
if ignore_label:
loss_seg['ignore_index'] = ignore_label
if loss_weight:
loss_seg['loss_weight'] = loss_weight
if ignore_label or loss_weight:
            warnings.warn('``ignore_label`` and ``loss_weight`` will be '
                          'deprecated soon. Please set ``ignore_index`` and '
                          '``loss_weight`` in ``loss_seg`` instead.')
self.criterion = build_loss(loss_seg)
@auto_fp16()
def forward(self, feats):
x = self.lateral_convs[self.fusion_level](feats[self.fusion_level])
fused_size = tuple(x.shape[-2:])
for i, feat in enumerate(feats):
if i != self.fusion_level:
feat = F.interpolate(
feat, size=fused_size, mode='bilinear', align_corners=True)
# fix runtime error of "+=" inplace operation in PyTorch 1.10
x = x + self.lateral_convs[i](feat)
for i in range(self.num_convs):
x = self.convs[i](x)
mask_pred = self.conv_logits(x)
x = self.conv_embedding(x)
return mask_pred, x
@force_fp32(apply_to=('mask_pred', ))
def loss(self, mask_pred, labels):
labels = labels.squeeze(1).long()
loss_semantic_seg = self.criterion(mask_pred, labels)
return loss_semantic_seg
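# Usage sketch (editor's note, not part of the original module), assuming five
# 256-channel FPN levels; all levels are fused at the `fusion_level` resolution:
#
#     head = FusedSemanticHead(num_ins=5, fusion_level=1, num_classes=183)
#     feats = [torch.randn(1, 256, 64 // 2 ** i, 64 // 2 ** i) for i in range(5)]
#     mask_pred, semantic_feat = head(feats)  # (1, 183, 32, 32), (1, 256, 32, 32)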
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, auto_fp16, force_fp32
from mmdet.models.builder import HEADS, build_loss
@HEADS.register_module()
class FusedSemanticHead(BaseModule):
r"""Multi-level fused semantic segmentation head.
.. code-block:: none
in_1 -> 1x1 conv ---
|
in_2 -> 1x1 conv -- |
||
in_3 -> 1x1 conv - ||
||| /-> 1x1 conv (mask prediction)
in_4 -> 1x1 conv -----> 3x3 convs (*4)
| \-> 1x1 conv (feature)
in_5 -> 1x1 conv ---
""" # noqa: W605
def __init__(self,
num_ins,
fusion_level,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=183,
conv_cfg=None,
norm_cfg=None,
ignore_label=None,
loss_weight=None,
loss_seg=dict(
type='CrossEntropyLoss',
ignore_index=255,
loss_weight=0.2),
init_cfg=dict(
type='Kaiming', override=dict(name='conv_logits'))):
super(FusedSemanticHead, self).__init__(init_cfg)
self.num_ins = num_ins
self.fusion_level = fusion_level
self.num_convs = num_convs
self.in_channels = in_channels
self.conv_out_channels = conv_out_channels
self.num_classes = num_classes
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self.lateral_convs = nn.ModuleList()
for i in range(self.num_ins):
self.lateral_convs.append(
ConvModule(
self.in_channels,
self.in_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
inplace=False))
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = self.in_channels if i == 0 else conv_out_channels
self.convs.append(
ConvModule(
in_channels,
conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.conv_embedding = ConvModule(
conv_out_channels,
conv_out_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1)
if ignore_label:
loss_seg['ignore_index'] = ignore_label
if loss_weight:
loss_seg['loss_weight'] = loss_weight
if ignore_label or loss_weight:
            warnings.warn('``ignore_label`` and ``loss_weight`` will be '
                          'deprecated soon. Please set ``ignore_index`` and '
                          '``loss_weight`` in ``loss_seg`` instead.')
self.criterion = build_loss(loss_seg)
@auto_fp16()
def forward(self, feats):
x = self.lateral_convs[self.fusion_level](feats[self.fusion_level])
fused_size = tuple(x.shape[-2:])
for i, feat in enumerate(feats):
if i != self.fusion_level:
feat = F.interpolate(
feat, size=fused_size, mode='bilinear', align_corners=True)
x += self.lateral_convs[i](feat)
for i in range(self.num_convs):
x = self.convs[i](x)
mask_pred = self.conv_logits(x)
x = self.conv_embedding(x)
return mask_pred, x
@force_fp32(apply_to=('mask_pred', ))
def loss(self, mask_pred, labels):
labels = labels.squeeze(1).long()
loss_semantic_seg = self.criterion(mask_pred, labels)
return loss_semantic_seg
|
_base_ = './fcos_r50_fpn_gn-head-center-normbbox-centeronreg-giou_8xb8-amp-lsj-200e_coco.py' # noqa
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(in_channels=[64, 128, 256, 512]))
|
_base_ = './fcos_center-normbbox-centeronreg-giou_r50_fpn_gn-head_lsj_200e_8x8_fp16_coco.py' # noqa
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(in_channels=[64, 128, 256, 512]))
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Optional, Union
from docarray.utils._internal.query_language.lookup import (
LookupLeaf,
LookupNode,
LookupTreeElem,
Q,
)
LOGICAL_OPERATORS: Dict[str, Union[str, bool]] = {
'$and': 'and',
'$or': 'or',
'$not': True,
}
COMPARISON_OPERATORS = {
'$lt': 'lt',
'$gt': 'gt',
'$lte': 'lte',
'$gte': 'gte',
'$eq': 'exact',
'$neq': 'neq',
'$exists': 'exists',
}
REGEX_OPERATORS = {'$regex': 'regex'}
ARRAY_OPERATORS = {'$size': 'size'}
MEMBERSHIP_OPERATORS = {'$in': 'in', '$nin': 'nin'}
SUPPORTED_OPERATORS = {
**COMPARISON_OPERATORS,
**ARRAY_OPERATORS,
**REGEX_OPERATORS,
**MEMBERSHIP_OPERATORS,
}
def _parse_lookups(
data: Union[Dict, List] = {}, root_node: Optional[LookupTreeElem] = None
) -> Optional[LookupTreeElem]:
if isinstance(data, dict):
for key, value in data.items():
node: Optional[LookupTreeElem] = None
if isinstance(root_node, LookupLeaf):
root = LookupNode()
root.add_child(root_node)
root_node = root
if key in LOGICAL_OPERATORS:
if key == '$not':
node = LookupNode(negate=True)
else:
node = LookupNode(op=LOGICAL_OPERATORS[key])
node = _parse_lookups(value, root_node=node)
elif key.startswith('$'):
raise ValueError(
f'The operator {key} is not supported yet,'
f' please double check the given filters!'
)
else:
if not value or not isinstance(value, dict):
raise ValueError(
'''Not a valid query. It should follow the format:
{ <field1>: { <operator1>: <value1> }, ... }
'''
)
items = list(value.items())
if len(items) == 1:
op, val = items[0]
if op in LOGICAL_OPERATORS:
if op == '$not':
node = LookupNode(negate=True)
else:
node = LookupNode(op=LOGICAL_OPERATORS[op])
node = _parse_lookups(val, root_node=node)
elif op in SUPPORTED_OPERATORS:
node = Q(**{f'{key}.{SUPPORTED_OPERATORS[op]}': val})
else:
raise ValueError(
f'The operator {op} is not supported yet, '
f'please double check the given filters!'
)
else:
node = LookupNode()
for op, val in items:
_node = _parse_lookups({key: {op: val}})
node.add_child(_node)
if root_node and node:
if isinstance(root_node, LookupNode):
root_node.add_child(node)
elif node:
root_node = node
elif isinstance(data, list):
for d in data:
node = _parse_lookups(d)
if root_node and node:
if isinstance(root_node, LookupNode):
root_node.add_child(node)
elif node:
root_node = node
else:
raise ValueError(f'The query is illegal: `{data}`')
return root_node
class QueryParser:
"""A class to parse dict condition to lookup query."""
def __init__(self, conditions: Union[Dict, List] = {}):
self.conditions = conditions
self.lookup_groups = _parse_lookups(self.conditions)
def evaluate(self, doc: Any) -> bool:
return self.lookup_groups.evaluate(doc) if self.lookup_groups else True
def __call__(self, doc: Any) -> bool:
return self.evaluate(doc)
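# Usage sketch (editor's note, not part of the original module); `Product` is a
# hypothetical document class and the filter uses the operators defined above:
#
#     from docarray import BaseDoc
#
#     class Product(BaseDoc):
#         price: float
#
#     parser = QueryParser({'price': {'$lt': 10}})
#     assert parser(Product(price=5.0))       # 5.0 < 10
#     assert not parser(Product(price=50.0))  # 50.0 >= 10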
|
from typing import Any, Dict, List, Optional, Union
from docarray.utils._internal.query_language.lookup import (
LookupLeaf,
LookupNode,
LookupTreeElem,
Q,
)
LOGICAL_OPERATORS: Dict[str, Union[str, bool]] = {
'$and': 'and',
'$or': 'or',
'$not': True,
}
COMPARISON_OPERATORS = {
'$lt': 'lt',
'$gt': 'gt',
'$lte': 'lte',
'$gte': 'gte',
'$eq': 'exact',
'$neq': 'neq',
'$exists': 'exists',
}
REGEX_OPERATORS = {'$regex': 'regex'}
ARRAY_OPERATORS = {'$size': 'size'}
MEMBERSHIP_OPERATORS = {'$in': 'in', '$nin': 'nin'}
SUPPORTED_OPERATORS = {
**COMPARISON_OPERATORS,
**ARRAY_OPERATORS,
**REGEX_OPERATORS,
**MEMBERSHIP_OPERATORS,
}
def _parse_lookups(
data: Union[Dict, List] = {}, root_node: Optional[LookupTreeElem] = None
) -> Optional[LookupTreeElem]:
if isinstance(data, dict):
for key, value in data.items():
node: Optional[LookupTreeElem] = None
if isinstance(root_node, LookupLeaf):
root = LookupNode()
root.add_child(root_node)
root_node = root
if key in LOGICAL_OPERATORS:
if key == '$not':
node = LookupNode(negate=True)
else:
node = LookupNode(op=LOGICAL_OPERATORS[key])
node = _parse_lookups(value, root_node=node)
elif key.startswith('$'):
raise ValueError(
f'The operator {key} is not supported yet,'
f' please double check the given filters!'
)
else:
if not value or not isinstance(value, dict):
raise ValueError(
'''Not a valid query. It should follow the format:
{ <field1>: { <operator1>: <value1> }, ... }
'''
)
items = list(value.items())
if len(items) == 1:
op, val = items[0]
if op in LOGICAL_OPERATORS:
if op == '$not':
node = LookupNode(negate=True)
else:
node = LookupNode(op=LOGICAL_OPERATORS[op])
node = _parse_lookups(val, root_node=node)
elif op in SUPPORTED_OPERATORS:
node = Q(**{f'{key}.{SUPPORTED_OPERATORS[op]}': val})
else:
raise ValueError(
f'The operator {op} is not supported yet, '
f'please double check the given filters!'
)
else:
node = LookupNode()
for op, val in items:
_node = _parse_lookups({key: {op: val}})
node.add_child(_node)
if root_node and node:
if isinstance(root_node, LookupNode):
root_node.add_child(node)
elif node:
root_node = node
elif isinstance(data, list):
for d in data:
node = _parse_lookups(d)
if root_node and node:
if isinstance(root_node, LookupNode):
root_node.add_child(node)
elif node:
root_node = node
else:
raise ValueError(f'The query is illegal: `{data}`')
return root_node
class QueryParser:
"""A class to parse dict condition to lookup query."""
def __init__(self, conditions: Union[Dict, List] = {}):
self.conditions = conditions
self.lookup_groups = _parse_lookups(self.conditions)
def evaluate(self, doc: Any) -> bool:
return self.lookup_groups.evaluate(doc) if self.lookup_groups else True
def __call__(self, doc: Any) -> bool:
return self.evaluate(doc)
|
from __future__ import annotations
try:
from typing import Self
except ImportError:
from typing_extensions import Self
import torch
from torch import nn
from sentence_transformers.models.Module import Module
class CNN(Module):
"""CNN-layer with multiple kernel-sizes over the word embeddings"""
config_keys: list[str] = ["in_word_embedding_dimension", "out_channels", "kernel_sizes"]
config_file_name: str = "cnn_config.json"
def __init__(
self,
in_word_embedding_dimension: int,
out_channels: int = 256,
kernel_sizes: list[int] = [1, 3, 5],
        stride_sizes: list[int] | None = None,
):
nn.Module.__init__(self)
self.in_word_embedding_dimension = in_word_embedding_dimension
self.out_channels = out_channels
self.kernel_sizes = kernel_sizes
self.embeddings_dimension = out_channels * len(kernel_sizes)
self.convs = nn.ModuleList()
in_channels = in_word_embedding_dimension
if stride_sizes is None:
stride_sizes = [1] * len(kernel_sizes)
for kernel_size, stride in zip(kernel_sizes, stride_sizes):
padding_size = int((kernel_size - 1) / 2)
conv = nn.Conv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding_size,
)
self.convs.append(conv)
def forward(self, features):
token_embeddings = features["token_embeddings"]
token_embeddings = token_embeddings.transpose(1, -1)
vectors = [conv(token_embeddings) for conv in self.convs]
out = torch.cat(vectors, 1).transpose(1, -1)
features.update({"token_embeddings": out})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def save(self, output_path: str, *args, safe_serialization: bool = True, **kwargs) -> None:
self.save_config(output_path)
self.save_torch_weights(output_path, safe_serialization=safe_serialization)
@classmethod
def load(
cls,
model_name_or_path: str,
subfolder: str = "",
token: bool | str | None = None,
cache_folder: str | None = None,
revision: str | None = None,
local_files_only: bool = False,
**kwargs,
) -> Self:
hub_kwargs = {
"subfolder": subfolder,
"token": token,
"cache_folder": cache_folder,
"revision": revision,
"local_files_only": local_files_only,
}
config = cls.load_config(model_name_or_path=model_name_or_path, **hub_kwargs)
model = cls(**config)
model = cls.load_torch_weights(model_name_or_path=model_name_or_path, model=model, **hub_kwargs)
return model
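# --- Usage sketch (not part of the original module) ---
# Hedged example of how this layer transforms token embeddings; the batch size,
# sequence length and embedding dimension below are illustrative.
if __name__ == "__main__":
    cnn = CNN(in_word_embedding_dimension=300, out_channels=256, kernel_sizes=[1, 3, 5])
    features = {"token_embeddings": torch.randn(8, 32, 300)}  # (batch, seq_len, dim)
    out = cnn(features)
    # Each kernel size contributes `out_channels` channels, so the output
    # dimension is 256 * 3 = 768 == cnn.get_word_embedding_dimension().
    assert out["token_embeddings"].shape == (8, 32, 768)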
|
from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import nn
class CNN(nn.Module):
"""CNN-layer with multiple kernel-sizes over the word embeddings"""
def __init__(
self,
in_word_embedding_dimension: int,
out_channels: int = 256,
kernel_sizes: list[int] = [1, 3, 5],
        stride_sizes: list[int] | None = None,
):
nn.Module.__init__(self)
self.config_keys = ["in_word_embedding_dimension", "out_channels", "kernel_sizes"]
self.in_word_embedding_dimension = in_word_embedding_dimension
self.out_channels = out_channels
self.kernel_sizes = kernel_sizes
self.embeddings_dimension = out_channels * len(kernel_sizes)
self.convs = nn.ModuleList()
in_channels = in_word_embedding_dimension
if stride_sizes is None:
stride_sizes = [1] * len(kernel_sizes)
for kernel_size, stride in zip(kernel_sizes, stride_sizes):
padding_size = int((kernel_size - 1) / 2)
conv = nn.Conv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding_size,
)
self.convs.append(conv)
def forward(self, features):
token_embeddings = features["token_embeddings"]
token_embeddings = token_embeddings.transpose(1, -1)
vectors = [conv(token_embeddings) for conv in self.convs]
out = torch.cat(vectors, 1).transpose(1, -1)
features.update({"token_embeddings": out})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def tokenize(self, text: str, **kwargs) -> list[int]:
raise NotImplementedError()
def save(self, output_path: str, safe_serialization: bool = True):
with open(os.path.join(output_path, "cnn_config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "cnn_config.json")) as fIn:
config = json.load(fIn)
model = CNN(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(
os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"), weights_only=True
)
)
return model
|
# mypy: allow-untyped-defs
import sys
from contextlib import contextmanager
from typing import TYPE_CHECKING
import torch
from torch.backends import (
__allow_nonbracketed_mutation,
_FP32Precision,
_get_fp32_precision_getter,
_set_fp32_precision_setter,
ContextProp,
PropModule,
)
def is_available():
r"""Return whether PyTorch is built with MKL-DNN support."""
return torch._C._has_mkldnn
VERBOSE_OFF = 0
VERBOSE_ON = 1
VERBOSE_ON_CREATION = 2
class verbose:
"""
    On-demand oneDNN (formerly MKL-DNN) verbose functionality.
    To make it easier to debug performance issues, oneDNN can dump verbose
    messages containing information like kernel size, input data size and
    execution duration while executing the kernel. The verbose functionality
    can also be enabled via an environment variable named `DNNL_VERBOSE`; however,
    that approach dumps messages at every step, which produces a large amount
    of output. Moreover, for investigating performance issues, verbose messages
    from a single iteration are generally enough.
    This on-demand verbose functionality makes it possible to control the scope
    of verbose message dumping. In the following example, verbose messages
    will be dumped out for the second inference only.
.. highlight:: python
.. code-block:: python
import torch
model(data)
with torch.backends.mkldnn.verbose(torch.backends.mkldnn.VERBOSE_ON):
model(data)
Args:
level: Verbose level
- ``VERBOSE_OFF``: Disable verbosing
- ``VERBOSE_ON``: Enable verbosing
- ``VERBOSE_ON_CREATION``: Enable verbosing, including oneDNN kernel creation
"""
def __init__(self, level):
self.level = level
def __enter__(self):
if self.level == VERBOSE_OFF:
return
st = torch._C._verbose.mkldnn_set_verbose(self.level)
assert (
st
), "Failed to set MKLDNN into verbose mode. Please consider to disable this verbose scope."
return self
def __exit__(self, exc_type, exc_val, exc_tb):
torch._C._verbose.mkldnn_set_verbose(VERBOSE_OFF)
return False
def set_flags(
_enabled=None, _deterministic=None, _allow_tf32=None, _fp32_precision="none"
):
orig_flags = (
torch._C._get_mkldnn_enabled(),
torch._C._get_mkldnn_deterministic(),
torch._C._get_onednn_allow_tf32(),
torch._C._get_fp32_precision_getter("mkldnn", "all"),
)
if _enabled is not None:
torch._C._set_mkldnn_enabled(_enabled)
if _deterministic is not None:
torch._C._set_mkldnn_deterministic(_deterministic)
if _allow_tf32 is not None:
torch._C._set_onednn_allow_tf32(_allow_tf32)
if _fp32_precision is not None:
torch._C._set_fp32_precision_setter("mkldnn", "all", _fp32_precision)
return orig_flags
@contextmanager
def flags(enabled=False, deterministic=False, allow_tf32=True, fp32_precision="none"):
with __allow_nonbracketed_mutation():
orig_flags = set_flags(enabled, deterministic, allow_tf32, fp32_precision)
try:
yield
finally:
with __allow_nonbracketed_mutation():
set_flags(*orig_flags)
class MkldnnModule(PropModule):
def __init__(self, m, name):
super().__init__(m, name)
def is_available(self):
return is_available()
enabled = ContextProp(torch._C._get_mkldnn_enabled, torch._C._set_mkldnn_enabled)
deterministic = ContextProp(
torch._C._get_mkldnn_deterministic, torch._C._set_mkldnn_deterministic
)
allow_tf32 = ContextProp(
torch._C._get_onednn_allow_tf32, torch._C._set_onednn_allow_tf32
)
matmul = _FP32Precision("mkldnn", "matmul")
conv = _FP32Precision("mkldnn", "conv")
rnn = _FP32Precision("mkldnn", "rnn")
fp32_precision = ContextProp(
_get_fp32_precision_getter("mkldnn", "all"),
_set_fp32_precision_setter("generic", "all"),
)
if TYPE_CHECKING:
enabled: ContextProp
deterministic: ContextProp
allow_tf32: ContextProp
sys.modules[__name__] = MkldnnModule(sys.modules[__name__], __name__)
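# --- Usage sketch (not part of the original module) ---
# Hedged example of the `flags` context manager defined above; it assumes a
# PyTorch build with MKL-DNN support and temporarily disables the backend,
# restoring the previous flags on exit.
if __name__ == "__main__":
    if is_available():
        with flags(enabled=False):
            pass  # ops executed in this block will not use the oneDNN backend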
|
# mypy: allow-untyped-defs
import sys
from contextlib import contextmanager
from typing import TYPE_CHECKING
import torch
from torch.backends import __allow_nonbracketed_mutation, ContextProp, PropModule
def is_available():
r"""Return whether PyTorch is built with MKL-DNN support."""
return torch._C._has_mkldnn
VERBOSE_OFF = 0
VERBOSE_ON = 1
VERBOSE_ON_CREATION = 2
class verbose:
"""
    On-demand oneDNN (formerly MKL-DNN) verbose functionality.
    To make it easier to debug performance issues, oneDNN can dump verbose
    messages containing information like kernel size, input data size and
    execution duration while executing the kernel. The verbose functionality
    can also be enabled via an environment variable named `DNNL_VERBOSE`; however,
    that approach dumps messages at every step, which produces a large amount
    of output. Moreover, for investigating performance issues, verbose messages
    from a single iteration are generally enough.
    This on-demand verbose functionality makes it possible to control the scope
    of verbose message dumping. In the following example, verbose messages
    will be dumped out for the second inference only.
.. highlight:: python
.. code-block:: python
import torch
model(data)
with torch.backends.mkldnn.verbose(torch.backends.mkldnn.VERBOSE_ON):
model(data)
Args:
level: Verbose level
- ``VERBOSE_OFF``: Disable verbosing
- ``VERBOSE_ON``: Enable verbosing
- ``VERBOSE_ON_CREATION``: Enable verbosing, including oneDNN kernel creation
"""
def __init__(self, level):
self.level = level
def __enter__(self):
if self.level == VERBOSE_OFF:
return
st = torch._C._verbose.mkldnn_set_verbose(self.level)
assert (
st
), "Failed to set MKLDNN into verbose mode. Please consider to disable this verbose scope."
return self
def __exit__(self, exc_type, exc_val, exc_tb):
torch._C._verbose.mkldnn_set_verbose(VERBOSE_OFF)
return False
def set_flags(_enabled=None, _deterministic=None, _allow_tf32=None):
orig_flags = (
torch._C._get_mkldnn_enabled(),
torch._C._get_mkldnn_deterministic(),
torch._C._get_onednn_allow_tf32(),
)
if _enabled is not None:
torch._C._set_mkldnn_enabled(_enabled)
if _deterministic is not None:
torch._C._set_mkldnn_deterministic(_deterministic)
if _allow_tf32 is not None:
torch._C._set_onednn_allow_tf32(_allow_tf32)
return orig_flags
@contextmanager
def flags(enabled=False, deterministic=False, allow_tf32=True):
with __allow_nonbracketed_mutation():
orig_flags = set_flags(enabled, deterministic, allow_tf32)
try:
yield
finally:
with __allow_nonbracketed_mutation():
set_flags(*orig_flags)
class MkldnnModule(PropModule):
def __init__(self, m, name):
super().__init__(m, name)
def is_available(self):
return is_available()
enabled = ContextProp(torch._C._get_mkldnn_enabled, torch._C._set_mkldnn_enabled)
deterministic = ContextProp(
torch._C._get_mkldnn_deterministic, torch._C._set_mkldnn_deterministic
)
allow_tf32 = ContextProp(
torch._C._get_onednn_allow_tf32, torch._C._set_onednn_allow_tf32
)
if TYPE_CHECKING:
enabled: ContextProp
deterministic: ContextProp
allow_tf32: ContextProp
sys.modules[__name__] = MkldnnModule(sys.modules[__name__], __name__)
|
from llama_index.core.schema import NodeRelationship, RelatedNodeInfo, TextNode
from llama_index.vector_stores.lancedb import LanceDBVectorStore
from llama_index.core import VectorStoreIndex
import lance # noqa: F401
import pytest
import pytest_asyncio
try:
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from lancedb.rerankers import LinearCombinationReranker
deps = True
except ImportError:
deps = None
@pytest.mark.skipif(
deps is None,
reason="Need to install lancedb and huggingface locally to run this test.",
)
@pytest_asyncio.fixture
async def index() -> VectorStoreIndex:
vector_store = LanceDBVectorStore(
overfetch_factor=1,
mode="overwrite",
reranker=LinearCombinationReranker(weight=0.3),
)
nodes = [
TextNode(
text="test1",
id_="11111111-1111-1111-1111-111111111111",
relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test-0")},
),
TextNode(
text="test2",
id_="22222222-2222-2222-2222-222222222222",
relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test-1")},
),
TextNode(
text="test3",
id_="33333333-3333-3333-3333-333333333333",
relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test-2")},
),
]
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
for node in nodes:
node_embedding = embed_model.get_text_embedding(
node.get_content(metadata_mode="all")
)
node.embedding = node_embedding
vector_store.add(nodes=nodes)
return VectorStoreIndex.from_vector_store(vector_store, embed_model=embed_model)
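# --- Usage sketch (not part of the original test module) ---
# A hedged example of a test that would consume the fixture above; the query
# string and the expected top-k are illustrative.
@pytest.mark.skipif(
    deps is None,
    reason="Need to install lancedb and huggingface locally to run this test.",
)
@pytest.mark.asyncio
async def test_retrieve_top_k(index: VectorStoreIndex) -> None:
    retriever = index.as_retriever(similarity_top_k=2)
    results = retriever.retrieve("test1")
    assert len(results) == 2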
|
from llama_index.core.schema import NodeRelationship, RelatedNodeInfo, TextNode
from llama_index.vector_stores.lancedb import LanceDBVectorStore
from llama_index.core import VectorStoreIndex
import pytest
import pytest_asyncio
try:
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from lancedb.rerankers import LinearCombinationReranker
deps = True
except ImportError:
deps = None
@pytest.mark.skipif(
deps is None,
reason="Need to install lancedb and huggingface locally to run this test.",
)
@pytest_asyncio.fixture
async def index() -> VectorStoreIndex:
vector_store = LanceDBVectorStore(
overfetch_factor=1,
mode="overwrite",
reranker=LinearCombinationReranker(weight=0.3),
)
nodes = [
TextNode(
text="test1",
id_="11111111-1111-1111-1111-111111111111",
relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test-0")},
),
TextNode(
text="test2",
id_="22222222-2222-2222-2222-222222222222",
relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test-1")},
),
TextNode(
text="test3",
id_="33333333-3333-3333-3333-333333333333",
relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test-2")},
),
]
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
for node in nodes:
node_embedding = embed_model.get_text_embedding(
node.get_content(metadata_mode="all")
)
node.embedding = node_embedding
vector_store.add(nodes=nodes)
return VectorStoreIndex.from_vector_store(vector_store, embed_model=embed_model)
|
"""
Train XGBoost with cat_in_the_dat dataset
=========================================
A simple demo for categorical data support using dataset from Kaggle categorical data
tutorial.
The excellent tutorial is at:
https://www.kaggle.com/shahules/an-overview-of-encoding-techniques
And the data can be found at:
https://www.kaggle.com/shahules/an-overview-of-encoding-techniques/data
.. versionadded:: 1.6.0
See Also
--------
- :doc:`Tutorial </tutorials/categorical>`
- :ref:`sphx_glr_python_examples_categorical.py`
- :ref:`sphx_glr_python_examples_cat_pipeline.py`
"""
from __future__ import annotations
import os
from tempfile import TemporaryDirectory
from time import time
import pandas as pd
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
import xgboost as xgb
def load_cat_in_the_dat() -> tuple[pd.DataFrame, pd.Series]:
"""Assuming you have already downloaded the data into `input` directory."""
df_train = pd.read_csv("./input/cat-in-the-dat/train.csv")
print(
"train data set has got {} rows and {} columns".format(
df_train.shape[0], df_train.shape[1]
)
)
X = df_train.drop(["target"], axis=1)
y = df_train["target"]
for i in range(0, 5):
X["bin_" + str(i)] = X["bin_" + str(i)].astype("category")
for i in range(0, 5):
X["nom_" + str(i)] = X["nom_" + str(i)].astype("category")
for i in range(5, 10):
X["nom_" + str(i)] = X["nom_" + str(i)].apply(int, base=16)
for i in range(0, 6):
X["ord_" + str(i)] = X["ord_" + str(i)].astype("category")
print(
"train data set has got {} rows and {} columns".format(X.shape[0], X.shape[1])
)
return X, y
params = {
"tree_method": "hist",
"device": "cuda",
"n_estimators": 32,
"colsample_bylevel": 0.7,
}
def categorical_model(X: pd.DataFrame, y: pd.Series, output_dir: str) -> None:
"""Train using builtin categorical data support from XGBoost"""
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=1994, test_size=0.2
)
    # Be aware that the encoding for X_train and X_test is the same here. In
    # practice, we should use an encoder (e.g. sklearn's OrdinalEncoder) to obtain
    # the categorical values.
# Specify `enable_categorical` to True.
clf = xgb.XGBClassifier(
**params,
eval_metric="auc",
enable_categorical=True,
max_cat_to_onehot=1, # We use optimal partitioning exclusively
)
clf.fit(X_train, y_train, eval_set=[(X_test, y_test), (X_train, y_train)])
clf.save_model(os.path.join(output_dir, "categorical.json"))
y_score = clf.predict_proba(X_test)[:, 1] # proba of positive samples
auc = roc_auc_score(y_test, y_score)
print("AUC of using builtin categorical data support:", auc)
def onehot_encoding_model(X: pd.DataFrame, y: pd.Series, output_dir: str) -> None:
"""Train using one-hot encoded data."""
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=42, test_size=0.2
)
# Specify `enable_categorical` to False as we are using encoded data.
clf = xgb.XGBClassifier(**params, eval_metric="auc", enable_categorical=False)
clf.fit(
X_train,
y_train,
eval_set=[(X_test, y_test), (X_train, y_train)],
)
clf.save_model(os.path.join(output_dir, "one-hot.json"))
y_score = clf.predict_proba(X_test)[:, 1] # proba of positive samples
auc = roc_auc_score(y_test, y_score)
print("AUC of using onehot encoding:", auc)
if __name__ == "__main__":
X, y = load_cat_in_the_dat()
with TemporaryDirectory() as tmpdir:
start = time()
categorical_model(X, y, tmpdir)
end = time()
print("Duration:categorical", end - start)
X = pd.get_dummies(X)
start = time()
onehot_encoding_model(X, y, tmpdir)
end = time()
print("Duration:onehot", end - start)
|
"""
Train XGBoost with cat_in_the_dat dataset
=========================================
A simple demo for categorical data support using dataset from Kaggle categorical data
tutorial.
The excellent tutorial is at:
https://www.kaggle.com/shahules/an-overview-of-encoding-techniques
And the data can be found at:
https://www.kaggle.com/shahules/an-overview-of-encoding-techniques/data
.. versionadded:: 1.6.0
See Also
--------
- :doc:`Tutorial </tutorials/categorical>`
- :ref:`sphx_glr_python_examples_categorical.py`
- :ref:`sphx_glr_python_examples_cat_pipeline.py`
"""
from __future__ import annotations
import os
from tempfile import TemporaryDirectory
from time import time
import pandas as pd
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
import xgboost as xgb
def load_cat_in_the_dat() -> tuple[pd.DataFrame, pd.Series]:
"""Assuming you have already downloaded the data into `input` directory."""
df_train = pd.read_csv("./input/cat-in-the-dat/train.csv")
print(
"train data set has got {} rows and {} columns".format(
df_train.shape[0], df_train.shape[1]
)
)
X = df_train.drop(["target"], axis=1)
y = df_train["target"]
for i in range(0, 5):
X["bin_" + str(i)] = X["bin_" + str(i)].astype("category")
for i in range(0, 5):
X["nom_" + str(i)] = X["nom_" + str(i)].astype("category")
for i in range(5, 10):
X["nom_" + str(i)] = X["nom_" + str(i)].apply(int, base=16)
for i in range(0, 6):
X["ord_" + str(i)] = X["ord_" + str(i)].astype("category")
print(
"train data set has got {} rows and {} columns".format(X.shape[0], X.shape[1])
)
return X, y
params = {
"tree_method": "hist",
"device": "cuda",
"n_estimators": 32,
"colsample_bylevel": 0.7,
}
def categorical_model(X: pd.DataFrame, y: pd.Series, output_dir: str) -> None:
"""Train using builtin categorical data support from XGBoost"""
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=1994, test_size=0.2
)
# Specify `enable_categorical` to True.
clf = xgb.XGBClassifier(
**params,
eval_metric="auc",
enable_categorical=True,
max_cat_to_onehot=1, # We use optimal partitioning exclusively
)
clf.fit(X_train, y_train, eval_set=[(X_test, y_test), (X_train, y_train)])
clf.save_model(os.path.join(output_dir, "categorical.json"))
y_score = clf.predict_proba(X_test)[:, 1] # proba of positive samples
auc = roc_auc_score(y_test, y_score)
print("AUC of using builtin categorical data support:", auc)
def onehot_encoding_model(X: pd.DataFrame, y: pd.Series, output_dir: str) -> None:
"""Train using one-hot encoded data."""
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=42, test_size=0.2
)
# Specify `enable_categorical` to False as we are using encoded data.
clf = xgb.XGBClassifier(**params, eval_metric="auc", enable_categorical=False)
clf.fit(
X_train,
y_train,
eval_set=[(X_test, y_test), (X_train, y_train)],
)
clf.save_model(os.path.join(output_dir, "one-hot.json"))
y_score = clf.predict_proba(X_test)[:, 1] # proba of positive samples
auc = roc_auc_score(y_test, y_score)
print("AUC of using onehot encoding:", auc)
if __name__ == "__main__":
X, y = load_cat_in_the_dat()
with TemporaryDirectory() as tmpdir:
start = time()
categorical_model(X, y, tmpdir)
end = time()
print("Duration:categorical", end - start)
X = pd.get_dummies(X)
start = time()
onehot_encoding_model(X, y, tmpdir)
end = time()
print("Duration:onehot", end - start)
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
model = dict(
type='MaskRCNN',
backbone=dict(
_delete_=True,
type='SwinTransformer',
embed_dims=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
patch_norm=True,
out_indices=(0, 1, 2, 3),
with_cp=False,
convert_weights=True,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
neck=dict(in_channels=[96, 192, 384, 768]))
optimizer = dict(
_delete_=True,
type='AdamW',
lr=0.0001,
betas=(0.9, 0.999),
weight_decay=0.05,
paramwise_cfg=dict(
custom_keys={
'absolute_pos_embed': dict(decay_mult=0.),
'relative_position_bias_table': dict(decay_mult=0.),
'norm': dict(decay_mult=0.)
}))
lr_config = dict(warmup_iters=1000, step=[8, 11])
runner = dict(max_epochs=12)
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
pretrained = 'https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_tiny_224_b16x64_300e_imagenet_20210616_090925-66df6be6.pth' # noqa
model = dict(
type='MaskRCNN',
backbone=dict(
_delete_=True,
type='SwinTransformer',
embed_dims=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
patch_norm=True,
out_indices=(0, 1, 2, 3),
with_cp=False,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
neck=dict(in_channels=[96, 192, 384, 768]))
optimizer = dict(
_delete_=True,
type='AdamW',
lr=0.0001,
betas=(0.9, 0.999),
weight_decay=0.05,
paramwise_cfg=dict(
custom_keys={
'absolute_pos_embed': dict(decay_mult=0.),
'relative_position_bias_table': dict(decay_mult=0.),
'norm': dict(decay_mult=0.)
}))
lr_config = dict(warmup_iters=1000, step=[8, 11])
runner = dict(max_epochs=12)
|
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union
from docarray.base_doc import BaseDoc
from docarray.typing.tensor.tensor import AnyTensor
from docarray.utils._internal.misc import import_library
T = TypeVar('T', bound='VerticesAndFaces')
class VerticesAndFaces(BaseDoc):
"""
Document for handling the tensor data of a [`Mesh3D`][docarray.documents.mesh.Mesh3D] object.
A VerticesAndFaces Document can contain:
- an [`AnyTensor`](../../../../api_references/typing/tensor/tensor)
containing the vertices information (`VerticesAndFaces.vertices`)
- an [`AnyTensor`](../../../../api_references/typing/tensor/tensor)
containing the faces information (`VerticesAndFaces.faces`)
"""
vertices: AnyTensor
faces: AnyTensor
@classmethod
def _docarray_validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
return super().validate(value)
def display(self) -> None:
"""
Plot mesh consisting of vertices and faces.
"""
if TYPE_CHECKING:
import trimesh
else:
trimesh = import_library('trimesh', raise_error=True)
from IPython.display import display
if self.vertices is None or self.faces is None:
raise ValueError(
'Can\'t display mesh from tensors when the vertices and/or faces '
'are None.'
)
mesh = trimesh.Trimesh(vertices=self.vertices, faces=self.faces)
display(mesh.show())
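# --- Usage sketch (not part of the original module) ---
# Hedged example of constructing the document above with numpy arrays
# (a single tetrahedron; the values are illustrative).
def _tetrahedron_example() -> VerticesAndFaces:
    import numpy as np

    # Calling .display() on the returned doc renders the mesh via trimesh.
    return VerticesAndFaces(
        vertices=np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=float),
        faces=np.array([[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]]),
    )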
|
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union
from docarray.base_doc import BaseDoc
from docarray.typing.tensor.tensor import AnyTensor
from docarray.utils._internal.misc import import_library
T = TypeVar('T', bound='VerticesAndFaces')
class VerticesAndFaces(BaseDoc):
"""
Document for handling the tensor data of a [`Mesh3D`][docarray.documents.mesh.Mesh3D] object.
A VerticesAndFaces Document can contain:
- an [`AnyTensor`](../../../../api_references/typing/tensor/tensor)
containing the vertices information (`VerticesAndFaces.vertices`)
- an [`AnyTensor`](../../../../api_references/typing/tensor/tensor)
containing the faces information (`VerticesAndFaces.faces`)
"""
vertices: AnyTensor
faces: AnyTensor
@classmethod
def validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
return super().validate(value)
def display(self) -> None:
"""
Plot mesh consisting of vertices and faces.
"""
if TYPE_CHECKING:
import trimesh
else:
trimesh = import_library('trimesh', raise_error=True)
from IPython.display import display
if self.vertices is None or self.faces is None:
raise ValueError(
'Can\'t display mesh from tensors when the vertices and/or faces '
'are None.'
)
mesh = trimesh.Trimesh(vertices=self.vertices, faces=self.faces)
display(mesh.show())
|
"""Determination of parameter bounds"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from numbers import Real
import numpy as np
from ..preprocessing import LabelBinarizer
from ..utils._param_validation import Interval, StrOptions, validate_params
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_array, check_consistent_length
@validate_params(
{
"X": ["array-like", "sparse matrix"],
"y": ["array-like"],
"loss": [StrOptions({"squared_hinge", "log"})],
"fit_intercept": ["boolean"],
"intercept_scaling": [Interval(Real, 0, None, closed="neither")],
},
prefer_skip_nested_validation=True,
)
def l1_min_c(X, y, *, loss="squared_hinge", fit_intercept=True, intercept_scaling=1.0):
"""Return the lowest bound for `C`.
The lower bound for `C` is computed such that for `C` in `(l1_min_C, infinity)`
the model is guaranteed not to be empty. This applies to l1 penalized
classifiers, such as :class:`sklearn.svm.LinearSVC` with penalty='l1' and
:class:`sklearn.linear_model.LogisticRegression` with penalty='l1'.
    This value is valid if the `class_weight` parameter in `fit()` is not set.
For an example of how to use this function, see
:ref:`sphx_glr_auto_examples_linear_model_plot_logistic_path.py`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target vector relative to X.
loss : {'squared_hinge', 'log'}, default='squared_hinge'
Specifies the loss function.
With 'squared_hinge' it is the squared hinge loss (a.k.a. L2 loss).
With 'log' it is the loss of logistic regression models.
fit_intercept : bool, default=True
Specifies if the intercept should be fitted by the model.
It must match the fit() method parameter.
intercept_scaling : float, default=1.0
When fit_intercept is True, instance vector x becomes
[x, intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
It must match the fit() method parameter.
Returns
-------
l1_min_c : float
Minimum value for C.
Examples
--------
>>> from sklearn.svm import l1_min_c
>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(n_samples=100, n_features=20, random_state=42)
>>> print(f"{l1_min_c(X, y, loss='squared_hinge', fit_intercept=True):.4f}")
0.0044
"""
X = check_array(X, accept_sparse="csc")
check_consistent_length(X, y)
Y = LabelBinarizer(neg_label=-1).fit_transform(y).T
# maximum absolute value over classes and features
den = np.max(np.abs(safe_sparse_dot(Y, X)))
if fit_intercept:
bias = np.full(
(np.size(y), 1), intercept_scaling, dtype=np.array(intercept_scaling).dtype
)
den = max(den, abs(np.dot(Y, bias)).max())
if den == 0.0:
raise ValueError(
"Ill-posed l1_min_c calculation: l1 will always "
"select zero coefficients for this data"
)
if loss == "squared_hinge":
return 0.5 / den
else: # loss == 'log':
return 2.0 / den
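# --- Usage sketch (not part of the original module) ---
# Hedged example: use the bound above to build a small l1 regularization path
# (dataset and grid are illustrative).
def _regularization_path_example() -> None:
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression

    X, y = make_classification(n_samples=200, n_features=20, random_state=0)
    cs = l1_min_c(X, y, loss="log") * np.logspace(0, 3, 5)
    for c in cs:
        clf = LogisticRegression(penalty="l1", solver="liblinear", C=c).fit(X, y)
        # The number of non-zero coefficients grows as C moves away from the bound.
        print(f"C={c:.4f}  non-zero coefficients: {np.count_nonzero(clf.coef_)}")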
|
"""Determination of parameter bounds"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from numbers import Real
import numpy as np
from ..preprocessing import LabelBinarizer
from ..utils._param_validation import Interval, StrOptions, validate_params
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_array, check_consistent_length
@validate_params(
{
"X": ["array-like", "sparse matrix"],
"y": ["array-like"],
"loss": [StrOptions({"squared_hinge", "log"})],
"fit_intercept": ["boolean"],
"intercept_scaling": [Interval(Real, 0, None, closed="neither")],
},
prefer_skip_nested_validation=True,
)
def l1_min_c(X, y, *, loss="squared_hinge", fit_intercept=True, intercept_scaling=1.0):
"""Return the lowest bound for C.
The lower bound for C is computed such that for C in (l1_min_C, infinity)
the model is guaranteed not to be empty. This applies to l1 penalized
classifiers, such as LinearSVC with penalty='l1' and
linear_model.LogisticRegression with penalty='l1'.
    This value is valid if the class_weight parameter in fit() is not set.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target vector relative to X.
loss : {'squared_hinge', 'log'}, default='squared_hinge'
Specifies the loss function.
With 'squared_hinge' it is the squared hinge loss (a.k.a. L2 loss).
With 'log' it is the loss of logistic regression models.
fit_intercept : bool, default=True
Specifies if the intercept should be fitted by the model.
It must match the fit() method parameter.
intercept_scaling : float, default=1.0
When fit_intercept is True, instance vector x becomes
[x, intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
It must match the fit() method parameter.
Returns
-------
l1_min_c : float
Minimum value for C.
Examples
--------
>>> from sklearn.svm import l1_min_c
>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(n_samples=100, n_features=20, random_state=42)
>>> print(f"{l1_min_c(X, y, loss='squared_hinge', fit_intercept=True):.4f}")
0.0044
"""
X = check_array(X, accept_sparse="csc")
check_consistent_length(X, y)
Y = LabelBinarizer(neg_label=-1).fit_transform(y).T
# maximum absolute value over classes and features
den = np.max(np.abs(safe_sparse_dot(Y, X)))
if fit_intercept:
bias = np.full(
(np.size(y), 1), intercept_scaling, dtype=np.array(intercept_scaling).dtype
)
den = max(den, abs(np.dot(Y, bias)).max())
if den == 0.0:
raise ValueError(
"Ill-posed l1_min_c calculation: l1 will always "
"select zero coefficients for this data"
)
if loss == "squared_hinge":
return 0.5 / den
else: # loss == 'log':
return 2.0 / den
|
from typing import Dict
MISTRALAI_MODELS: Dict[str, int] = {
"mistral-tiny": 32000,
"mistral-small": 32000,
"mistral-medium": 32000,
"mistral-large": 131000,
"mistral-saba-latest": 32000,
"open-mixtral-8x7b": 32000,
"open-mistral-7b": 32000,
"open-mixtral-8x22b": 64000,
"mistral-small-latest": 32000,
"mistral-medium-latest": 32000,
"mistral-large-latest": 32000,
"codestral-latest": 256000,
"open-mistral-nemo-latest": 131000,
"ministral-8b-latest": 131000,
"ministral-3b-latest": 131000,
}
MISTRALAI_FUNCTION_CALLING_MODELS = (
"mistral-large-latest",
"open-mixtral-8x22b",
"ministral-8b-latest",
"ministral-3b-latest",
"mistral-small-latest",
"codestral-latest",
"open-mistral-nemo-latest",
)
MISTRALAI_CODE_MODELS = "codestral-latest"
def mistralai_modelname_to_contextsize(modelname: str) -> int:
# handling finetuned models
if modelname.startswith("ft:"):
modelname = modelname.split(":")[1]
if modelname not in MISTRALAI_MODELS:
raise ValueError(
f"Unknown model: {modelname}. Please provide a valid MistralAI model name."
"Known models are: " + ", ".join(MISTRALAI_MODELS.keys())
)
return MISTRALAI_MODELS[modelname]
def is_mistralai_function_calling_model(modelname: str) -> bool:
return modelname in MISTRALAI_FUNCTION_CALLING_MODELS
def is_mistralai_code_model(modelname: str) -> bool:
return modelname in MISTRALAI_CODE_MODELS
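# --- Usage sketch (not part of the original module) ---
# Hedged examples of the helpers above; the fine-tuned model name below is
# illustrative (only the `ft:` prefix handling is taken from the code).
if __name__ == "__main__":
    assert mistralai_modelname_to_contextsize("open-mistral-7b") == 32000
    assert mistralai_modelname_to_contextsize("ft:open-mistral-7b:my-org:job-id") == 32000
    assert is_mistralai_function_calling_model("mistral-large-latest")
    assert not is_mistralai_function_calling_model("mistral-tiny")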
|
from typing import Dict
MISTRALAI_MODELS: Dict[str, int] = {
"mistral-tiny": 32000,
"mistral-small": 32000,
"mistral-medium": 32000,
"mistral-large": 32000,
"open-mixtral-8x7b": 32000,
"open-mistral-7b": 32000,
"open-mixtral-8x22b": 64000,
"mistral-small-latest": 32000,
"mistral-medium-latest": 32000,
"mistral-large-latest": 32000,
"codestral-latest": 32000,
"open-mistral-nemo-latest": 128000,
"ministral-8b-latest": 128000,
"ministral-3b-latest": 128000,
}
MISTRALAI_FUNCTION_CALLING_MODELS = (
"mistral-large-latest",
"open-mixtral-8x22b",
"ministral-8b-latest",
"ministral-3b-latest",
"mistral-small-latest",
"codestral-latest",
"open-mistral-nemo-latest",
)
MISTRALAI_CODE_MODELS = "codestral-latest"
def mistralai_modelname_to_contextsize(modelname: str) -> int:
# handling finetuned models
if modelname.startswith("ft:"):
modelname = modelname.split(":")[1]
if modelname not in MISTRALAI_MODELS:
raise ValueError(
f"Unknown model: {modelname}. Please provide a valid MistralAI model name."
"Known models are: " + ", ".join(MISTRALAI_MODELS.keys())
)
return MISTRALAI_MODELS[modelname]
def is_mistralai_function_calling_model(modelname: str) -> bool:
return modelname in MISTRALAI_FUNCTION_CALLING_MODELS
def is_mistralai_code_model(modelname: str) -> bool:
return modelname in MISTRALAI_CODE_MODELS
|
"""Argparser module for WorkerRuntime"""
from jina.parsers.helper import KVAppendAction
def mixin_base_runtime_parser(arg_group):
"""Mixing in arguments required by any class that extends :class:`AsynNewLoopRuntime` into the given parser.
:param arg_group: the parser instance to which we add arguments
"""
arg_group.add_argument(
'--grpc-server-options',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help="Dictionary of kwargs arguments that will be passed to the grpc server as options when starting the "
"server, example : {'grpc.max_send_message_length': -1}",
default=None,
)
def mixin_raft_parser(arg_group):
"""Mixing in arguments required by the RAFT Node. All these args are used to configure the RAFT nodes
:param arg_group: the parser instance to which we add arguments
"""
arg_group.add_argument(
'--raft-configuration',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help="Dictionary of kwargs arguments that will be passed to the RAFT node as configuration options when "
"starting the RAFT node.",
default=None,
)
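# --- Usage sketch (not part of the original module) ---
# Hedged example of wiring the mixin above into a plain argparse parser; the
# exact value parsing is delegated to KVAppendAction, so the resulting dict
# shape is taken from the help text rather than verified here.
def _example_parse_grpc_options() -> None:
    import argparse

    parser = argparse.ArgumentParser()
    group = parser.add_argument_group('runtime')
    mixin_base_runtime_parser(group)
    args = parser.parse_args(
        ['--grpc-server-options', 'grpc.max_send_message_length: -1']
    )
    # Expected (per the help text above): {'grpc.max_send_message_length': -1}
    print(args.grpc_server_options)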
|
"""Argparser module for WorkerRuntime"""
from jina.parsers.helper import KVAppendAction
def mixin_base_runtime_parser(arg_group):
"""Mixing in arguments required by any class that extends :class:`AsynNewLoopRuntime` into the given parser.
:param arg_group: the parser instance to which we add arguments
"""
arg_group.add_argument(
'--grpc-server-options',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help="Dictionary of kwargs arguments that will be passed to the grpc server as options when starting the server, example : {'grpc.max_send_message_length': -1}",
default=None,
)
|
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
"""
Utility that checks that modules like attention processors are listed in the documentation file.
```bash
python utils/check_support_list.py
```
It has no auto-fix mode.
"""
import os
import re
# All paths are set with the intent that you run this script from the root of the repo
REPO_PATH = "."
def read_documented_classes(doc_path, autodoc_regex=r"\[\[autodoc\]\]\s([^\n]+)"):
"""
Reads documented classes from a doc file using a regex to find lines like [[autodoc]] my.module.Class.
Returns a list of documented class names (just the class name portion).
"""
with open(os.path.join(REPO_PATH, doc_path), "r") as f:
doctext = f.read()
matches = re.findall(autodoc_regex, doctext)
return [match.split(".")[-1] for match in matches]
def read_source_classes(src_path, class_regex, exclude_conditions=None):
"""
Reads class names from a source file using a regex that captures class definitions.
Optionally exclude classes based on a list of conditions (functions that take class name and return bool).
"""
if exclude_conditions is None:
exclude_conditions = []
with open(os.path.join(REPO_PATH, src_path), "r") as f:
doctext = f.read()
classes = re.findall(class_regex, doctext)
# Filter out classes that meet any of the exclude conditions
filtered_classes = [c for c in classes if not any(cond(c) for cond in exclude_conditions)]
return filtered_classes
def check_documentation(doc_path, src_path, doc_regex, src_regex, exclude_conditions=None):
"""
Generic function to check if all classes defined in `src_path` are documented in `doc_path`.
    Returns a sorted list of undocumented class names.
"""
documented = set(read_documented_classes(doc_path, doc_regex))
source_classes = set(read_source_classes(src_path, src_regex, exclude_conditions=exclude_conditions))
    # Find which source classes are undocumented, in a deterministic (sorted) order.
undocumented = sorted(source_classes - documented)
return undocumented
if __name__ == "__main__":
# Define the checks we need to perform
checks = {
"Attention Processors": {
"doc_path": "docs/source/en/api/attnprocessor.md",
"src_path": "src/diffusers/models/attention_processor.py",
"doc_regex": r"\[\[autodoc\]\]\s([^\n]+)",
"src_regex": r"class\s+(\w+Processor(?:\d*_?\d*))[:(]",
"exclude_conditions": [lambda c: "LoRA" in c, lambda c: c == "Attention"],
},
"Image Processors": {
"doc_path": "docs/source/en/api/image_processor.md",
"src_path": "src/diffusers/image_processor.py",
"doc_regex": r"\[\[autodoc\]\]\s([^\n]+)",
"src_regex": r"class\s+(\w+Processor(?:\d*_?\d*))[:(]",
},
"Activations": {
"doc_path": "docs/source/en/api/activations.md",
"src_path": "src/diffusers/models/activations.py",
"doc_regex": r"\[\[autodoc\]\]\s([^\n]+)",
"src_regex": r"class\s+(\w+)\s*\(.*?nn\.Module.*?\):",
},
"Normalizations": {
"doc_path": "docs/source/en/api/normalization.md",
"src_path": "src/diffusers/models/normalization.py",
"doc_regex": r"\[\[autodoc\]\]\s([^\n]+)",
"src_regex": r"class\s+(\w+)\s*\(.*?nn\.Module.*?\):",
"exclude_conditions": [
# Exclude LayerNorm as it's an intentional exception
lambda c: c == "LayerNorm"
],
},
"LoRA Mixins": {
"doc_path": "docs/source/en/api/loaders/lora.md",
"src_path": "src/diffusers/loaders/lora_pipeline.py",
"doc_regex": r"\[\[autodoc\]\]\s([^\n]+)",
"src_regex": r"class\s+(\w+LoraLoaderMixin(?:\d*_?\d*))[:(]",
},
}
missing_items = {}
for category, params in checks.items():
undocumented = check_documentation(
doc_path=params["doc_path"],
src_path=params["src_path"],
doc_regex=params["doc_regex"],
src_regex=params["src_regex"],
exclude_conditions=params.get("exclude_conditions"),
)
if undocumented:
missing_items[category] = undocumented
# If we have any missing items, raise a single combined error
if missing_items:
error_msg = ["Some classes are not documented properly:\n"]
for category, classes in missing_items.items():
error_msg.append(f"- {category}: {', '.join(sorted(classes))}")
raise ValueError("\n".join(error_msg))
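# --- Regex illustration (not part of the original utility) ---
# Hedged sketch of how the doc regex and the attention-processor source regex
# pair up; the sample strings are illustrative.
def _regex_illustration() -> None:
    doc_line = "[[autodoc]] models.attention_processor.AttnProcessor2_0"
    assert re.findall(r"\[\[autodoc\]\]\s([^\n]+)", doc_line)[0].split(".")[-1] == "AttnProcessor2_0"
    src_line = "class AttnProcessor2_0:"
    assert re.findall(r"class\s+(\w+Processor(?:\d*_?\d*))[:(]", src_line) == ["AttnProcessor2_0"]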
|
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
"""
Utility that checks that modules like attention processors are listed in the documentation file.
```bash
python utils/check_support_list.py
```
It has no auto-fix mode.
"""
import os
import re
# All paths are set with the intent that you run this script from the root of the repo
REPO_PATH = "."
def read_documented_classes(doc_path, autodoc_regex=r"\[\[autodoc\]\]\s([^\n]+)"):
"""
Reads documented classes from a doc file using a regex to find lines like [[autodoc]] my.module.Class.
Returns a list of documented class names (just the class name portion).
"""
with open(os.path.join(REPO_PATH, doc_path), "r") as f:
doctext = f.read()
matches = re.findall(autodoc_regex, doctext)
return [match.split(".")[-1] for match in matches]
def read_source_classes(src_path, class_regex, exclude_conditions=None):
"""
Reads class names from a source file using a regex that captures class definitions.
Optionally exclude classes based on a list of conditions (functions that take class name and return bool).
"""
if exclude_conditions is None:
exclude_conditions = []
with open(os.path.join(REPO_PATH, src_path), "r") as f:
doctext = f.read()
classes = re.findall(class_regex, doctext)
# Filter out classes that meet any of the exclude conditions
filtered_classes = [c for c in classes if not any(cond(c) for cond in exclude_conditions)]
return filtered_classes
def check_documentation(doc_path, src_path, doc_regex, src_regex, exclude_conditions=None):
"""
Generic function to check if all classes defined in `src_path` are documented in `doc_path`.
    Returns a sorted list of undocumented class names.
"""
documented = set(read_documented_classes(doc_path, doc_regex))
source_classes = set(read_source_classes(src_path, src_regex, exclude_conditions=exclude_conditions))
    # Find which source classes are undocumented, in a deterministic (sorted) order.
undocumented = sorted(source_classes - documented)
return undocumented
if __name__ == "__main__":
# Define the checks we need to perform
checks = {
"Attention Processors": {
"doc_path": "docs/source/en/api/attnprocessor.md",
"src_path": "src/diffusers/models/attention_processor.py",
"doc_regex": r"\[\[autodoc\]\]\s([^\n]+)",
"src_regex": r"class\s+(\w+Processor(?:\d*_?\d*))[:(]",
"exclude_conditions": [lambda c: "LoRA" in c, lambda c: c == "Attention"],
},
"Image Processors": {
"doc_path": "docs/source/en/api/image_processor.md",
"src_path": "src/diffusers/image_processor.py",
"doc_regex": r"\[\[autodoc\]\]\s([^\n]+)",
"src_regex": r"class\s+(\w+Processor(?:\d*_?\d*))[:(]",
},
"Activations": {
"doc_path": "docs/source/en/api/activations.md",
"src_path": "src/diffusers/models/activations.py",
"doc_regex": r"\[\[autodoc\]\]\s([^\n]+)",
"src_regex": r"class\s+(\w+)\s*\(.*?nn\.Module.*?\):",
},
"Normalizations": {
"doc_path": "docs/source/en/api/normalization.md",
"src_path": "src/diffusers/models/normalization.py",
"doc_regex": r"\[\[autodoc\]\]\s([^\n]+)",
"src_regex": r"class\s+(\w+)\s*\(.*?nn\.Module.*?\):",
"exclude_conditions": [
# Exclude LayerNorm as it's an intentional exception
lambda c: c == "LayerNorm"
],
},
"LoRA Mixins": {
"doc_path": "docs/source/en/api/loaders/lora.md",
"src_path": "src/diffusers/loaders/lora_pipeline.py",
"doc_regex": r"\[\[autodoc\]\]\s([^\n]+)",
"src_regex": r"class\s+(\w+)\s*\(.*?nn\.Module.*?\):",
},
}
missing_items = {}
for category, params in checks.items():
undocumented = check_documentation(
doc_path=params["doc_path"],
src_path=params["src_path"],
doc_regex=params["doc_regex"],
src_regex=params["src_regex"],
exclude_conditions=params.get("exclude_conditions"),
)
if undocumented:
missing_items[category] = undocumented
# If we have any missing items, raise a single combined error
if missing_items:
error_msg = ["Some classes are not documented properly:\n"]
for category, classes in missing_items.items():
error_msg.append(f"- {category}: {', '.join(sorted(classes))}")
raise ValueError("\n".join(error_msg))
|
from typing import Any, Iterator, List, Optional
from urllib.parse import urljoin, urlparse
from langchain_core.documents import Document
from langchain_community.document_loaders.web_base import WebBaseLoader
class GitbookLoader(WebBaseLoader):
"""Load `GitBook` data.
    1. load either a single page, or
2. load all (relative) paths in the navbar.
"""
def __init__(
self,
web_page: str,
load_all_paths: bool = False,
base_url: Optional[str] = None,
content_selector: str = "main",
continue_on_failure: bool = False,
show_progress: bool = True,
*,
sitemap_url: Optional[str] = None,
):
"""Initialize with web page and whether to load all paths.
Args:
web_page: The web page to load or the starting point from where
relative paths are discovered.
load_all_paths: If set to True, all relative paths in the navbar
are loaded instead of only `web_page`.
base_url: If `load_all_paths` is True, the relative paths are
appended to this base url. Defaults to `web_page`.
content_selector: The CSS selector for the content to load.
Defaults to "main".
continue_on_failure: whether to continue loading the sitemap if an error
occurs loading a url, emitting a warning instead of raising an
exception. Setting this to True makes the loader more robust, but also
may result in missing data. Default: False
show_progress: whether to show a progress bar while loading. Default: True
sitemap_url: Custom sitemap URL to use when load_all_paths is True.
Defaults to "{base_url}/sitemap.xml".
"""
self.base_url = base_url or web_page
if self.base_url.endswith("/"):
self.base_url = self.base_url[:-1]
if load_all_paths:
# set web_path to the sitemap if we want to crawl all paths
if sitemap_url:
web_page = sitemap_url
else:
web_page = f"{self.base_url}/sitemap.xml"
super().__init__(
web_paths=(web_page,),
continue_on_failure=continue_on_failure,
show_progress=show_progress,
)
self.load_all_paths = load_all_paths
self.content_selector = content_selector
def lazy_load(self) -> Iterator[Document]:
"""Fetch text from one single GitBook page."""
if self.load_all_paths:
soup_info = self.scrape()
relative_paths = self._get_paths(soup_info)
urls = [urljoin(self.base_url, path) for path in relative_paths]
soup_infos = self.scrape_all(urls)
for soup_info, url in zip(soup_infos, urls):
doc = self._get_document(soup_info, url)
if doc:
yield doc
else:
soup_info = self.scrape()
doc = self._get_document(soup_info, self.web_path)
if doc:
yield doc
def _get_document(
self, soup: Any, custom_url: Optional[str] = None
) -> Optional[Document]:
"""Fetch content from page and return Document."""
page_content_raw = soup.find(self.content_selector)
if not page_content_raw:
return None
content = page_content_raw.get_text(separator="\n").strip()
title_if_exists = page_content_raw.find("h1")
title = title_if_exists.text if title_if_exists else ""
metadata = {"source": custom_url or self.web_path, "title": title}
return Document(page_content=content, metadata=metadata)
def _get_paths(self, soup: Any) -> List[str]:
"""Fetch all relative paths in the navbar."""
return [urlparse(loc.text).path for loc in soup.find_all("loc")]
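# --- Usage sketch (not part of the original module) ---
# Hedged example of the loader above; the URLs are illustrative.
def _example_load_all_pages() -> list:
    loader = GitbookLoader(
        "https://docs.example.com",
        load_all_paths=True,  # crawl every path listed in the sitemap
        sitemap_url="https://docs.example.com/sitemap-pages.xml",  # optional override
    )
    # Each Document carries {"source": <page url>, "title": <h1 text>} metadata.
    return list(loader.lazy_load())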
|
from typing import Any, Iterator, List, Optional
from urllib.parse import urljoin, urlparse
from langchain_core.documents import Document
from langchain_community.document_loaders.web_base import WebBaseLoader
class GitbookLoader(WebBaseLoader):
"""Load `GitBook` data.
    1. load either a single page, or
2. load all (relative) paths in the navbar.
"""
def __init__(
self,
web_page: str,
load_all_paths: bool = False,
base_url: Optional[str] = None,
content_selector: str = "main",
continue_on_failure: bool = False,
show_progress: bool = True,
):
"""Initialize with web page and whether to load all paths.
Args:
web_page: The web page to load or the starting point from where
relative paths are discovered.
load_all_paths: If set to True, all relative paths in the navbar
are loaded instead of only `web_page`.
base_url: If `load_all_paths` is True, the relative paths are
appended to this base url. Defaults to `web_page`.
content_selector: The CSS selector for the content to load.
Defaults to "main".
continue_on_failure: whether to continue loading the sitemap if an error
occurs loading a url, emitting a warning instead of raising an
exception. Setting this to True makes the loader more robust, but also
may result in missing data. Default: False
show_progress: whether to show a progress bar while loading. Default: True
"""
self.base_url = base_url or web_page
if self.base_url.endswith("/"):
self.base_url = self.base_url[:-1]
if load_all_paths:
# set web_path to the sitemap if we want to crawl all paths
web_page = f"{self.base_url}/sitemap.xml"
super().__init__(
web_paths=(web_page,),
continue_on_failure=continue_on_failure,
show_progress=show_progress,
)
self.load_all_paths = load_all_paths
self.content_selector = content_selector
def lazy_load(self) -> Iterator[Document]:
"""Fetch text from one single GitBook page."""
if self.load_all_paths:
soup_info = self.scrape()
relative_paths = self._get_paths(soup_info)
urls = [urljoin(self.base_url, path) for path in relative_paths]
soup_infos = self.scrape_all(urls)
for soup_info, url in zip(soup_infos, urls):
doc = self._get_document(soup_info, url)
if doc:
yield doc
else:
soup_info = self.scrape()
doc = self._get_document(soup_info, self.web_path)
if doc:
yield doc
def _get_document(
self, soup: Any, custom_url: Optional[str] = None
) -> Optional[Document]:
"""Fetch content from page and return Document."""
page_content_raw = soup.find(self.content_selector)
if not page_content_raw:
return None
content = page_content_raw.get_text(separator="\n").strip()
title_if_exists = page_content_raw.find("h1")
title = title_if_exists.text if title_if_exists else ""
metadata = {"source": custom_url or self.web_path, "title": title}
return Document(page_content=content, metadata=metadata)
def _get_paths(self, soup: Any) -> List[str]:
"""Fetch all relative paths in the navbar."""
return [urlparse(loc.text).path for loc in soup.find_all("loc")]
|
"""Test Fireworks LLM."""
from typing import cast
from pydantic import SecretStr
from pytest import CaptureFixture, MonkeyPatch
from langchain_fireworks import Fireworks
def test_fireworks_api_key_is_secret_string() -> None:
"""Test that the API key is stored as a SecretStr."""
llm = Fireworks( # type: ignore[call-arg]
fireworks_api_key="secret-api-key",
model="accounts/fireworks/models/mixtral-8x7b-instruct",
temperature=0.2,
max_tokens=250,
)
assert isinstance(llm.fireworks_api_key, SecretStr)
# Test api_key alias
llm = Fireworks(
api_key="secret-api-key", # type: ignore[arg-type]
model="accounts/fireworks/models/mixtral-8x7b-instruct",
temperature=0.2,
max_tokens=250,
)
assert isinstance(llm.fireworks_api_key, SecretStr)
def test_fireworks_api_key_masked_when_passed_from_env(
monkeypatch: MonkeyPatch, capsys: CaptureFixture
) -> None:
"""Test that the API key is masked when passed from an environment variable."""
monkeypatch.setenv("FIREWORKS_API_KEY", "secret-api-key")
llm = Fireworks(
model="accounts/fireworks/models/mixtral-8x7b-instruct",
temperature=0.2,
max_tokens=250,
)
print(llm.fireworks_api_key, end="") # noqa: T201
captured = capsys.readouterr()
assert captured.out == "**********"
def test_fireworks_api_key_masked_when_passed_via_constructor(
capsys: CaptureFixture,
) -> None:
"""Test that the API key is masked when passed via the constructor."""
llm = Fireworks( # type: ignore[call-arg]
fireworks_api_key="secret-api-key",
model="accounts/fireworks/models/mixtral-8x7b-instruct",
temperature=0.2,
max_tokens=250,
)
print(llm.fireworks_api_key, end="") # noqa: T201
captured = capsys.readouterr()
assert captured.out == "**********"
def test_fireworks_uses_actual_secret_value_from_secretstr() -> None:
"""Test that the actual secret value is correctly retrieved."""
llm = Fireworks( # type: ignore[call-arg]
fireworks_api_key="secret-api-key",
model="accounts/fireworks/models/mixtral-8x7b-instruct",
temperature=0.2,
max_tokens=250,
)
assert cast(SecretStr, llm.fireworks_api_key).get_secret_value() == "secret-api-key"
def test_fireworks_model_params() -> None:
# Test standard tracing params
llm = Fireworks(model="foo", api_key="secret-api-key") # type: ignore[arg-type]
ls_params = llm._get_ls_params()
assert ls_params == {
"ls_provider": "fireworks",
"ls_model_type": "llm",
"ls_model_name": "foo",
}
llm = Fireworks(
model="foo",
api_key="secret-api-key", # type: ignore[arg-type]
max_tokens=10,
temperature=0.1,
)
ls_params = llm._get_ls_params()
assert ls_params == {
"ls_provider": "fireworks",
"ls_model_type": "llm",
"ls_model_name": "foo",
"ls_max_tokens": 10,
"ls_temperature": 0.1,
}
|
"""Test Fireworks LLM"""
from typing import cast
from pydantic import SecretStr
from pytest import CaptureFixture, MonkeyPatch
from langchain_fireworks import Fireworks
def test_fireworks_api_key_is_secret_string() -> None:
"""Test that the API key is stored as a SecretStr."""
llm = Fireworks( # type: ignore[call-arg]
fireworks_api_key="secret-api-key",
model="accounts/fireworks/models/mixtral-8x7b-instruct",
temperature=0.2,
max_tokens=250,
)
assert isinstance(llm.fireworks_api_key, SecretStr)
# Test api_key alias
llm = Fireworks(
api_key="secret-api-key", # type: ignore[arg-type]
model="accounts/fireworks/models/mixtral-8x7b-instruct",
temperature=0.2,
max_tokens=250,
)
assert isinstance(llm.fireworks_api_key, SecretStr)
def test_fireworks_api_key_masked_when_passed_from_env(
monkeypatch: MonkeyPatch, capsys: CaptureFixture
) -> None:
"""Test that the API key is masked when passed from an environment variable."""
monkeypatch.setenv("FIREWORKS_API_KEY", "secret-api-key")
llm = Fireworks(
model="accounts/fireworks/models/mixtral-8x7b-instruct",
temperature=0.2,
max_tokens=250,
)
print(llm.fireworks_api_key, end="") # noqa: T201
captured = capsys.readouterr()
assert captured.out == "**********"
def test_fireworks_api_key_masked_when_passed_via_constructor(
capsys: CaptureFixture,
) -> None:
"""Test that the API key is masked when passed via the constructor."""
llm = Fireworks( # type: ignore[call-arg]
fireworks_api_key="secret-api-key",
model="accounts/fireworks/models/mixtral-8x7b-instruct",
temperature=0.2,
max_tokens=250,
)
print(llm.fireworks_api_key, end="") # noqa: T201
captured = capsys.readouterr()
assert captured.out == "**********"
def test_fireworks_uses_actual_secret_value_from_secretstr() -> None:
"""Test that the actual secret value is correctly retrieved."""
llm = Fireworks( # type: ignore[call-arg]
fireworks_api_key="secret-api-key",
model="accounts/fireworks/models/mixtral-8x7b-instruct",
temperature=0.2,
max_tokens=250,
)
assert cast(SecretStr, llm.fireworks_api_key).get_secret_value() == "secret-api-key"
def test_fireworks_model_params() -> None:
# Test standard tracing params
llm = Fireworks(model="foo", api_key="secret-api-key") # type: ignore[arg-type]
ls_params = llm._get_ls_params()
assert ls_params == {
"ls_provider": "fireworks",
"ls_model_type": "llm",
"ls_model_name": "foo",
}
llm = Fireworks(
model="foo",
api_key="secret-api-key", # type: ignore[arg-type]
max_tokens=10,
temperature=0.1,
)
ls_params = llm._get_ls_params()
assert ls_params == {
"ls_provider": "fireworks",
"ls_model_type": "llm",
"ls_model_name": "foo",
"ls_max_tokens": 10,
"ls_temperature": 0.1,
}
|
__version__ = '0.12.2'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.12.1'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
"""Standard LangChain interface tests"""
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.rate_limiters import InMemoryRateLimiter
from langchain_core.tools import BaseTool
from langchain_tests.integration_tests import (
ChatModelIntegrationTests,
)
from langchain_groq import ChatGroq
rate_limiter = InMemoryRateLimiter(requests_per_second=0.2)
class BaseTestGroq(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatGroq
@pytest.mark.xfail(reason="Not yet implemented.")
def test_tool_message_histories_list_content(
self, model: BaseChatModel, my_adder_tool: BaseTool
) -> None:
super().test_tool_message_histories_list_content(model, my_adder_tool)
@property
def supports_json_mode(self) -> bool:
return True
class TestGroqLlama(BaseTestGroq):
@property
def chat_model_params(self) -> dict:
return {
"model": "llama-3.1-8b-instant",
"temperature": 0,
"rate_limiter": rate_limiter,
}
@property
def supports_json_mode(self) -> bool:
return True
|
"""Standard LangChain interface tests"""
from typing import Type
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.rate_limiters import InMemoryRateLimiter
from langchain_core.tools import BaseTool
from langchain_tests.integration_tests import (
ChatModelIntegrationTests,
)
from langchain_groq import ChatGroq
rate_limiter = InMemoryRateLimiter(requests_per_second=0.2)
class BaseTestGroq(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return ChatGroq
@pytest.mark.xfail(reason="Not yet implemented.")
def test_tool_message_histories_list_content(
self, model: BaseChatModel, my_adder_tool: BaseTool
) -> None:
super().test_tool_message_histories_list_content(model, my_adder_tool)
@property
def supports_json_mode(self) -> bool:
return True
class TestGroqLlama(BaseTestGroq):
@property
def chat_model_params(self) -> dict:
return {
"model": "llama-3.1-8b-instant",
"temperature": 0,
"rate_limiter": rate_limiter,
}
@property
def supports_json_mode(self) -> bool:
return True
|
from typing import Any, Dict, Optional, Union
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.prototype.transforms import functional as F, Transform
class ConvertBoundingBoxFormat(Transform):
_transformed_types = (features.BoundingBox,)
def __init__(self, format: Union[str, features.BoundingBoxFormat]) -> None:
super().__init__()
if isinstance(format, str):
format = features.BoundingBoxFormat[format]
self.format = format
def _transform(self, inpt: features.BoundingBox, params: Dict[str, Any]) -> features.BoundingBox:
# We need to unwrap here to avoid unnecessary `__torch_function__` calls,
# since `convert_format_bounding_box` does not have a dispatcher function that would do that for us
output = F.convert_format_bounding_box(
inpt.as_subclass(torch.Tensor), old_format=inpt.format, new_format=params["format"]
)
return features.BoundingBox.wrap_like(inpt, output, format=params["format"])
class ConvertImageDtype(Transform):
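    # `_transformed_types` accepts predicate callables as well as types;
    # `is_simple_tensor` matches plain tensors that are not feature subclasses.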
_transformed_types = (features.is_simple_tensor, features.Image, features.Video)
def __init__(self, dtype: torch.dtype = torch.float32) -> None:
super().__init__()
self.dtype = dtype
def _transform(
self, inpt: Union[features.TensorImageType, features.TensorVideoType], params: Dict[str, Any]
) -> Union[features.TensorImageType, features.TensorVideoType]:
# TODO: the `inpt.as_subclass(torch.Tensor)` call can be removed as soon as we have a proper dispatcher that
# handles this. See https://github.com/pytorch/vision/pull/6783 for details.
output = F.convert_image_dtype(inpt.as_subclass(torch.Tensor), dtype=self.dtype)
return (
output if features.is_simple_tensor(inpt) else type(inpt).wrap_like(inpt, output) # type: ignore[attr-defined]
)
class ConvertColorSpace(Transform):
_transformed_types = (features.is_simple_tensor, features.Image, PIL.Image.Image, features.Video)
def __init__(
self,
color_space: Union[str, features.ColorSpace],
old_color_space: Optional[Union[str, features.ColorSpace]] = None,
) -> None:
super().__init__()
if isinstance(color_space, str):
color_space = features.ColorSpace.from_str(color_space)
self.color_space = color_space
if isinstance(old_color_space, str):
old_color_space = features.ColorSpace.from_str(old_color_space)
self.old_color_space = old_color_space
def _transform(
self, inpt: Union[features.ImageType, features.VideoType], params: Dict[str, Any]
) -> Union[features.ImageType, features.VideoType]:
return F.convert_color_space(inpt, color_space=self.color_space, old_color_space=self.old_color_space)
class ClampBoundingBoxes(Transform):
_transformed_types = (features.BoundingBox,)
def _transform(self, inpt: features.BoundingBox, params: Dict[str, Any]) -> features.BoundingBox:
# We need to unwrap here to avoid unnecessary `__torch_function__` calls,
# since `clamp_bounding_box` does not have a dispatcher function that would do that for us
output = F.clamp_bounding_box(
inpt.as_subclass(torch.Tensor), format=inpt.format, spatial_size=inpt.spatial_size
)
return features.BoundingBox.wrap_like(inpt, output)
|
from typing import Any, Dict, Optional, Union
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.prototype.transforms import functional as F, Transform
class ConvertBoundingBoxFormat(Transform):
_transformed_types = (features.BoundingBox,)
def __init__(self, format: Union[str, features.BoundingBoxFormat]) -> None:
super().__init__()
if isinstance(format, str):
format = features.BoundingBoxFormat[format]
self.format = format
def _transform(self, inpt: features.BoundingBox, params: Dict[str, Any]) -> features.BoundingBox:
# We need to unwrap here to avoid unnecessary `__torch_function__` calls,
# since `convert_format_bounding_box` does not have a dispatcher function that would do that for us
output = F.convert_format_bounding_box(
inpt.as_subclass(torch.Tensor), old_format=inpt.format, new_format=params["format"]
)
return features.BoundingBox.wrap_like(inpt, output, format=params["format"])
class ConvertImageDtype(Transform):
_transformed_types = (features.is_simple_tensor, features.Image, features.Video)
def __init__(self, dtype: torch.dtype = torch.float32) -> None:
super().__init__()
self.dtype = dtype
def _transform(
self, inpt: Union[features.TensorImageType, features.TensorVideoType], params: Dict[str, Any]
) -> Union[features.TensorImageType, features.TensorVideoType]:
# TODO: the `inpt.as_subclass(torch.Tensor)` call can be removed as soon as we have a proper dispatcher that
# handles this. See https://github.com/pytorch/vision/pull/6783 for details.
output = F.convert_image_dtype(inpt.as_subclass(torch.Tensor), dtype=self.dtype)
return (
output if features.is_simple_tensor(inpt) else type(inpt).wrap_like(inpt, output) # type: ignore[attr-defined]
)
class ConvertColorSpace(Transform):
_transformed_types = (features.is_simple_tensor, features.Image, PIL.Image.Image, features.Video)
def __init__(
self,
color_space: Union[str, features.ColorSpace],
old_color_space: Optional[Union[str, features.ColorSpace]] = None,
copy: bool = True,
) -> None:
super().__init__()
if isinstance(color_space, str):
color_space = features.ColorSpace.from_str(color_space)
self.color_space = color_space
if isinstance(old_color_space, str):
old_color_space = features.ColorSpace.from_str(old_color_space)
self.old_color_space = old_color_space
self.copy = copy
def _transform(
self, inpt: Union[features.ImageType, features.VideoType], params: Dict[str, Any]
) -> Union[features.ImageType, features.VideoType]:
return F.convert_color_space(
inpt, color_space=self.color_space, old_color_space=self.old_color_space, copy=self.copy
)
class ClampBoundingBoxes(Transform):
_transformed_types = (features.BoundingBox,)
def _transform(self, inpt: features.BoundingBox, params: Dict[str, Any]) -> features.BoundingBox:
# We need to unwrap here to avoid unnecessary `__torch_function__` calls,
# since `clamp_bounding_box` does not have a dispatcher function that would do that for us
output = F.clamp_bounding_box(
inpt.as_subclass(torch.Tensor), format=inpt.format, spatial_size=inpt.spatial_size
)
return features.BoundingBox.wrap_like(inpt, output)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api.utils import legacy
from keras.src.backend.common.global_state import clear_session
from keras.src.backend.common.keras_tensor import is_keras_tensor
from keras.src.backend.common.variables import standardize_dtype
from keras.src.layers.preprocessing.feature_space import FeatureSpace
from keras.src.ops.operation_utils import get_source_inputs
from keras.src.saving.object_registration import CustomObjectScope
from keras.src.saving.object_registration import (
CustomObjectScope as custom_object_scope,
)
from keras.src.saving.object_registration import get_custom_objects
from keras.src.saving.object_registration import get_registered_name
from keras.src.saving.object_registration import get_registered_object
from keras.src.saving.object_registration import register_keras_serializable
from keras.src.saving.serialization_lib import deserialize_keras_object
from keras.src.saving.serialization_lib import serialize_keras_object
from keras.src.trainers.data_adapters.data_adapter_utils import (
pack_x_y_sample_weight,
)
from keras.src.trainers.data_adapters.data_adapter_utils import (
unpack_x_y_sample_weight,
)
from keras.src.trainers.data_adapters.py_dataset_adapter import PyDataset
from keras.src.trainers.data_adapters.py_dataset_adapter import (
PyDataset as Sequence,
)
from keras.src.utils.audio_dataset_utils import audio_dataset_from_directory
from keras.src.utils.config import Config
from keras.src.utils.dataset_utils import split_dataset
from keras.src.utils.file_utils import get_file
from keras.src.utils.image_dataset_utils import image_dataset_from_directory
from keras.src.utils.image_utils import array_to_img
from keras.src.utils.image_utils import img_to_array
from keras.src.utils.image_utils import load_img
from keras.src.utils.image_utils import save_img
from keras.src.utils.io_utils import disable_interactive_logging
from keras.src.utils.io_utils import enable_interactive_logging
from keras.src.utils.io_utils import is_interactive_logging_enabled
from keras.src.utils.model_visualization import model_to_dot
from keras.src.utils.model_visualization import plot_model
from keras.src.utils.numerical_utils import normalize
from keras.src.utils.numerical_utils import to_categorical
from keras.src.utils.progbar import Progbar
from keras.src.utils.rng_utils import set_random_seed
from keras.src.utils.sequence_utils import pad_sequences
from keras.src.utils.text_dataset_utils import text_dataset_from_directory
from keras.src.utils.timeseries_dataset_utils import (
timeseries_dataset_from_array,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api.utils import legacy
from keras.src.backend.common.global_state import clear_session
from keras.src.backend.common.keras_tensor import is_keras_tensor
from keras.src.backend.common.variables import standardize_dtype
from keras.src.layers.preprocessing.feature_space import FeatureSpace
from keras.src.ops.operation_utils import get_source_inputs
from keras.src.saving.object_registration import CustomObjectScope
from keras.src.saving.object_registration import (
CustomObjectScope as custom_object_scope,
)
from keras.src.saving.object_registration import get_custom_objects
from keras.src.saving.object_registration import get_registered_name
from keras.src.saving.object_registration import get_registered_object
from keras.src.saving.object_registration import register_keras_serializable
from keras.src.saving.serialization_lib import deserialize_keras_object
from keras.src.saving.serialization_lib import serialize_keras_object
from keras.src.trainers.data_adapters.data_adapter_utils import (
pack_x_y_sample_weight,
)
from keras.src.trainers.data_adapters.data_adapter_utils import (
unpack_x_y_sample_weight,
)
from keras.src.trainers.data_adapters.py_dataset_adapter import PyDataset
from keras.src.trainers.data_adapters.py_dataset_adapter import (
PyDataset as Sequence,
)
from keras.src.utils.audio_dataset_utils import audio_dataset_from_directory
from keras.src.utils.dataset_utils import split_dataset
from keras.src.utils.file_utils import get_file
from keras.src.utils.image_dataset_utils import image_dataset_from_directory
from keras.src.utils.image_utils import array_to_img
from keras.src.utils.image_utils import img_to_array
from keras.src.utils.image_utils import load_img
from keras.src.utils.image_utils import save_img
from keras.src.utils.io_utils import disable_interactive_logging
from keras.src.utils.io_utils import enable_interactive_logging
from keras.src.utils.io_utils import is_interactive_logging_enabled
from keras.src.utils.model_visualization import model_to_dot
from keras.src.utils.model_visualization import plot_model
from keras.src.utils.numerical_utils import normalize
from keras.src.utils.numerical_utils import to_categorical
from keras.src.utils.progbar import Progbar
from keras.src.utils.rng_utils import set_random_seed
from keras.src.utils.sequence_utils import pad_sequences
from keras.src.utils.text_dataset_utils import text_dataset_from_directory
from keras.src.utils.timeseries_dataset_utils import (
timeseries_dataset_from_array,
)
|
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.embeddings.huggingface import (
HuggingFaceEmbedding,
HuggingFaceInferenceAPIEmbedding,
)
def test_huggingfaceembedding_class():
names_of_base_classes = [b.__name__ for b in HuggingFaceEmbedding.__mro__]
assert BaseEmbedding.__name__ in names_of_base_classes
def test_huggingfaceapiembedding_class():
names_of_base_classes = [
b.__name__ for b in HuggingFaceInferenceAPIEmbedding.__mro__
]
assert BaseEmbedding.__name__ in names_of_base_classes
def test_embedding_retry():
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en")
# Test successful embedding
result = embed_model._embed(["This is a test sentence"])
assert isinstance(result, list)
assert len(result) == 1
assert isinstance(result[0], list)
assert all(isinstance(x, float) for x in result[0])
|
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.embeddings.huggingface import (
HuggingFaceEmbedding,
HuggingFaceInferenceAPIEmbedding,
)
import pytest
def test_huggingfaceembedding_class():
names_of_base_classes = [b.__name__ for b in HuggingFaceEmbedding.__mro__]
assert BaseEmbedding.__name__ in names_of_base_classes
def test_huggingfaceapiembedding_class():
names_of_base_classes = [
b.__name__ for b in HuggingFaceInferenceAPIEmbedding.__mro__
]
assert BaseEmbedding.__name__ in names_of_base_classes
def test_input_validation():
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en")
# Test empty input
with pytest.raises(ValueError, match="Input text cannot be empty or whitespace"):
embed_model._validate_input("")
# Test whitespace input
with pytest.raises(ValueError, match="Input text cannot be empty or whitespace"):
embed_model._validate_input(" ")
# Test valid input
embed_model._validate_input("This is a valid input") # Should not raise
def test_embedding_retry():
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en")
# Test successful embedding
result = embed_model._embed(["This is a test sentence"])
assert isinstance(result, list)
assert len(result) == 1
assert isinstance(result[0], list)
assert all(isinstance(x, float) for x in result[0])
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the GNU General Public License version 3.
from typing import Tuple
import os
import sys
import torch
import fire
import time
import json
from pathlib import Path
from fairscale.nn.model_parallel.initialize import initialize_model_parallel
from llama import ModelArgs, Transformer, Tokenizer, LLaMA
def setup_model_parallel() -> Tuple[int, int]:
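    # LOCAL_RANK and WORLD_SIZE are injected by the torch.distributed launcher
    # (e.g. torchrun); -1 means the script was started without one.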
local_rank = int(os.environ.get("LOCAL_RANK", -1))
world_size = int(os.environ.get("WORLD_SIZE", -1))
torch.distributed.init_process_group("nccl")
initialize_model_parallel(world_size)
torch.cuda.set_device(local_rank)
# seed must be the same in all processes
torch.manual_seed(1)
return local_rank, world_size
def load(
ckpt_dir: str,
tokenizer_path: str,
local_rank: int,
world_size: int,
max_seq_len: int,
max_batch_size: int,
) -> LLaMA:
start_time = time.time()
checkpoints = sorted(Path(ckpt_dir).glob("*.pth"))
assert world_size == len(
checkpoints
), f"Loading a checkpoint for MP={len(checkpoints)} but world size is {world_size}"
ckpt_path = checkpoints[local_rank]
print("Loading")
checkpoint = torch.load(ckpt_path, map_location="cpu")
with open(Path(ckpt_dir) / "params.json", "r") as f:
params = json.loads(f.read())
model_args: ModelArgs = ModelArgs(
max_seq_len=max_seq_len, max_batch_size=max_batch_size, **params
)
tokenizer = Tokenizer(model_path=tokenizer_path)
model_args.vocab_size = tokenizer.n_words
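    # Build the Transformer with fp16 weights directly on GPU, then restore the
    # fp32 CPU default for tensors created afterwards.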
torch.set_default_tensor_type(torch.cuda.HalfTensor)
model = Transformer(model_args)
torch.set_default_tensor_type(torch.FloatTensor)
model.load_state_dict(checkpoint, strict=False)
generator = LLaMA(model, tokenizer)
print(f"Loaded in {time.time() - start_time:.2f} seconds")
return generator
def main(
ckpt_dir: str,
tokenizer_path: str,
temperature: float = 0.8,
top_p: float = 0.95,
max_seq_len: int = 512,
max_batch_size: int = 32,
):
local_rank, world_size = setup_model_parallel()
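    # Only rank 0 prints; the other ranks send their stdout to /dev/null.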
if local_rank > 0:
sys.stdout = open(os.devnull, "w")
generator = load(
ckpt_dir, tokenizer_path, local_rank, world_size, max_seq_len, max_batch_size
)
prompts = [
# For these prompts, the expected answer is the natural continuation of the prompt
"I believe the meaning of life is",
"Simply put, the theory of relativity states that ",
"Building a website can be done in 10 simple steps:\n",
# Few shot prompts: https://huggingface.co/blog/few-shot-learning-gpt-neo-and-inference-api
"""Tweet: "I hate it when my phone battery dies."
Sentiment: Negative
###
Tweet: "My day has been 👍"
Sentiment: Positive
###
Tweet: "This is the link to the article"
Sentiment: Neutral
###
Tweet: "This new music video was incredibile"
Sentiment:""",
"""Translate English to French:
sea otter => loutre de mer
peppermint => menthe poivrée
plush girafe => girafe peluche
cheese =>""",
]
results = generator.generate(
prompts, max_gen_len=256, temperature=temperature, top_p=top_p
)
for result in results:
print(result)
print("\n==================================\n")
if __name__ == "__main__":
fire.Fire(main)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the GNU General Public License version 3.
from typing import Tuple
import os
import sys
import torch
import fire
import time
import json
from pathlib import Path
from fairscale.nn.model_parallel.initialize import initialize_model_parallel
from llama import ModelArgs, Transformer, Tokenizer, LLaMA
def setup_model_parallel() -> Tuple[int, int]:
local_rank = int(os.environ.get("LOCAL_RANK", -1))
world_size = int(os.environ.get("WORLD_SIZE", -1))
torch.distributed.init_process_group("nccl")
initialize_model_parallel(world_size)
torch.cuda.set_device(local_rank)
# seed must be the same in all processes
torch.manual_seed(1)
return local_rank, world_size
def load(ckpt_dir: str, tokenizer_path: str, local_rank: int, world_size: int) -> LLaMA:
start_time = time.time()
checkpoints = sorted(Path(ckpt_dir).glob("*.pth"))
assert (
world_size == len(checkpoints)
), f"Loading a checkpoint for MP={len(checkpoints)} but world size is {world_size}"
ckpt_path = checkpoints[local_rank]
print("Loading")
checkpoint = torch.load(ckpt_path, map_location="cpu")
with open(Path(ckpt_dir) / "params.json", "r") as f:
params = json.loads(f.read())
model_args: ModelArgs = ModelArgs(max_seq_len=1024, max_batch_size=32, **params)
tokenizer = Tokenizer(model_path=tokenizer_path)
model_args.vocab_size = tokenizer.n_words
torch.set_default_tensor_type(torch.cuda.HalfTensor)
model = Transformer(model_args)
torch.set_default_tensor_type(torch.FloatTensor)
model.load_state_dict(checkpoint, strict=False)
generator = LLaMA(model, tokenizer)
print(f"Loaded in {time.time() - start_time:.2f} seconds")
return generator
def main(ckpt_dir: str, tokenizer_path: str, temperature: float = 0.8, top_p: float = 0.95):
local_rank, world_size = setup_model_parallel()
if local_rank > 0:
sys.stdout = open(os.devnull, 'w')
generator = load(ckpt_dir, tokenizer_path, local_rank, world_size)
prompts = ["The capital of Germany is the city of", "Here is my sonnet in the style of Shakespeare about an artificial intelligence:"]
results = generator.generate(prompts, max_gen_len=256, temperature=temperature, top_p=top_p)
for result in results:
print(result)
print("\n==================================\n")
if __name__ == "__main__":
fire.Fire(main)
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for profiler_wrapper.cc pybind methods."""
from tensorflow.python.eager import test
from tensorflow.python.framework import test_util
from tensorflow.python.profiler.internal import _pywrap_profiler_plugin as profiler_wrapper_plugin
class ProfilerSessionTest(test_util.TensorFlowTestCase):
def test_xspace_to_tools_data_default_options(self):
# filenames only used for `hlo_proto` tool.
profiler_wrapper_plugin.xspace_to_tools_data([], 'trace_viewer')
def _test_xspace_to_tools_data_options(self, options):
profiler_wrapper_plugin.xspace_to_tools_data([], 'trace_viewer', options)
def test_xspace_to_tools_data_empty_options(self):
self._test_xspace_to_tools_data_options({})
def test_xspace_to_tools_data_int_options(self):
self._test_xspace_to_tools_data_options({'example_option': 0})
def test_xspace_to_tools_data_str_options(self):
self._test_xspace_to_tools_data_options({'example_option': 'example'})
if __name__ == '__main__':
test.main()
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for profiler_wrapper.cc pybind methods."""
from tensorflow.python.eager import test
from tensorflow.python.framework import test_util
from tensorflow.python.profiler.internal import _pywrap_profiler_plugin as profiler_wrapper_plugin
class ProfilerSessionTest(test_util.TensorFlowTestCase):
def test_xspace_to_tools_data_default_options(self):
# filenames only used for `tf_data_bottleneck_analysis` and
# `hlo_proto` tools.
profiler_wrapper_plugin.xspace_to_tools_data([], 'trace_viewer')
def _test_xspace_to_tools_data_options(self, options):
profiler_wrapper_plugin.xspace_to_tools_data([], 'trace_viewer', options)
def test_xspace_to_tools_data_empty_options(self):
self._test_xspace_to_tools_data_options({})
def test_xspace_to_tools_data_int_options(self):
self._test_xspace_to_tools_data_options({'example_option': 0})
def test_xspace_to_tools_data_str_options(self):
self._test_xspace_to_tools_data_options({'example_option': 'example'})
if __name__ == '__main__':
test.main()
|
# Copyright (c) OpenMMLab. All rights reserved.
from .ade20k import (ADE20KInstanceDataset, ADE20KPanopticDataset,
ADE20KSegDataset)
from .base_det_dataset import BaseDetDataset
from .base_semseg_dataset import BaseSegDataset
from .base_video_dataset import BaseVideoDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_caption import CocoCaptionDataset
from .coco_panoptic import CocoPanopticDataset
from .coco_semantic import CocoSegDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .dsdl import DSDLDetDataset
from .isaid import iSAIDDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .mot_challenge_dataset import MOTChallengeDataset
from .objects365 import Objects365V1Dataset, Objects365V2Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .refcoco import RefCocoDataset
from .reid_dataset import ReIDDataset
from .samplers import (AspectRatioBatchSampler, ClassAwareSampler,
GroupMultiSourceSampler, MultiSourceSampler,
TrackAspectRatioBatchSampler, TrackImgSampler)
from .utils import get_loading_pipeline
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
from .youtube_vis_dataset import YouTubeVISDataset
__all__ = [
'XMLDataset', 'CocoDataset', 'DeepFashionDataset', 'VOCDataset',
'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', 'LVISV1Dataset',
'WIDERFaceDataset', 'get_loading_pipeline', 'CocoPanopticDataset',
'MultiImageMixDataset', 'OpenImagesDataset', 'OpenImagesChallengeDataset',
'AspectRatioBatchSampler', 'ClassAwareSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'BaseDetDataset', 'CrowdHumanDataset',
'Objects365V1Dataset', 'Objects365V2Dataset', 'DSDLDetDataset',
'BaseVideoDataset', 'MOTChallengeDataset', 'TrackImgSampler',
'ReIDDataset', 'YouTubeVISDataset', 'TrackAspectRatioBatchSampler',
'ADE20KPanopticDataset', 'CocoCaptionDataset', 'RefCocoDataset',
'BaseSegDataset', 'ADE20KSegDataset', 'CocoSegDataset',
'ADE20KInstanceDataset', 'iSAIDDataset'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_det_dataset import BaseDetDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .objects365 import Objects365V1Dataset, Objects365V2Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .samplers import (AspectRatioBatchSampler, ClassAwareSampler,
GroupMultiSourceSampler, MultiSourceSampler)
from .utils import get_loading_pipeline
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
__all__ = [
'XMLDataset', 'CocoDataset', 'DeepFashionDataset', 'VOCDataset',
'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', 'LVISV1Dataset',
'WIDERFaceDataset', 'get_loading_pipeline', 'CocoPanopticDataset',
'MultiImageMixDataset', 'OpenImagesDataset', 'OpenImagesChallengeDataset',
'AspectRatioBatchSampler', 'ClassAwareSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'BaseDetDataset', 'CrowdHumanDataset',
'Objects365V1Dataset', 'Objects365V2Dataset'
]
|