Dataset schema:
index: int64 (0 to 731k)
package: string (length 2 to 98)
name: string (length 1 to 76)
docstring: string (length 0 to 281k)
code: string (length 4 to 1.07M)
signature: string (length 2 to 42.8k)
63,951
openai.pagination
SyncCursorPage
null
class SyncCursorPage(BaseSyncPage[_T], BasePage[_T], Generic[_T]): data: List[_T] @override def _get_page_items(self) -> List[_T]: data = self.data if not data: return [] return data @override def next_page_info(self) -> Optional[PageInfo]: data = self.data if not data: return None item = cast(Any, data[-1]) if not isinstance(item, CursorPageItem) or item.id is None: # TODO emit warning log return None return PageInfo(params={"after": item.id})
(*, data: List[~_T], **extra_data: Any) -> None
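The cursor rule above is small but easy to get wrong, so here is a minimal standalone sketch of it: the "after" cursor for the next request is the id of the last item on the current page, and an empty page or an id-less item ends pagination. The Item and PageInfo names below are illustrative stand-ins, not the openai package's own classes.

from dataclasses import dataclass
from typing import List, Optional

@dataclass
class Item:
    id: Optional[str]

@dataclass
class PageInfo:
    params: dict

def next_page_info(data: List[Item]) -> Optional[PageInfo]:
    # Mirrors SyncCursorPage.next_page_info: the cursor comes from the last item.
    if not data:
        return None                      # empty page -> no further pages
    last = data[-1]
    if last.id is None:
        return None                      # no id -> cannot build an "after" cursor
    return PageInfo(params={"after": last.id})

print(next_page_info([Item("a"), Item("b")]))  # PageInfo(params={'after': 'b'})
print(next_page_info([]))                      # None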
63,959
openai._base_client
__iter__
null
def __iter__(self) -> Iterator[_T]: # type: ignore for page in self.iter_pages(): for item in page._get_page_items(): yield item
(self) -> Iterator[~_T]
63,972
openai.pagination
_get_page_items
null
@override def _get_page_items(self) -> List[_T]: data = self.data if not data: return [] return data
(self) -> List[~_T]
63,973
openai._base_client
_info_to_options
null
def _info_to_options(self, info: PageInfo) -> FinalRequestOptions: options = model_copy(self._options) options._strip_raw_response_header() if not isinstance(info.params, NotGiven): options.params = {**options.params, **info.params} return options if not isinstance(info.url, NotGiven): params = self._params_from_url(info.url) url = info.url.copy_with(params=params) options.params = dict(url.params) options.url = str(url) return options raise ValueError("Unexpected PageInfo state")
(self, info: openai._base_client.PageInfo) -> openai._models.FinalRequestOptions
63,975
openai._base_client
_params_from_url
null
def _params_from_url(self, url: URL) -> httpx.QueryParams: # TODO: do we have to preprocess params here? return httpx.QueryParams(cast(Any, self._options.params)).merge(url.params)
(self, url: httpx.URL) -> httpx.QueryParams
63,976
openai._base_client
_set_private_attributes
null
def _set_private_attributes( self, client: SyncAPIClient, model: Type[_T], options: FinalRequestOptions, ) -> None: self._model = model self._client = client self._options = options
(self, client: openai._base_client.SyncAPIClient, model: Type[~_T], options: openai._models.FinalRequestOptions) -> NoneType
63,979
openai._base_client
get_next_page
null
def get_next_page(self: SyncPageT) -> SyncPageT: info = self.next_page_info() if not info: raise RuntimeError( "No next page expected; please check `.has_next_page()` before calling `.get_next_page()`." ) options = self._info_to_options(info) return self._client._request_api_list(self._model, page=self.__class__, options=options)
(self: ~SyncPageT) -> ~SyncPageT
63,980
openai._base_client
has_next_page
null
def has_next_page(self) -> bool: items = self._get_page_items() if not items: return False return self.next_page_info() is not None
(self) -> bool
63,981
openai._base_client
iter_pages
null
def iter_pages(self: SyncPageT) -> Iterator[SyncPageT]: page = self while True: yield page if page.has_next_page(): page = page.get_next_page() else: return
(self: ~SyncPageT) -> Iterator[~SyncPageT]
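Taken together, has_next_page(), get_next_page(), iter_pages() and __iter__ give two equivalent ways to walk a paginated listing. A hedged usage sketch, assuming an endpoint that returns one of these sync page objects (the fine-tuning jobs list is used here as an example and a valid OPENAI_API_KEY is assumed in the environment):

from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# Explicit page-by-page traversal via has_next_page()/get_next_page():
page = client.fine_tuning.jobs.list(limit=10)
while True:
    for job in page.data:
        print(job.id)
    if not page.has_next_page():
        break
    page = page.get_next_page()

# Equivalent: __iter__ drives iter_pages() and flattens the items for you.
for job in client.fine_tuning.jobs.list(limit=10):
    print(job.id)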
63,986
pydantic._internal._model_construction
wrapped_model_post_init
We need to both initialize private attributes and call the user-defined model_post_init method.
def __new__( mcs, cls_name: str, bases: tuple[type[Any], ...], namespace: dict[str, Any], __pydantic_generic_metadata__: PydanticGenericMetadata | None = None, __pydantic_reset_parent_namespace__: bool = True, _create_model_module: str | None = None, **kwargs: Any, ) -> type: """Metaclass for creating Pydantic models. Args: cls_name: The name of the class to be created. bases: The base classes of the class to be created. namespace: The attribute dictionary of the class to be created. __pydantic_generic_metadata__: Metadata for generic models. __pydantic_reset_parent_namespace__: Reset parent namespace. _create_model_module: The module of the class to be created, if created by `create_model`. **kwargs: Catch-all for any other keyword arguments. Returns: The new class created by the metaclass. """ # Note `ModelMetaclass` refers to `BaseModel`, but is also used to *create* `BaseModel`, so we rely on the fact # that `BaseModel` itself won't have any bases, but any subclass of it will, to determine whether the `__new__` # call we're in the middle of is for the `BaseModel` class. if bases: base_field_names, class_vars, base_private_attributes = mcs._collect_bases_data(bases) config_wrapper = ConfigWrapper.for_model(bases, namespace, kwargs) namespace['model_config'] = config_wrapper.config_dict private_attributes = inspect_namespace( namespace, config_wrapper.ignored_types, class_vars, base_field_names ) if private_attributes or base_private_attributes: original_model_post_init = get_model_post_init(namespace, bases) if original_model_post_init is not None: # if there are private_attributes and a model_post_init function, we handle both def wrapped_model_post_init(self: BaseModel, __context: Any) -> None: """We need to both initialize private attributes and call the user-defined model_post_init method. """ init_private_attributes(self, __context) original_model_post_init(self, __context) namespace['model_post_init'] = wrapped_model_post_init else: namespace['model_post_init'] = init_private_attributes namespace['__class_vars__'] = class_vars namespace['__private_attributes__'] = {**base_private_attributes, **private_attributes} cls: type[BaseModel] = super().__new__(mcs, cls_name, bases, namespace, **kwargs) # type: ignore from ..main import BaseModel mro = cls.__mro__ if Generic in mro and mro.index(Generic) < mro.index(BaseModel): warnings.warn( GenericBeforeBaseModelWarning( 'Classes should inherit from `BaseModel` before generic classes (e.g. `typing.Generic[T]`) ' 'for pydantic generics to work properly.' 
), stacklevel=2, ) cls.__pydantic_custom_init__ = not getattr(cls.__init__, '__pydantic_base_init__', False) cls.__pydantic_post_init__ = None if cls.model_post_init is BaseModel.model_post_init else 'model_post_init' cls.__pydantic_decorators__ = DecoratorInfos.build(cls) # Use the getattr below to grab the __parameters__ from the `typing.Generic` parent class if __pydantic_generic_metadata__: cls.__pydantic_generic_metadata__ = __pydantic_generic_metadata__ else: parent_parameters = getattr(cls, '__pydantic_generic_metadata__', {}).get('parameters', ()) parameters = getattr(cls, '__parameters__', None) or parent_parameters if parameters and parent_parameters and not all(x in parameters for x in parent_parameters): from ..root_model import RootModelRootType missing_parameters = tuple(x for x in parameters if x not in parent_parameters) if RootModelRootType in parent_parameters and RootModelRootType not in parameters: # This is a special case where the user has subclassed `RootModel`, but has not parametrized # RootModel with the generic type identifiers being used. Ex: # class MyModel(RootModel, Generic[T]): # root: T # Should instead just be: # class MyModel(RootModel[T]): # root: T parameters_str = ', '.join([x.__name__ for x in missing_parameters]) error_message = ( f'{cls.__name__} is a subclass of `RootModel`, but does not include the generic type identifier(s) ' f'{parameters_str} in its parameters. ' f'You should parametrize RootModel directly, e.g., `class {cls.__name__}(RootModel[{parameters_str}]): ...`.' ) else: combined_parameters = parent_parameters + missing_parameters parameters_str = ', '.join([str(x) for x in combined_parameters]) generic_type_label = f'typing.Generic[{parameters_str}]' error_message = ( f'All parameters must be present on typing.Generic;' f' you should inherit from {generic_type_label}.' ) if Generic not in bases: # pragma: no cover # We raise an error here not because it is desirable, but because some cases are mishandled. # It would be nice to remove this error and still have things behave as expected, it's just # challenging because we are using a custom `__class_getitem__` to parametrize generic models, # and not returning a typing._GenericAlias from it. bases_str = ', '.join([x.__name__ for x in bases] + [generic_type_label]) error_message += ( f' Note: `typing.Generic` must go last: `class {cls.__name__}({bases_str}): ...`)' ) raise TypeError(error_message) cls.__pydantic_generic_metadata__ = { 'origin': None, 'args': (), 'parameters': parameters, } cls.__pydantic_complete__ = False # Ensure this specific class gets completed # preserve `__set_name__` protocol defined in https://peps.python.org/pep-0487 # for attributes not in `new_namespace` (e.g. 
private attributes) for name, obj in private_attributes.items(): obj.__set_name__(cls, name) if __pydantic_reset_parent_namespace__: cls.__pydantic_parent_namespace__ = build_lenient_weakvaluedict(parent_frame_namespace()) parent_namespace = getattr(cls, '__pydantic_parent_namespace__', None) if isinstance(parent_namespace, dict): parent_namespace = unpack_lenient_weakvaluedict(parent_namespace) types_namespace = get_cls_types_namespace(cls, parent_namespace) set_model_fields(cls, bases, config_wrapper, types_namespace) if config_wrapper.frozen and '__hash__' not in namespace: set_default_hash_func(cls, bases) complete_model_class( cls, cls_name, config_wrapper, raise_errors=False, types_namespace=types_namespace, create_model_module=_create_model_module, ) # If this is placed before the complete_model_class call above, # the generic computed fields return type is set to PydanticUndefined cls.model_computed_fields = {k: v.info for k, v in cls.__pydantic_decorators__.computed_fields.items()} set_deprecated_descriptors(cls) # using super(cls, cls) on the next line ensures we only call the parent class's __pydantic_init_subclass__ # I believe the `type: ignore` is only necessary because mypy doesn't realize that this code branch is # only hit for _proper_ subclasses of BaseModel super(cls, cls).__pydantic_init_subclass__(**kwargs) # type: ignore[misc] return cls else: # this is the BaseModel class itself being created, no logic required return super().__new__(mcs, cls_name, bases, namespace, **kwargs)
(self: 'BaseModel', _ModelMetaclass__context: 'Any') -> 'None'
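The effect of wrapped_model_post_init above is observable from user code: when a model declares both private attributes and a model_post_init hook, pydantic initialises the private attributes first and then runs the hook. A small check, assuming pydantic v2:

from typing import Any
from pydantic import BaseModel, PrivateAttr

class Timed(BaseModel):
    name: str
    _created_at: float = PrivateAttr(default=0.0)

    def model_post_init(self, __context: Any) -> None:
        # Runs after init_private_attributes, thanks to the wrapper above.
        self._created_at = 123.0

m = Timed(name="x")
print(m._created_at)  # 123.0 -- the private attribute exists and the user hook ran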
63,987
openai.pagination
next_page_info
null
@override def next_page_info(self) -> Optional[PageInfo]: data = self.data if not data: return None item = cast(Any, data[-1]) if not isinstance(item, CursorPageItem) or item.id is None: # TODO emit warning log return None return PageInfo(params={"after": item.id})
(self) -> Optional[openai._base_client.PageInfo]
63,990
litellm.utils
TextChoices
null
class TextChoices(OpenAIObject): def __init__(self, finish_reason=None, index=0, text=None, logprobs=None, **params): super(TextChoices, self).__init__(**params) if finish_reason: self.finish_reason = map_finish_reason(finish_reason) else: self.finish_reason = None self.index = index if text is not None: self.text = text else: self.text = None if logprobs is None: self.logprobs = None else: if isinstance(logprobs, dict): self.logprobs = Logprobs(**logprobs) else: self.logprobs = logprobs def __contains__(self, key): # Define custom behavior for the 'in' operator return hasattr(self, key) def get(self, key, default=None): # Custom .get() method to access attributes with a default value if the attribute doesn't exist return getattr(self, key, default) def __getitem__(self, key): # Allow dictionary-style access to attributes return getattr(self, key) def __setitem__(self, key, value): # Allow dictionary-style assignment of attributes setattr(self, key, value) def json(self, **kwargs): try: return self.model_dump() # noqa except: # if using pydantic v1 return self.dict()
(finish_reason=None, index=0, text=None, logprobs=None, **params)
63,999
litellm.utils
__init__
null
def __init__(self, finish_reason=None, index=0, text=None, logprobs=None, **params): super(TextChoices, self).__init__(**params) if finish_reason: self.finish_reason = map_finish_reason(finish_reason) else: self.finish_reason = None self.index = index if text is not None: self.text = text else: self.text = None if logprobs is None: self.logprobs = None else: if isinstance(logprobs, dict): self.logprobs = Logprobs(**logprobs) else: self.logprobs = logprobs
(self, finish_reason=None, index=0, text=None, logprobs=None, **params)
64,025
litellm.utils
TextCompletionResponse
{ "id": response["id"], "object": "text_completion", "created": response["created"], "model": response["model"], "choices": [ { "text": response["choices"][0]["message"]["content"], "index": response["choices"][0]["index"], "logprobs": transformed_logprobs, "finish_reason": response["choices"][0]["finish_reason"] } ], "usage": response["usage"] }
class TextCompletionResponse(OpenAIObject): """ { "id": response["id"], "object": "text_completion", "created": response["created"], "model": response["model"], "choices": [ { "text": response["choices"][0]["message"]["content"], "index": response["choices"][0]["index"], "logprobs": transformed_logprobs, "finish_reason": response["choices"][0]["finish_reason"] } ], "usage": response["usage"] } """ id: str object: str created: int model: Optional[str] choices: List[TextChoices] usage: Optional[Usage] _response_ms: Optional[int] = None _hidden_params: HiddenParams def __init__( self, id=None, choices=None, created=None, model=None, usage=None, stream=False, response_ms=None, object=None, **params, ): if stream: object = "text_completion.chunk" choices = [TextChoices()] else: object = "text_completion" if choices is not None and isinstance(choices, list): new_choices = [] for choice in choices: if isinstance(choice, TextChoices): _new_choice = choice elif isinstance(choice, dict): _new_choice = TextChoices(**choice) new_choices.append(_new_choice) choices = new_choices else: choices = [TextChoices()] if object is not None: object = object if id is None: id = _generate_id() else: id = id if created is None: created = int(time.time()) else: created = created model = model if usage: usage = usage else: usage = Usage() super(TextCompletionResponse, self).__init__( id=id, object=object, created=created, model=model, choices=choices, usage=usage, **params, ) if response_ms: self._response_ms = response_ms else: self._response_ms = None self._hidden_params = HiddenParams() def __contains__(self, key): # Define custom behavior for the 'in' operator return hasattr(self, key) def get(self, key, default=None): # Custom .get() method to access attributes with a default value if the attribute doesn't exist return getattr(self, key, default) def __getitem__(self, key): # Allow dictionary-style access to attributes return getattr(self, key) def __setitem__(self, key, value): # Allow dictionary-style assignment of attributes setattr(self, key, value)
(id=None, choices=None, created=None, model=None, usage=None, stream=False, response_ms=None, object=None, **params)
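The docstring above describes how a chat-completion response is reshaped into a text-completion response. A standalone sketch of that mapping (the helper name and the transformed_logprobs placeholder are illustrative, not litellm's own API):

def chat_to_text_completion(response: dict, transformed_logprobs=None) -> dict:
    # Field-for-field translation described in the TextCompletionResponse docstring.
    first = response["choices"][0]
    return {
        "id": response["id"],
        "object": "text_completion",
        "created": response["created"],
        "model": response["model"],
        "choices": [
            {
                "text": first["message"]["content"],
                "index": first["index"],
                "logprobs": transformed_logprobs,
                "finish_reason": first["finish_reason"],
            }
        ],
        "usage": response["usage"],
    }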
64,034
litellm.utils
__init__
null
def __init__( self, id=None, choices=None, created=None, model=None, usage=None, stream=False, response_ms=None, object=None, **params, ): if stream: object = "text_completion.chunk" choices = [TextChoices()] else: object = "text_completion" if choices is not None and isinstance(choices, list): new_choices = [] for choice in choices: if isinstance(choice, TextChoices): _new_choice = choice elif isinstance(choice, dict): _new_choice = TextChoices(**choice) new_choices.append(_new_choice) choices = new_choices else: choices = [TextChoices()] if object is not None: object = object if id is None: id = _generate_id() else: id = id if created is None: created = int(time.time()) else: created = created model = model if usage: usage = usage else: usage = Usage() super(TextCompletionResponse, self).__init__( id=id, object=object, created=created, model=model, choices=choices, usage=usage, **params, ) if response_ms: self._response_ms = response_ms else: self._response_ms = None self._hidden_params = HiddenParams()
(self, id=None, choices=None, created=None, model=None, usage=None, stream=False, response_ms=None, object=None, **params)
64,060
litellm.utils
TextCompletionStreamWrapper
null
class TextCompletionStreamWrapper: def __init__(self, completion_stream, model, stream_options: Optional[dict] = None): self.completion_stream = completion_stream self.model = model self.stream_options = stream_options def __iter__(self): return self def __aiter__(self): return self def convert_to_text_completion_object(self, chunk: ModelResponse): try: response = TextCompletionResponse() response["id"] = chunk.get("id", None) response["object"] = "text_completion" response["created"] = chunk.get("created", None) response["model"] = chunk.get("model", None) text_choices = TextChoices() if isinstance( chunk, Choices ): # chunk should always be of type StreamingChoices raise Exception text_choices["text"] = chunk["choices"][0]["delta"]["content"] text_choices["index"] = chunk["choices"][0]["index"] text_choices["finish_reason"] = chunk["choices"][0]["finish_reason"] response["choices"] = [text_choices] # only pass usage when stream_options["include_usage"] is True if ( self.stream_options and self.stream_options.get("include_usage", False) == True ): response["usage"] = chunk.get("usage", None) return response except Exception as e: raise Exception( f"Error occurred converting to text completion object - chunk: {chunk}; Error: {str(e)}" ) def __next__(self): # model_response = ModelResponse(stream=True, model=self.model) response = TextCompletionResponse() try: for chunk in self.completion_stream: if chunk == "None" or chunk is None: raise Exception processed_chunk = self.convert_to_text_completion_object(chunk=chunk) return processed_chunk raise StopIteration except StopIteration: raise StopIteration except Exception as e: print(f"got exception {e}") # noqa async def __anext__(self): try: async for chunk in self.completion_stream: if chunk == "None" or chunk is None: raise Exception processed_chunk = self.convert_to_text_completion_object(chunk=chunk) return processed_chunk raise StopIteration except StopIteration: raise StopAsyncIteration
(completion_stream, model, stream_options: Optional[dict] = None)
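In practice this wrapper is what litellm.text_completion(..., stream=True) hands back, so callers iterate chunks that already look like text-completion chunks. A hedged usage sketch; the model name is a placeholder and the exact return type may vary between litellm versions:

import litellm

stream = litellm.text_completion(
    model="gpt-3.5-turbo-instruct",   # placeholder text-completion model
    prompt="Say hello",
    stream=True,
)
for chunk in stream:                  # __next__ converts each chat chunk on the fly
    if chunk is not None:
        print(chunk["choices"][0]["text"], end="", flush=True)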
64,062
litellm.utils
__anext__
null
def __next__(self): # model_response = ModelResponse(stream=True, model=self.model) response = TextCompletionResponse() try: for chunk in self.completion_stream: if chunk == "None" or chunk is None: raise Exception processed_chunk = self.convert_to_text_completion_object(chunk=chunk) return processed_chunk raise StopIteration except StopIteration: raise StopIteration except Exception as e: print(f"got exception {e}") # noqa
(self)
64,063
litellm.utils
__init__
null
def __init__(self, completion_stream, model, stream_options: Optional[dict] = None): self.completion_stream = completion_stream self.model = model self.stream_options = stream_options
(self, completion_stream, model, stream_options: Optional[dict] = None)
64,066
litellm.utils
convert_to_text_completion_object
null
def convert_to_text_completion_object(self, chunk: ModelResponse): try: response = TextCompletionResponse() response["id"] = chunk.get("id", None) response["object"] = "text_completion" response["created"] = chunk.get("created", None) response["model"] = chunk.get("model", None) text_choices = TextChoices() if isinstance( chunk, Choices ): # chunk should always be of type StreamingChoices raise Exception text_choices["text"] = chunk["choices"][0]["delta"]["content"] text_choices["index"] = chunk["choices"][0]["index"] text_choices["finish_reason"] = chunk["choices"][0]["finish_reason"] response["choices"] = [text_choices] # only pass usage when stream_options["include_usage"] is True if ( self.stream_options and self.stream_options.get("include_usage", False) == True ): response["usage"] = chunk.get("usage", None) return response except Exception as e: raise Exception( f"Error occurred converting to text completion object - chunk: {chunk}; Error: {str(e)}" )
(self, chunk: litellm.utils.ModelResponse)
64,067
litellm.types.llms.openai
Thread
null
class Thread(BaseModel): id: str """The identifier, which can be referenced in API endpoints.""" created_at: int """The Unix timestamp (in seconds) for when the thread was created.""" metadata: Optional[object] = None """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. """ object: Literal["thread"] """The object type, which is always `thread`."""
(*, id: str, created_at: int, metadata: Optional[object] = None, object: Literal['thread']) -> None
64,105
litellm.exceptions
Timeout
null
class Timeout(openai.APITimeoutError): # type: ignore def __init__(self, message, model, llm_provider): request = httpx.Request(method="POST", url="https://api.openai.com/v1") super().__init__( request=request ) # Call the base class constructor with the parameters it needs self.status_code = 408 self.message = message self.model = model self.llm_provider = llm_provider # custom function to convert to str def __str__(self): return str(self.message)
(message, model, llm_provider)
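Because this class subclasses openai.APITimeoutError, callers can catch it either as a litellm exception or as an OpenAI one. A hedged sketch; whether an aggressively small timeout actually trips this path depends on the provider and network:

import litellm

try:
    litellm.completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "hi"}],
        timeout=0.001,                      # deliberately too small
    )
except litellm.exceptions.Timeout as e:     # also an openai.APITimeoutError
    print(e.status_code, e.llm_provider)    # 408 plus the provider name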
64,106
litellm.exceptions
__init__
null
def __init__(self, message, model, llm_provider): request = httpx.Request(method="POST", url="https://api.openai.com/v1") super().__init__( request=request ) # Call the base class constructor with the parameters it needs self.status_code = 408 self.message = message self.model = model self.llm_provider = llm_provider
(self, message, model, llm_provider)
64,108
litellm.llms.together_ai
TogetherAIConfig
Reference: https://docs.together.ai/reference/inference The class `TogetherAIConfig` provides configuration for the TogetherAI's API interface. Here are the parameters: - `max_tokens` (int32, required): The maximum number of tokens to generate. - `stop` (string, optional): A string sequence that will truncate (stop) the inference text output. For example, "\n\n" will stop generation as soon as the model generates two newlines. - `temperature` (float, optional): A decimal number that determines the degree of randomness in the response. A value of 1 will always yield the same output. A temperature less than 1 favors more correctness and is appropriate for question answering or summarization. A value greater than 1 introduces more randomness in the output. - `top_p` (float, optional): The `top_p` (nucleus) parameter is used to dynamically adjust the number of choices for each predicted token based on the cumulative probabilities. It specifies a probability threshold, below which all less likely tokens are filtered out. This technique helps to maintain diversity and generate more fluent and natural-sounding text. - `top_k` (int32, optional): The `top_k` parameter is used to limit the number of choices for the next predicted word or token. It specifies the maximum number of tokens to consider at each step, based on their probability of occurrence. This technique helps to speed up the generation process and can improve the quality of the generated text by focusing on the most likely options. - `repetition_penalty` (float, optional): A number that controls the diversity of generated text by reducing the likelihood of repeated sequences. Higher values decrease repetition. - `logprobs` (int32, optional): This parameter is not described in the prompt.
class TogetherAIConfig: """ Reference: https://docs.together.ai/reference/inference The class `TogetherAIConfig` provides configuration for the TogetherAI's API interface. Here are the parameters: - `max_tokens` (int32, required): The maximum number of tokens to generate. - `stop` (string, optional): A string sequence that will truncate (stop) the inference text output. For example, "\n\n" will stop generation as soon as the model generates two newlines. - `temperature` (float, optional): A decimal number that determines the degree of randomness in the response. A value of 1 will always yield the same output. A temperature less than 1 favors more correctness and is appropriate for question answering or summarization. A value greater than 1 introduces more randomness in the output. - `top_p` (float, optional): The `top_p` (nucleus) parameter is used to dynamically adjust the number of choices for each predicted token based on the cumulative probabilities. It specifies a probability threshold, below which all less likely tokens are filtered out. This technique helps to maintain diversity and generate more fluent and natural-sounding text. - `top_k` (int32, optional): The `top_k` parameter is used to limit the number of choices for the next predicted word or token. It specifies the maximum number of tokens to consider at each step, based on their probability of occurrence. This technique helps to speed up the generation process and can improve the quality of the generated text by focusing on the most likely options. - `repetition_penalty` (float, optional): A number that controls the diversity of generated text by reducing the likelihood of repeated sequences. Higher values decrease repetition. - `logprobs` (int32, optional): This parameter is not described in the prompt. """ max_tokens: Optional[int] = None stop: Optional[str] = None temperature: Optional[int] = None top_p: Optional[float] = None top_k: Optional[int] = None repetition_penalty: Optional[float] = None logprobs: Optional[int] = None def __init__( self, max_tokens: Optional[int] = None, stop: Optional[str] = None, temperature: Optional[int] = None, top_p: Optional[float] = None, top_k: Optional[int] = None, repetition_penalty: Optional[float] = None, logprobs: Optional[int] = None, ) -> None: locals_ = locals() for key, value in locals_.items(): if key != "self" and value is not None: setattr(self.__class__, key, value) @classmethod def get_config(cls): return { k: v for k, v in cls.__dict__.items() if not k.startswith("__") and not isinstance( v, ( types.FunctionType, types.BuiltinFunctionType, classmethod, staticmethod, ), ) and v is not None }
(max_tokens: Optional[int] = None, stop: Optional[str] = None, temperature: Optional[int] = None, top_p: Optional[float] = None, top_k: Optional[int] = None, repetition_penalty: Optional[float] = None, logprobs: Optional[int] = None) -> None
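Note the constructor's side effect: non-None arguments are written onto the class itself, and get_config() later returns exactly those class-level, non-callable values. A hedged sketch; the import path is taken from the row above and may move between litellm versions:

from litellm.llms.together_ai import TogetherAIConfig

# Constructing the config stores the non-None values as *class* attributes ...
TogetherAIConfig(max_tokens=256, temperature=0.7)

# ... which is what get_config() reads back (unset params are omitted).
print(TogetherAIConfig.get_config())   # {'max_tokens': 256, 'temperature': 0.7}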
64,109
litellm.llms.together_ai
__init__
null
def __init__( self, max_tokens: Optional[int] = None, stop: Optional[str] = None, temperature: Optional[int] = None, top_p: Optional[float] = None, top_k: Optional[int] = None, repetition_penalty: Optional[float] = None, logprobs: Optional[int] = None, ) -> None: locals_ = locals() for key, value in locals_.items(): if key != "self" and value is not None: setattr(self.__class__, key, value)
(self, max_tokens: Optional[int] = None, stop: Optional[str] = None, temperature: Optional[int] = None, top_p: Optional[float] = None, top_k: Optional[int] = None, repetition_penalty: Optional[float] = None, logprobs: Optional[int] = None) -> NoneType
64,110
litellm.types.llms.openai
ToolResourcesCodeInterpreter
null
class ToolResourcesCodeInterpreter(TypedDict, total=False): file_ids: List[str] """ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. """
null
64,111
litellm.types.llms.openai
ToolResourcesFileSearch
null
class ToolResourcesFileSearch(TypedDict, total=False): vector_store_ids: List[str] """ The [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. """ vector_stores: Iterable[ToolResourcesFileSearchVectorStore] """ A helper to create a [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) with file_ids and attach it to this thread. There can be a maximum of 1 vector store attached to the thread. """
null
64,112
litellm.types.llms.openai
ToolResourcesFileSearchVectorStore
null
class ToolResourcesFileSearchVectorStore(TypedDict, total=False): file_ids: List[str] """ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store. """ metadata: object """Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. """
null
64,113
litellm.utils
TranscriptionResponse
null
class TranscriptionResponse(OpenAIObject): text: Optional[str] = None _hidden_params: dict = {} def __init__(self, text=None): super().__init__(text=text) def __contains__(self, key): # Define custom behavior for the 'in' operator return hasattr(self, key) def get(self, key, default=None): # Custom .get() method to access attributes with a default value if the attribute doesn't exist return getattr(self, key, default) def __getitem__(self, key): # Allow dictionary-style access to attributes return getattr(self, key) def __setitem__(self, key, value): # Allow dictionary-style assignment of attributes setattr(self, key, value) def json(self, **kwargs): try: return self.model_dump() # noqa except: # if using pydantic v1 return self.dict()
(text=None)
64,122
litellm.utils
__init__
null
def __init__(self, text=None): super().__init__(text=text)
(self, text=None)
64,148
litellm.llms.triton
TritonChatCompletion
null
class TritonChatCompletion(BaseLLM): def __init__(self) -> None: super().__init__() async def aembedding( self, data: dict, model_response: litellm.utils.EmbeddingResponse, api_base: str, logging_obj=None, api_key: Optional[str] = None, ): async_handler = AsyncHTTPHandler( timeout=httpx.Timeout(timeout=600.0, connect=5.0) ) response = await async_handler.post(url=api_base, data=json.dumps(data)) if response.status_code != 200: raise TritonError(status_code=response.status_code, message=response.text) _text_response = response.text logging_obj.post_call(original_response=_text_response) _json_response = response.json() _outputs = _json_response["outputs"] _output_data = _outputs[0]["data"] _embedding_output = { "object": "embedding", "index": 0, "embedding": _output_data, } model_response.model = _json_response.get("model_name", "None") model_response.data = [_embedding_output] return model_response def embedding( self, model: str, input: list, timeout: float, api_base: str, model_response: litellm.utils.EmbeddingResponse, api_key: Optional[str] = None, logging_obj=None, optional_params=None, client=None, aembedding=None, ): data_for_triton = { "inputs": [ { "name": "input_text", "shape": [1], "datatype": "BYTES", "data": input, } ] } ## LOGGING curl_string = f"curl {api_base} -X POST -H 'Content-Type: application/json' -d '{data_for_triton}'" logging_obj.pre_call( input="", api_key=None, additional_args={ "complete_input_dict": optional_params, "request_str": curl_string, }, ) if aembedding == True: response = self.aembedding( data=data_for_triton, model_response=model_response, logging_obj=logging_obj, api_base=api_base, api_key=api_key, ) return response else: raise Exception( "Only async embedding supported for triton, please use litellm.aembedding() for now" )
() -> None
64,156
litellm.llms.triton
embedding
null
def embedding( self, model: str, input: list, timeout: float, api_base: str, model_response: litellm.utils.EmbeddingResponse, api_key: Optional[str] = None, logging_obj=None, optional_params=None, client=None, aembedding=None, ): data_for_triton = { "inputs": [ { "name": "input_text", "shape": [1], "datatype": "BYTES", "data": input, } ] } ## LOGGING curl_string = f"curl {api_base} -X POST -H 'Content-Type: application/json' -d '{data_for_triton}'" logging_obj.pre_call( input="", api_key=None, additional_args={ "complete_input_dict": optional_params, "request_str": curl_string, }, ) if aembedding == True: response = self.aembedding( data=data_for_triton, model_response=model_response, logging_obj=logging_obj, api_base=api_base, api_key=api_key, ) return response else: raise Exception( "Only async embedding supported for triton, please use litellm.aembedding() for now" )
(self, model: str, input: list, timeout: float, api_base: str, model_response: litellm.utils.EmbeddingResponse, api_key: Optional[str] = None, logging_obj=None, optional_params=None, client=None, aembedding=None)
64,160
litellm.exceptions
UnprocessableEntityError
null
class UnprocessableEntityError(openai.UnprocessableEntityError): # type: ignore def __init__(self, message, model, llm_provider, response: httpx.Response): self.status_code = 422 self.message = message self.model = model self.llm_provider = llm_provider super().__init__( self.message, response=response, body=None ) # Call the base class constructor with the parameters it needs
(message, model, llm_provider, response: httpx.Response)
64,161
litellm.exceptions
__init__
null
def __init__(self, message, model, llm_provider, response: httpx.Response): self.status_code = 422 self.message = message self.model = model self.llm_provider = llm_provider super().__init__( self.message, response=response, body=None ) # Call the base class constructor with the parameters it needs
(self, message, model, llm_provider, response: httpx.Response)
64,162
litellm.types.router
UpdateRouterConfig
Set of params that you can modify via `router.update_settings()`.
class UpdateRouterConfig(BaseModel): """ Set of params that you can modify via `router.update_settings()`. """ routing_strategy_args: Optional[dict] = None routing_strategy: Optional[str] = None model_group_retry_policy: Optional[dict] = None allowed_fails: Optional[int] = None cooldown_time: Optional[float] = None num_retries: Optional[int] = None timeout: Optional[float] = None max_retries: Optional[int] = None retry_after: Optional[float] = None fallbacks: Optional[List[dict]] = None context_window_fallbacks: Optional[List[dict]] = None class Config: protected_namespaces = ()
(*, routing_strategy_args: Optional[dict] = None, routing_strategy: Optional[str] = None, model_group_retry_policy: Optional[dict] = None, allowed_fails: Optional[int] = None, cooldown_time: Optional[float] = None, num_retries: Optional[int] = None, timeout: Optional[float] = None, max_retries: Optional[int] = None, retry_after: Optional[float] = None, fallbacks: Optional[List[dict]] = None, context_window_fallbacks: Optional[List[dict]] = None) -> None
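A hedged usage sketch of the router.update_settings() call that this model documents; the model_list entry below is a placeholder deployment and the accepted keyword names are the fields listed above:

from litellm import Router

router = Router(
    model_list=[{
        "model_name": "gpt-3.5-turbo",                 # placeholder deployment
        "litellm_params": {"model": "gpt-3.5-turbo"},
    }]
)

# Any subset of the UpdateRouterConfig fields can be changed at runtime.
router.update_settings(num_retries=3, timeout=30.0, allowed_fails=2)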
64,191
litellm.utils
Usage
null
class Usage(OpenAIObject): def __init__( self, prompt_tokens=None, completion_tokens=None, total_tokens=None, **params ): super(Usage, self).__init__(**params) if prompt_tokens: self.prompt_tokens = prompt_tokens if completion_tokens: self.completion_tokens = completion_tokens if total_tokens: self.total_tokens = total_tokens def __contains__(self, key): # Define custom behavior for the 'in' operator return hasattr(self, key) def get(self, key, default=None): # Custom .get() method to access attributes with a default value if the attribute doesn't exist return getattr(self, key, default) def __getitem__(self, key): # Allow dictionary-style access to attributes return getattr(self, key) def __setitem__(self, key, value): # Allow dictionary-style assignment of attributes setattr(self, key, value)
(prompt_tokens=None, completion_tokens=None, total_tokens=None, **params)
64,200
litellm.utils
__init__
null
def __init__( self, prompt_tokens=None, completion_tokens=None, total_tokens=None, **params ): super(Usage, self).__init__(**params) if prompt_tokens: self.prompt_tokens = prompt_tokens if completion_tokens: self.completion_tokens = completion_tokens if total_tokens: self.total_tokens = total_tokens
(self, prompt_tokens=None, completion_tokens=None, total_tokens=None, **params)
64,226
litellm.llms.vertex_ai_anthropic
VertexAIAnthropicConfig
Reference: https://docs.anthropic.com/claude/reference/messages_post Note that the API for Claude on Vertex differs from the Anthropic API documentation in the following ways: - `model` is not a valid parameter. The model is instead specified in the Google Cloud endpoint URL. - `anthropic_version` is a required parameter and must be set to "vertex-2023-10-16". The class `VertexAIAnthropicConfig` provides configuration for the VertexAI's Anthropic API interface. Below are the parameters: - `max_tokens` Required (integer) max tokens, - `anthropic_version` Required (string) version of anthropic for bedrock - e.g. "bedrock-2023-05-31" - `system` Optional (string) the system prompt, conversion from openai format to this is handled in factory.py - `temperature` Optional (float) The amount of randomness injected into the response - `top_p` Optional (float) Use nucleus sampling. - `top_k` Optional (int) Only sample from the top K options for each subsequent token - `stop_sequences` Optional (List[str]) Custom text sequences that cause the model to stop generating Note: Please make sure to modify the default parameters as required for your use case.
class VertexAIAnthropicConfig: """ Reference: https://docs.anthropic.com/claude/reference/messages_post Note that the API for Claude on Vertex differs from the Anthropic API documentation in the following ways: - `model` is not a valid parameter. The model is instead specified in the Google Cloud endpoint URL. - `anthropic_version` is a required parameter and must be set to "vertex-2023-10-16". The class `VertexAIAnthropicConfig` provides configuration for the VertexAI's Anthropic API interface. Below are the parameters: - `max_tokens` Required (integer) max tokens, - `anthropic_version` Required (string) version of anthropic for bedrock - e.g. "bedrock-2023-05-31" - `system` Optional (string) the system prompt, conversion from openai format to this is handled in factory.py - `temperature` Optional (float) The amount of randomness injected into the response - `top_p` Optional (float) Use nucleus sampling. - `top_k` Optional (int) Only sample from the top K options for each subsequent token - `stop_sequences` Optional (List[str]) Custom text sequences that cause the model to stop generating Note: Please make sure to modify the default parameters as required for your use case. """ max_tokens: Optional[int] = ( 4096 # anthropic max - setting this doesn't impact response, but is required by anthropic. ) system: Optional[str] = None temperature: Optional[float] = None top_p: Optional[float] = None top_k: Optional[int] = None stop_sequences: Optional[List[str]] = None def __init__( self, max_tokens: Optional[int] = None, anthropic_version: Optional[str] = None, ) -> None: locals_ = locals() for key, value in locals_.items(): if key == "max_tokens" and value is None: value = self.max_tokens if key != "self" and value is not None: setattr(self.__class__, key, value) @classmethod def get_config(cls): return { k: v for k, v in cls.__dict__.items() if not k.startswith("__") and not isinstance( v, ( types.FunctionType, types.BuiltinFunctionType, classmethod, staticmethod, ), ) and v is not None } def get_supported_openai_params(self): return [ "max_tokens", "tools", "tool_choice", "stream", "stop", "temperature", "top_p", ] def map_openai_params(self, non_default_params: dict, optional_params: dict): for param, value in non_default_params.items(): if param == "max_tokens": optional_params["max_tokens"] = value if param == "tools": optional_params["tools"] = value if param == "stream": optional_params["stream"] = value if param == "stop": optional_params["stop_sequences"] = value if param == "temperature": optional_params["temperature"] = value if param == "top_p": optional_params["top_p"] = value return optional_params
(max_tokens: Optional[int] = None, anthropic_version: Optional[str] = None) -> None
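A hedged sketch of the parameter mapping above, showing in particular that OpenAI's `stop` becomes Anthropic's `stop_sequences`; the import path is the row's module path and may differ across litellm versions:

from litellm.llms.vertex_ai_anthropic import VertexAIAnthropicConfig

mapped = VertexAIAnthropicConfig().map_openai_params(
    non_default_params={"max_tokens": 512, "stop": ["END"], "temperature": 0.2},
    optional_params={},
)
print(mapped)  # {'max_tokens': 512, 'stop_sequences': ['END'], 'temperature': 0.2}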
64,227
litellm.llms.vertex_ai_anthropic
__init__
null
def __init__( self, max_tokens: Optional[int] = None, anthropic_version: Optional[str] = None, ) -> None: locals_ = locals() for key, value in locals_.items(): if key == "max_tokens" and value is None: value = self.max_tokens if key != "self" and value is not None: setattr(self.__class__, key, value)
(self, max_tokens: Optional[int] = None, anthropic_version: Optional[str] = None) -> NoneType
64,228
litellm.llms.vertex_ai_anthropic
get_supported_openai_params
null
def get_supported_openai_params(self): return [ "max_tokens", "tools", "tool_choice", "stream", "stop", "temperature", "top_p", ]
(self)
64,230
litellm.llms.vertex_ai
VertexAIConfig
Reference: https://cloud.google.com/vertex-ai/docs/generative-ai/chat/test-chat-prompts Reference: https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference The class `VertexAIConfig` provides configuration for the VertexAI's API interface. Below are the parameters: - `temperature` (float): This controls the degree of randomness in token selection. - `max_output_tokens` (integer): This sets the limitation for the maximum amount of token in the text output. In this case, the default value is 256. - `top_p` (float): The tokens are selected from the most probable to the least probable until the sum of their probabilities equals the `top_p` value. Default is 0.95. - `top_k` (integer): The value of `top_k` determines how many of the most probable tokens are considered in the selection. For example, a `top_k` of 1 means the selected token is the most probable among all tokens. The default value is 40. - `response_mime_type` (str): The MIME type of the response. The default value is 'text/plain'. - `candidate_count` (int): Number of generated responses to return. - `stop_sequences` (List[str]): The set of character sequences (up to 5) that will stop output generation. If specified, the API will stop at the first appearance of a stop sequence. The stop sequence will not be included as part of the response. - `frequency_penalty` (float): This parameter is used to penalize the model from repeating the same output. The default value is 0.0. - `presence_penalty` (float): This parameter is used to penalize the model from generating the same output as the input. The default value is 0.0. Note: Please make sure to modify the default parameters as required for your use case.
class VertexAIConfig: """ Reference: https://cloud.google.com/vertex-ai/docs/generative-ai/chat/test-chat-prompts Reference: https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference The class `VertexAIConfig` provides configuration for the VertexAI's API interface. Below are the parameters: - `temperature` (float): This controls the degree of randomness in token selection. - `max_output_tokens` (integer): This sets the limitation for the maximum amount of token in the text output. In this case, the default value is 256. - `top_p` (float): The tokens are selected from the most probable to the least probable until the sum of their probabilities equals the `top_p` value. Default is 0.95. - `top_k` (integer): The value of `top_k` determines how many of the most probable tokens are considered in the selection. For example, a `top_k` of 1 means the selected token is the most probable among all tokens. The default value is 40. - `response_mime_type` (str): The MIME type of the response. The default value is 'text/plain'. - `candidate_count` (int): Number of generated responses to return. - `stop_sequences` (List[str]): The set of character sequences (up to 5) that will stop output generation. If specified, the API will stop at the first appearance of a stop sequence. The stop sequence will not be included as part of the response. - `frequency_penalty` (float): This parameter is used to penalize the model from repeating the same output. The default value is 0.0. - `presence_penalty` (float): This parameter is used to penalize the model from generating the same output as the input. The default value is 0.0. Note: Please make sure to modify the default parameters as required for your use case. """ temperature: Optional[float] = None max_output_tokens: Optional[int] = None top_p: Optional[float] = None top_k: Optional[int] = None response_mime_type: Optional[str] = None candidate_count: Optional[int] = None stop_sequences: Optional[list] = None frequency_penalty: Optional[float] = None presence_penalty: Optional[float] = None def __init__( self, temperature: Optional[float] = None, max_output_tokens: Optional[int] = None, top_p: Optional[float] = None, top_k: Optional[int] = None, response_mime_type: Optional[str] = None, candidate_count: Optional[int] = None, stop_sequences: Optional[list] = None, frequency_penalty: Optional[float] = None, presence_penalty: Optional[float] = None, ) -> None: locals_ = locals() for key, value in locals_.items(): if key != "self" and value is not None: setattr(self.__class__, key, value) @classmethod def get_config(cls): return { k: v for k, v in cls.__dict__.items() if not k.startswith("__") and not isinstance( v, ( types.FunctionType, types.BuiltinFunctionType, classmethod, staticmethod, ), ) and v is not None } def get_supported_openai_params(self): return [ "temperature", "top_p", "max_tokens", "stream", "tools", "tool_choice", "response_format", "n", "stop", ] def map_openai_params(self, non_default_params: dict, optional_params: dict): for param, value in non_default_params.items(): if param == "temperature": optional_params["temperature"] = value if param == "top_p": optional_params["top_p"] = value if ( param == "stream" and value == True ): # sending stream = False, can cause it to get passed unchecked and raise issues optional_params["stream"] = value if param == "n": optional_params["candidate_count"] = value if param == "stop": if isinstance(value, str): optional_params["stop_sequences"] = [value] elif isinstance(value, list): 
optional_params["stop_sequences"] = value if param == "max_tokens": optional_params["max_output_tokens"] = value if param == "response_format" and value["type"] == "json_object": optional_params["response_mime_type"] = "application/json" if param == "frequency_penalty": optional_params["frequency_penalty"] = value if param == "presence_penalty": optional_params["presence_penalty"] = value if param == "tools" and isinstance(value, list): from vertexai.preview import generative_models gtool_func_declarations = [] for tool in value: gtool_func_declaration = generative_models.FunctionDeclaration( name=tool["function"]["name"], description=tool["function"].get("description", ""), parameters=tool["function"].get("parameters", {}), ) gtool_func_declarations.append(gtool_func_declaration) optional_params["tools"] = [ generative_models.Tool( function_declarations=gtool_func_declarations ) ] if param == "tool_choice" and ( isinstance(value, str) or isinstance(value, dict) ): pass return optional_params def get_mapped_special_auth_params(self) -> dict: """ Common auth params across bedrock/vertex_ai/azure/watsonx """ return {"project": "vertex_project", "region_name": "vertex_location"} def map_special_auth_params(self, non_default_params: dict, optional_params: dict): mapped_params = self.get_mapped_special_auth_params() for param, value in non_default_params.items(): if param in mapped_params: optional_params[mapped_params[param]] = value return optional_params def get_eu_regions(self) -> List[str]: """ Source: https://cloud.google.com/vertex-ai/generative-ai/docs/learn/locations#available-regions """ return [ "europe-central2", "europe-north1", "europe-southwest1", "europe-west1", "europe-west2", "europe-west3", "europe-west4", "europe-west6", "europe-west8", "europe-west9", ]
(temperature: Optional[float] = None, max_output_tokens: Optional[int] = None, top_p: Optional[float] = None, top_k: Optional[int] = None, response_mime_type: Optional[str] = None, candidate_count: Optional[int] = None, stop_sequences: Optional[list] = None, frequency_penalty: Optional[float] = None, presence_penalty: Optional[float] = None) -> None
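The Gemini-side mapping differs in a few places, notably `n` -> `candidate_count`, a string `stop` -> a one-element `stop_sequences` list, `max_tokens` -> `max_output_tokens`, and `response_format={"type": "json_object"}` -> `response_mime_type`. A hedged sketch using the row's module path, which may differ across litellm versions:

from litellm.llms.vertex_ai import VertexAIConfig

mapped = VertexAIConfig().map_openai_params(
    non_default_params={
        "n": 2,
        "stop": "###",
        "max_tokens": 128,
        "response_format": {"type": "json_object"},
    },
    optional_params={},
)
print(mapped)
# {'candidate_count': 2, 'stop_sequences': ['###'], 'max_output_tokens': 128,
#  'response_mime_type': 'application/json'}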
64,231
litellm.llms.vertex_ai
__init__
null
def __init__( self, temperature: Optional[float] = None, max_output_tokens: Optional[int] = None, top_p: Optional[float] = None, top_k: Optional[int] = None, response_mime_type: Optional[str] = None, candidate_count: Optional[int] = None, stop_sequences: Optional[list] = None, frequency_penalty: Optional[float] = None, presence_penalty: Optional[float] = None, ) -> None: locals_ = locals() for key, value in locals_.items(): if key != "self" and value is not None: setattr(self.__class__, key, value)
(self, temperature: Optional[float] = None, max_output_tokens: Optional[int] = None, top_p: Optional[float] = None, top_k: Optional[int] = None, response_mime_type: Optional[str] = None, candidate_count: Optional[int] = None, stop_sequences: Optional[list] = None, frequency_penalty: Optional[float] = None, presence_penalty: Optional[float] = None) -> NoneType
64,232
litellm.llms.vertex_ai
get_eu_regions
Source: https://cloud.google.com/vertex-ai/generative-ai/docs/learn/locations#available-regions
def get_eu_regions(self) -> List[str]: """ Source: https://cloud.google.com/vertex-ai/generative-ai/docs/learn/locations#available-regions """ return [ "europe-central2", "europe-north1", "europe-southwest1", "europe-west1", "europe-west2", "europe-west3", "europe-west4", "europe-west6", "europe-west8", "europe-west9", ]
(self) -> List[str]
64,233
litellm.llms.vertex_ai
get_mapped_special_auth_params
Common auth params across bedrock/vertex_ai/azure/watsonx
def get_mapped_special_auth_params(self) -> dict: """ Common auth params across bedrock/vertex_ai/azure/watsonx """ return {"project": "vertex_project", "region_name": "vertex_location"}
(self) -> dict
64,234
litellm.llms.vertex_ai
get_supported_openai_params
null
def get_supported_openai_params(self): return [ "temperature", "top_p", "max_tokens", "stream", "tools", "tool_choice", "response_format", "n", "stop", ]
(self)
64,235
litellm.llms.vertex_ai
map_openai_params
null
def map_openai_params(self, non_default_params: dict, optional_params: dict): for param, value in non_default_params.items(): if param == "temperature": optional_params["temperature"] = value if param == "top_p": optional_params["top_p"] = value if ( param == "stream" and value == True ): # sending stream = False, can cause it to get passed unchecked and raise issues optional_params["stream"] = value if param == "n": optional_params["candidate_count"] = value if param == "stop": if isinstance(value, str): optional_params["stop_sequences"] = [value] elif isinstance(value, list): optional_params["stop_sequences"] = value if param == "max_tokens": optional_params["max_output_tokens"] = value if param == "response_format" and value["type"] == "json_object": optional_params["response_mime_type"] = "application/json" if param == "frequency_penalty": optional_params["frequency_penalty"] = value if param == "presence_penalty": optional_params["presence_penalty"] = value if param == "tools" and isinstance(value, list): from vertexai.preview import generative_models gtool_func_declarations = [] for tool in value: gtool_func_declaration = generative_models.FunctionDeclaration( name=tool["function"]["name"], description=tool["function"].get("description", ""), parameters=tool["function"].get("parameters", {}), ) gtool_func_declarations.append(gtool_func_declaration) optional_params["tools"] = [ generative_models.Tool( function_declarations=gtool_func_declarations ) ] if param == "tool_choice" and ( isinstance(value, str) or isinstance(value, dict) ): pass return optional_params
(self, non_default_params: dict, optional_params: dict)
64,237
litellm.utils
_calculate_retry_after
Reimplementation of openai's calculate retry after, since that one can't be imported. https://github.com/openai/openai-python/blob/af67cfab4210d8e497c05390ce14f39105c77519/src/openai/_base_client.py#L631
def _calculate_retry_after( remaining_retries: int, max_retries: int, response_headers: Optional[httpx.Headers] = None, min_timeout: int = 0, ): """ Reimplementation of openai's calculate retry after, since that one can't be imported. https://github.com/openai/openai-python/blob/af67cfab4210d8e497c05390ce14f39105c77519/src/openai/_base_client.py#L631 """ try: import email # openai import # About the Retry-After header: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After # # <http-date>". See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After#syntax for # details. if response_headers is not None: retry_header = response_headers.get("retry-after") try: retry_after = int(retry_header) except Exception: retry_date_tuple = email.utils.parsedate_tz(retry_header) # type: ignore if retry_date_tuple is None: retry_after = -1 else: retry_date = email.utils.mktime_tz(retry_date_tuple) # type: ignore retry_after = int(retry_date - time.time()) else: retry_after = -1 except Exception: retry_after = -1 # If the API asks us to wait a certain amount of time (and it's a reasonable amount), just do what it says. if 0 < retry_after <= 60: return retry_after initial_retry_delay = 0.5 max_retry_delay = 8.0 nb_retries = max_retries - remaining_retries # Apply exponential backoff, but not more than the max. sleep_seconds = min(initial_retry_delay * pow(2.0, nb_retries), max_retry_delay) # Apply some jitter, plus-or-minus half a second. jitter = 1 - 0.25 * random.random() timeout = sleep_seconds * jitter return timeout if timeout >= min_timeout else min_timeout
(remaining_retries: int, max_retries: int, response_headers: Optional[httpx.Headers] = None, min_timeout: int = 0)
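Concretely: a reasonable Retry-After header (at most 60 seconds) is obeyed as-is; otherwise the delay is exponential, 0.5s doubling per attempt and capped at 8s, with up to 25% downward jitter. A hedged sketch calling the private helper directly, which may change between litellm versions:

import httpx
from litellm.utils import _calculate_retry_after

# Server said how long to wait -> obeyed verbatim.
print(_calculate_retry_after(
    remaining_retries=2,
    max_retries=3,
    response_headers=httpx.Headers({"retry-after": "7"}),
))  # 7

# No header -> exponential backoff with jitter (first retry done, so up to 1.0s here).
print(_calculate_retry_after(remaining_retries=2, max_retries=3))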
64,240
litellm.utils
_should_retry
Reimplementation of openai's should retry logic, since that one can't be imported. https://github.com/openai/openai-python/blob/af67cfab4210d8e497c05390ce14f39105c77519/src/openai/_base_client.py#L639
def _should_retry(status_code: int): """ Reimplementation of openai's should retry logic, since that one can't be imported. https://github.com/openai/openai-python/blob/af67cfab4210d8e497c05390ce14f39105c77519/src/openai/_base_client.py#L639 """ # If the server explicitly says whether or not to retry, obey. # Retry on request timeouts. if status_code == 408: return True # Retry on lock timeouts. if status_code == 409: return True # Retry on rate limits. if status_code == 429: return True # Retry internal errors. if status_code >= 500: return True return False
(status_code: int)
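A quick check of the rule above: 408, 409, 429 and every 5xx status are retried, everything else is not. The only assumption is that this private litellm helper stays importable under this name:

from litellm.utils import _should_retry

for code in (400, 401, 408, 409, 429, 500, 503):
    print(code, _should_retry(code))
# 400/401 -> False; 408/409/429 and 500/503 -> True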
64,241
litellm._logging
_turn_on_debug
null
def _turn_on_debug(): verbose_logger.setLevel(level=logging.DEBUG) # set package log to debug verbose_router_logger.setLevel(level=logging.DEBUG) # set router logs to debug verbose_proxy_logger.setLevel(level=logging.DEBUG) # set proxy logs to debug
()
64,242
litellm.main
acompletion
Asynchronously executes a litellm.completion() call for any of litellm supported llms (example gpt-4, gpt-3.5-turbo, claude-2, command-nightly) Parameters: model (str): The name of the language model to use for text completion. see all supported LLMs: https://docs.litellm.ai/docs/providers/ messages (List): A list of message objects representing the conversation context (default is an empty list). OPTIONAL PARAMS functions (List, optional): A list of functions to apply to the conversation messages (default is an empty list). function_call (str, optional): The name of the function to call within the conversation (default is an empty string). temperature (float, optional): The temperature parameter for controlling the randomness of the output (default is 1.0). top_p (float, optional): The top-p parameter for nucleus sampling (default is 1.0). n (int, optional): The number of completions to generate (default is 1). stream (bool, optional): If True, return a streaming response (default is False). stream_options (dict, optional): A dictionary containing options for the streaming response. Only use this if stream is True. stop(string/list, optional): - Up to 4 sequences where the LLM API will stop generating further tokens. max_tokens (integer, optional): The maximum number of tokens in the generated completion (default is infinity). presence_penalty (float, optional): It is used to penalize new tokens based on their existence in the text so far. frequency_penalty: It is used to penalize new tokens based on their frequency in the text so far. logit_bias (dict, optional): Used to modify the probability of specific tokens appearing in the completion. user (str, optional): A unique identifier representing your end-user. This can help the LLM provider to monitor and detect abuse. metadata (dict, optional): Pass in additional metadata to tag your completion calls - eg. prompt version, details, etc. api_base (str, optional): Base URL for the API (default is None). api_version (str, optional): API version (default is None). api_key (str, optional): API key (default is None). model_list (list, optional): List of api base, version, keys timeout (float, optional): The maximum execution time in seconds for the completion request. LITELLM Specific Params mock_response (str, optional): If provided, return a mock completion response for testing or debugging purposes (default is None). custom_llm_provider (str, optional): Used for Non-OpenAI LLMs, Example usage for bedrock, set model="amazon.titan-tg1-large" and custom_llm_provider="bedrock" Returns: ModelResponse: A response object containing the generated completion and associated metadata. Notes: - This function is an asynchronous version of the `completion` function. - The `completion` function is called using `run_in_executor` to execute synchronously in the event loop. - If `stream` is True, the function returns an async generator that yields completion lines.
@client async def atext_completion(*args, **kwargs): """ Implemented to handle async streaming for the text completion endpoint """ loop = asyncio.get_event_loop() model = args[0] if len(args) > 0 else kwargs["model"] ### PASS ARGS TO COMPLETION ### kwargs["acompletion"] = True custom_llm_provider = None try: # Use a partial function to pass your keyword arguments func = partial(text_completion, *args, **kwargs) # Add the context to the function ctx = contextvars.copy_context() func_with_context = partial(ctx.run, func) _, custom_llm_provider, _, _ = get_llm_provider( model=model, api_base=kwargs.get("api_base", None) ) if ( custom_llm_provider == "openai" or custom_llm_provider == "azure" or custom_llm_provider == "azure_text" or custom_llm_provider == "custom_openai" or custom_llm_provider == "anyscale" or custom_llm_provider == "mistral" or custom_llm_provider == "openrouter" or custom_llm_provider == "deepinfra" or custom_llm_provider == "perplexity" or custom_llm_provider == "groq" or custom_llm_provider == "deepseek" or custom_llm_provider == "fireworks_ai" or custom_llm_provider == "text-completion-openai" or custom_llm_provider == "huggingface" or custom_llm_provider == "ollama" or custom_llm_provider == "vertex_ai" or custom_llm_provider in litellm.openai_compatible_providers ): # currently implemented aiohttp calls for just azure and openai, soon all. # Await normally response = await loop.run_in_executor(None, func_with_context) if asyncio.iscoroutine(response): response = await response else: # Call the synchronous function using run_in_executor response = await loop.run_in_executor(None, func_with_context) if kwargs.get("stream", False) == True: # return an async generator return TextCompletionStreamWrapper( completion_stream=_async_streaming( response=response, model=model, custom_llm_provider=custom_llm_provider, args=args, ), model=model, ) else: transformed_logprobs = None # only supported for TGI models try: raw_response = response._hidden_params.get("original_response", None) transformed_logprobs = litellm.utils.transform_logprobs(raw_response) except Exception as e: print_verbose(f"LiteLLM non blocking exception: {e}") ## TRANSLATE CHAT TO TEXT FORMAT ## if isinstance(response, TextCompletionResponse): return response elif asyncio.iscoroutine(response): response = await response text_completion_response = TextCompletionResponse() text_completion_response["id"] = response.get("id", None) text_completion_response["object"] = "text_completion" text_completion_response["created"] = response.get("created", None) text_completion_response["model"] = response.get("model", None) text_choices = TextChoices() text_choices["text"] = response["choices"][0]["message"]["content"] text_choices["index"] = response["choices"][0]["index"] text_choices["logprobs"] = transformed_logprobs text_choices["finish_reason"] = response["choices"][0]["finish_reason"] text_completion_response["choices"] = [text_choices] text_completion_response["usage"] = response.get("usage", None) return text_completion_response except Exception as e: custom_llm_provider = custom_llm_provider or "openai" raise exception_type( model=model, custom_llm_provider=custom_llm_provider, original_exception=e, completion_kwargs=args, extra_kwargs=kwargs, )
(model: str, messages: List = [], functions: Optional[List] = None, function_call: Optional[str] = None, timeout: Union[float, int, NoneType] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, n: Optional[int] = None, stream: Optional[bool] = None, stream_options: Optional[dict] = None, stop=None, max_tokens: Optional[int] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, logit_bias: Optional[dict] = None, user: Optional[str] = None, response_format: Optional[dict] = None, seed: Optional[int] = None, tools: Optional[List] = None, tool_choice: Optional[str] = None, logprobs: Optional[bool] = None, top_logprobs: Optional[int] = None, deployment_id=None, base_url: Optional[str] = None, api_version: Optional[str] = None, api_key: Optional[str] = None, model_list: Optional[list] = None, extra_headers: Optional[dict] = None, **kwargs)
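A minimal async usage sketch based on the acompletion docstring and signature above (note the code field of this record shows atext_completion rather than acompletion itself). The model name and message are placeholders, and an OPENAI_API_KEY environment variable is assumed.

import asyncio
import litellm

async def main():
    # messages follow the OpenAI chat format described in the docstring
    response = await litellm.acompletion(
        model="gpt-3.5-turbo",  # placeholder model
        messages=[{"role": "user", "content": "Say hello in one word."}],
        max_tokens=10,
    )
    print(response.choices[0].message.content)

asyncio.run(main())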
64,243
litellm.main
acompletion_with_retries
Executes a litellm.completion() with 3 retries
def completion_with_retries(*args, **kwargs):
    """
    Executes a litellm.completion() with 3 retries
    """
    try:
        import tenacity
    except Exception as e:
        raise Exception(
            f"tenacity import failed please run `pip install tenacity`. Error: {e}"
        )

    num_retries = kwargs.pop("num_retries", 3)
    retry_strategy = kwargs.pop("retry_strategy", "constant_retry")
    original_function = kwargs.pop("original_function", completion)
    if retry_strategy == "constant_retry":
        retryer = tenacity.Retrying(
            stop=tenacity.stop_after_attempt(num_retries), reraise=True
        )
    elif retry_strategy == "exponential_backoff_retry":
        retryer = tenacity.Retrying(
            wait=tenacity.wait_exponential(multiplier=1, max=10),
            stop=tenacity.stop_after_attempt(num_retries),
            reraise=True,
        )
    return retryer(original_function, *args, **kwargs)
(*args, **kwargs)
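The code shown for this record is the synchronous completion_with_retries helper; the sketch below exercises the num_retries and retry_strategy keyword arguments it pops, which is grounded in that code. The model name and message are placeholders.

import litellm

response = litellm.completion_with_retries(
    model="gpt-3.5-turbo",  # placeholder model
    messages=[{"role": "user", "content": "ping"}],
    num_retries=3,                               # defaults to 3
    retry_strategy="exponential_backoff_retry",  # or "constant_retry"
)
print(response.choices[0].message.content)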
64,244
litellm.utils
acreate
null
def acreate(*args, **kwargs):  ## Thin client to handle the acreate langchain call
    return litellm.acompletion(*args, **kwargs)
(*args, **kwargs)
64,245
litellm.assistants.main
add_message
null
def add_message( custom_llm_provider: Literal["openai"], thread_id: str, role: Literal["user", "assistant"], content: str, attachments: Optional[List[Attachment]] = None, metadata: Optional[dict] = None, client: Optional[OpenAI] = None, **kwargs, ) -> OpenAIMessage: ### COMMON OBJECTS ### message_data = MessageData( role=role, content=content, attachments=attachments, metadata=metadata ) optional_params = GenericLiteLLMParams(**kwargs) ### TIMEOUT LOGIC ### timeout = optional_params.timeout or kwargs.get("request_timeout", 600) or 600 # set timeout for 10 minutes by default if ( timeout is not None and isinstance(timeout, httpx.Timeout) and supports_httpx_timeout(custom_llm_provider) == False ): read_timeout = timeout.read or 600 timeout = read_timeout # default 10 min timeout elif timeout is not None and not isinstance(timeout, httpx.Timeout): timeout = float(timeout) # type: ignore elif timeout is None: timeout = 600.0 response: Optional[OpenAIMessage] = None if custom_llm_provider == "openai": api_base = ( optional_params.api_base # for deepinfra/perplexity/anyscale/groq we check in get_llm_provider and pass in the api base from there or litellm.api_base or os.getenv("OPENAI_API_BASE") or "https://api.openai.com/v1" ) organization = ( optional_params.organization or litellm.organization or os.getenv("OPENAI_ORGANIZATION", None) or None # default - https://github.com/openai/openai-python/blob/284c1799070c723c6a553337134148a7ab088dd8/openai/util.py#L105 ) # set API KEY api_key = ( optional_params.api_key or litellm.api_key # for deepinfra/perplexity/anyscale we check in get_llm_provider and pass in the api key from there or litellm.openai_key or os.getenv("OPENAI_API_KEY") ) response = openai_assistants_api.add_message( thread_id=thread_id, message_data=message_data, api_base=api_base, api_key=api_key, timeout=timeout, max_retries=optional_params.max_retries, organization=organization, client=client, ) else: raise litellm.exceptions.BadRequestError( message="LiteLLM doesn't support {} for 'create_thread'. Only 'openai' is supported.".format( custom_llm_provider ), model="n/a", llm_provider=custom_llm_provider, response=httpx.Response( status_code=400, content="Unsupported provider", request=httpx.Request(method="create_thread", url="https://github.com/BerriAI/litellm"), # type: ignore ), ) return response
(custom_llm_provider: Literal['openai'], thread_id: str, role: Literal['user', 'assistant'], content: str, attachments: Optional[List[litellm.types.llms.openai.Attachment]] = None, metadata: Optional[dict] = None, client: Optional[openai.OpenAI] = None, **kwargs) -> openai.types.beta.threads.message.Message
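A usage sketch for add_message based on its signature; the thread is obtained from create_thread, which is only referenced by name in the error message above, so its exact call shape is an assumption here, as are the message text and the OpenAI key taken from the environment.

import litellm
from litellm.assistants.main import add_message

# assumed companion helper; only its name appears in the code above
thread = litellm.create_thread(custom_llm_provider="openai")

message = add_message(
    custom_llm_provider="openai",
    thread_id=thread.id,
    role="user",
    content="How do I reset my password?",
)
print(message.id, message.role)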
64,246
litellm.main
aembedding
Asynchronously calls the `embedding` function with the given arguments and keyword arguments. Parameters: - `args` (tuple): Positional arguments to be passed to the `embedding` function. - `kwargs` (dict): Keyword arguments to be passed to the `embedding` function. Returns: - `response` (Any): The response returned by the `embedding` function.
@client async def atext_completion(*args, **kwargs): """ Implemented to handle async streaming for the text completion endpoint """ loop = asyncio.get_event_loop() model = args[0] if len(args) > 0 else kwargs["model"] ### PASS ARGS TO COMPLETION ### kwargs["acompletion"] = True custom_llm_provider = None try: # Use a partial function to pass your keyword arguments func = partial(text_completion, *args, **kwargs) # Add the context to the function ctx = contextvars.copy_context() func_with_context = partial(ctx.run, func) _, custom_llm_provider, _, _ = get_llm_provider( model=model, api_base=kwargs.get("api_base", None) ) if ( custom_llm_provider == "openai" or custom_llm_provider == "azure" or custom_llm_provider == "azure_text" or custom_llm_provider == "custom_openai" or custom_llm_provider == "anyscale" or custom_llm_provider == "mistral" or custom_llm_provider == "openrouter" or custom_llm_provider == "deepinfra" or custom_llm_provider == "perplexity" or custom_llm_provider == "groq" or custom_llm_provider == "deepseek" or custom_llm_provider == "fireworks_ai" or custom_llm_provider == "text-completion-openai" or custom_llm_provider == "huggingface" or custom_llm_provider == "ollama" or custom_llm_provider == "vertex_ai" or custom_llm_provider in litellm.openai_compatible_providers ): # currently implemented aiohttp calls for just azure and openai, soon all. # Await normally response = await loop.run_in_executor(None, func_with_context) if asyncio.iscoroutine(response): response = await response else: # Call the synchronous function using run_in_executor response = await loop.run_in_executor(None, func_with_context) if kwargs.get("stream", False) == True: # return an async generator return TextCompletionStreamWrapper( completion_stream=_async_streaming( response=response, model=model, custom_llm_provider=custom_llm_provider, args=args, ), model=model, ) else: transformed_logprobs = None # only supported for TGI models try: raw_response = response._hidden_params.get("original_response", None) transformed_logprobs = litellm.utils.transform_logprobs(raw_response) except Exception as e: print_verbose(f"LiteLLM non blocking exception: {e}") ## TRANSLATE CHAT TO TEXT FORMAT ## if isinstance(response, TextCompletionResponse): return response elif asyncio.iscoroutine(response): response = await response text_completion_response = TextCompletionResponse() text_completion_response["id"] = response.get("id", None) text_completion_response["object"] = "text_completion" text_completion_response["created"] = response.get("created", None) text_completion_response["model"] = response.get("model", None) text_choices = TextChoices() text_choices["text"] = response["choices"][0]["message"]["content"] text_choices["index"] = response["choices"][0]["index"] text_choices["logprobs"] = transformed_logprobs text_choices["finish_reason"] = response["choices"][0]["finish_reason"] text_completion_response["choices"] = [text_choices] text_completion_response["usage"] = response.get("usage", None) return text_completion_response except Exception as e: custom_llm_provider = custom_llm_provider or "openai" raise exception_type( model=model, custom_llm_provider=custom_llm_provider, original_exception=e, completion_kwargs=args, extra_kwargs=kwargs, )
(*args, **kwargs)
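A minimal async sketch of aembedding; this record's code field again shows atext_completion, so the call below rests only on the docstring and on litellm's embedding-style parameters (model plus input), with placeholder values and an OpenAI key assumed in the environment.

import asyncio
import litellm

async def main():
    response = await litellm.aembedding(
        model="text-embedding-ada-002",       # placeholder embedding model
        input=["hello world", "good night"],  # input assumed to be a list of strings
    )
    print(len(response.data), "embeddings returned")

asyncio.run(main())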
64,247
litellm.main
ahealth_check
Support health checks for different providers. Return remaining rate limit, etc. For azure/openai -> completion.with_raw_response For rest -> litellm.acompletion()
def transcription( model: str, file: BinaryIO, ## OPTIONAL OPENAI PARAMS ## language: Optional[str] = None, prompt: Optional[str] = None, response_format: Optional[ Literal["json", "text", "srt", "verbose_json", "vtt"] ] = None, temperature: Optional[int] = None, # openai defaults this to 0 ## LITELLM PARAMS ## user: Optional[str] = None, timeout=600, # default to 10 minutes api_key: Optional[str] = None, api_base: Optional[str] = None, api_version: Optional[str] = None, max_retries: Optional[int] = None, litellm_logging_obj=None, custom_llm_provider=None, **kwargs, ): """ Calls openai + azure whisper endpoints. Allows router to load balance between them """ atranscription = kwargs.get("atranscription", False) litellm_call_id = kwargs.get("litellm_call_id", None) logger_fn = kwargs.get("logger_fn", None) proxy_server_request = kwargs.get("proxy_server_request", None) model_info = kwargs.get("model_info", None) metadata = kwargs.get("metadata", {}) if max_retries is None: max_retries = openai.DEFAULT_MAX_RETRIES model_response = litellm.utils.TranscriptionResponse() model, custom_llm_provider, dynamic_api_key, api_base = get_llm_provider(model=model, custom_llm_provider=custom_llm_provider, api_base=api_base) # type: ignore optional_params = { "language": language, "prompt": prompt, "response_format": response_format, "temperature": None, # openai defaults this to 0 } if custom_llm_provider == "azure": # azure configs api_base = api_base or litellm.api_base or get_secret("AZURE_API_BASE") api_version = ( api_version or litellm.api_version or get_secret("AZURE_API_VERSION") ) azure_ad_token = kwargs.pop("azure_ad_token", None) or get_secret( "AZURE_AD_TOKEN" ) api_key = ( api_key or litellm.api_key or litellm.azure_key or get_secret("AZURE_API_KEY") ) response = azure_chat_completions.audio_transcriptions( model=model, audio_file=file, optional_params=optional_params, model_response=model_response, atranscription=atranscription, timeout=timeout, logging_obj=litellm_logging_obj, api_base=api_base, api_key=api_key, api_version=api_version, azure_ad_token=azure_ad_token, max_retries=max_retries, ) elif custom_llm_provider == "openai": response = openai_chat_completions.audio_transcriptions( model=model, audio_file=file, optional_params=optional_params, model_response=model_response, atranscription=atranscription, timeout=timeout, logging_obj=litellm_logging_obj, max_retries=max_retries, ) return response
(model_params: dict, mode: Optional[Literal['completion', 'embedding', 'image_generation', 'chat']] = None, prompt: Optional[str] = None, input: Optional[List] = None, default_timeout: float = 6000)
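A sketch of ahealth_check driven by the signature above (the attached code block is actually the transcription helper). Treating model_params as a dict of completion-call settings and mode as the endpoint to probe is an assumption from that signature; values are placeholders.

import asyncio
from litellm.main import ahealth_check

async def main():
    result = await ahealth_check(
        model_params={"model": "gpt-3.5-turbo"},  # placeholder completion settings
        mode="chat",
    )
    print(result)  # per the docstring, may include remaining rate-limit info

asyncio.run(main())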
64,249
litellm.main
aimage_generation
Asynchronously calls the `image_generation` function with the given arguments and keyword arguments. Parameters: - `args` (tuple): Positional arguments to be passed to the `image_generation` function. - `kwargs` (dict): Keyword arguments to be passed to the `image_generation` function. Returns: - `response` (Any): The response returned by the `image_generation` function.
@client async def atext_completion(*args, **kwargs): """ Implemented to handle async streaming for the text completion endpoint """ loop = asyncio.get_event_loop() model = args[0] if len(args) > 0 else kwargs["model"] ### PASS ARGS TO COMPLETION ### kwargs["acompletion"] = True custom_llm_provider = None try: # Use a partial function to pass your keyword arguments func = partial(text_completion, *args, **kwargs) # Add the context to the function ctx = contextvars.copy_context() func_with_context = partial(ctx.run, func) _, custom_llm_provider, _, _ = get_llm_provider( model=model, api_base=kwargs.get("api_base", None) ) if ( custom_llm_provider == "openai" or custom_llm_provider == "azure" or custom_llm_provider == "azure_text" or custom_llm_provider == "custom_openai" or custom_llm_provider == "anyscale" or custom_llm_provider == "mistral" or custom_llm_provider == "openrouter" or custom_llm_provider == "deepinfra" or custom_llm_provider == "perplexity" or custom_llm_provider == "groq" or custom_llm_provider == "deepseek" or custom_llm_provider == "fireworks_ai" or custom_llm_provider == "text-completion-openai" or custom_llm_provider == "huggingface" or custom_llm_provider == "ollama" or custom_llm_provider == "vertex_ai" or custom_llm_provider in litellm.openai_compatible_providers ): # currently implemented aiohttp calls for just azure and openai, soon all. # Await normally response = await loop.run_in_executor(None, func_with_context) if asyncio.iscoroutine(response): response = await response else: # Call the synchronous function using run_in_executor response = await loop.run_in_executor(None, func_with_context) if kwargs.get("stream", False) == True: # return an async generator return TextCompletionStreamWrapper( completion_stream=_async_streaming( response=response, model=model, custom_llm_provider=custom_llm_provider, args=args, ), model=model, ) else: transformed_logprobs = None # only supported for TGI models try: raw_response = response._hidden_params.get("original_response", None) transformed_logprobs = litellm.utils.transform_logprobs(raw_response) except Exception as e: print_verbose(f"LiteLLM non blocking exception: {e}") ## TRANSLATE CHAT TO TEXT FORMAT ## if isinstance(response, TextCompletionResponse): return response elif asyncio.iscoroutine(response): response = await response text_completion_response = TextCompletionResponse() text_completion_response["id"] = response.get("id", None) text_completion_response["object"] = "text_completion" text_completion_response["created"] = response.get("created", None) text_completion_response["model"] = response.get("model", None) text_choices = TextChoices() text_choices["text"] = response["choices"][0]["message"]["content"] text_choices["index"] = response["choices"][0]["index"] text_choices["logprobs"] = transformed_logprobs text_choices["finish_reason"] = response["choices"][0]["finish_reason"] text_completion_response["choices"] = [text_choices] text_completion_response["usage"] = response.get("usage", None) return text_completion_response except Exception as e: custom_llm_provider = custom_llm_provider or "openai" raise exception_type( model=model, custom_llm_provider=custom_llm_provider, original_exception=e, completion_kwargs=args, extra_kwargs=kwargs, )
(*args, **kwargs)
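A minimal async sketch of aimage_generation; the prompt and model parameter names, and the shape of the response, are assumptions based on litellm's OpenAI-compatible image interface and are not shown in this record (whose code field is again atext_completion).

import asyncio
import litellm

async def main():
    response = await litellm.aimage_generation(
        prompt="a watercolor painting of a lighthouse",  # placeholder prompt
        model="dall-e-3",                                # placeholder model
    )
    # assumes an OpenAI-style image response with a data list
    print(response.data[0].url)

asyncio.run(main())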
64,252
litellm.main
amoderation
null
@client async def atext_completion(*args, **kwargs): """ Implemented to handle async streaming for the text completion endpoint """ loop = asyncio.get_event_loop() model = args[0] if len(args) > 0 else kwargs["model"] ### PASS ARGS TO COMPLETION ### kwargs["acompletion"] = True custom_llm_provider = None try: # Use a partial function to pass your keyword arguments func = partial(text_completion, *args, **kwargs) # Add the context to the function ctx = contextvars.copy_context() func_with_context = partial(ctx.run, func) _, custom_llm_provider, _, _ = get_llm_provider( model=model, api_base=kwargs.get("api_base", None) ) if ( custom_llm_provider == "openai" or custom_llm_provider == "azure" or custom_llm_provider == "azure_text" or custom_llm_provider == "custom_openai" or custom_llm_provider == "anyscale" or custom_llm_provider == "mistral" or custom_llm_provider == "openrouter" or custom_llm_provider == "deepinfra" or custom_llm_provider == "perplexity" or custom_llm_provider == "groq" or custom_llm_provider == "deepseek" or custom_llm_provider == "fireworks_ai" or custom_llm_provider == "text-completion-openai" or custom_llm_provider == "huggingface" or custom_llm_provider == "ollama" or custom_llm_provider == "vertex_ai" or custom_llm_provider in litellm.openai_compatible_providers ): # currently implemented aiohttp calls for just azure and openai, soon all. # Await normally response = await loop.run_in_executor(None, func_with_context) if asyncio.iscoroutine(response): response = await response else: # Call the synchronous function using run_in_executor response = await loop.run_in_executor(None, func_with_context) if kwargs.get("stream", False) == True: # return an async generator return TextCompletionStreamWrapper( completion_stream=_async_streaming( response=response, model=model, custom_llm_provider=custom_llm_provider, args=args, ), model=model, ) else: transformed_logprobs = None # only supported for TGI models try: raw_response = response._hidden_params.get("original_response", None) transformed_logprobs = litellm.utils.transform_logprobs(raw_response) except Exception as e: print_verbose(f"LiteLLM non blocking exception: {e}") ## TRANSLATE CHAT TO TEXT FORMAT ## if isinstance(response, TextCompletionResponse): return response elif asyncio.iscoroutine(response): response = await response text_completion_response = TextCompletionResponse() text_completion_response["id"] = response.get("id", None) text_completion_response["object"] = "text_completion" text_completion_response["created"] = response.get("created", None) text_completion_response["model"] = response.get("model", None) text_choices = TextChoices() text_choices["text"] = response["choices"][0]["message"]["content"] text_choices["index"] = response["choices"][0]["index"] text_choices["logprobs"] = transformed_logprobs text_choices["finish_reason"] = response["choices"][0]["finish_reason"] text_completion_response["choices"] = [text_choices] text_completion_response["usage"] = response.get("usage", None) return text_completion_response except Exception as e: custom_llm_provider = custom_llm_provider or "openai" raise exception_type( model=model, custom_llm_provider=custom_llm_provider, original_exception=e, completion_kwargs=args, extra_kwargs=kwargs, )
(input: str, model: str, api_key: Optional[str] = None, **kwargs)
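A short async sketch matching the (input, model) signature above; the model name is a placeholder and an OpenAI-style moderation response is assumed.

import asyncio
import litellm

async def main():
    response = await litellm.amoderation(
        input="I want to hug everyone at the conference.",
        model="text-moderation-latest",  # placeholder moderation model
    )
    # assumes an OpenAI-style moderation result object
    print(response.results[0].flagged)

asyncio.run(main())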
64,255
litellm.utils
async_mock_completion_streaming_obj
null
def mock_completion_streaming_obj(model_response, mock_response, model):
    for i in range(0, len(mock_response), 3):
        completion_obj = {"role": "assistant", "content": mock_response[i : i + 3]}
        model_response.choices[0].delta = completion_obj
        yield model_response
(model_response, mock_response, model)
64,257
litellm.main
atext_completion
Implemented to handle async streaming for the text completion endpoint
@client async def atext_completion(*args, **kwargs): """ Implemented to handle async streaming for the text completion endpoint """ loop = asyncio.get_event_loop() model = args[0] if len(args) > 0 else kwargs["model"] ### PASS ARGS TO COMPLETION ### kwargs["acompletion"] = True custom_llm_provider = None try: # Use a partial function to pass your keyword arguments func = partial(text_completion, *args, **kwargs) # Add the context to the function ctx = contextvars.copy_context() func_with_context = partial(ctx.run, func) _, custom_llm_provider, _, _ = get_llm_provider( model=model, api_base=kwargs.get("api_base", None) ) if ( custom_llm_provider == "openai" or custom_llm_provider == "azure" or custom_llm_provider == "azure_text" or custom_llm_provider == "custom_openai" or custom_llm_provider == "anyscale" or custom_llm_provider == "mistral" or custom_llm_provider == "openrouter" or custom_llm_provider == "deepinfra" or custom_llm_provider == "perplexity" or custom_llm_provider == "groq" or custom_llm_provider == "deepseek" or custom_llm_provider == "fireworks_ai" or custom_llm_provider == "text-completion-openai" or custom_llm_provider == "huggingface" or custom_llm_provider == "ollama" or custom_llm_provider == "vertex_ai" or custom_llm_provider in litellm.openai_compatible_providers ): # currently implemented aiohttp calls for just azure and openai, soon all. # Await normally response = await loop.run_in_executor(None, func_with_context) if asyncio.iscoroutine(response): response = await response else: # Call the synchronous function using run_in_executor response = await loop.run_in_executor(None, func_with_context) if kwargs.get("stream", False) == True: # return an async generator return TextCompletionStreamWrapper( completion_stream=_async_streaming( response=response, model=model, custom_llm_provider=custom_llm_provider, args=args, ), model=model, ) else: transformed_logprobs = None # only supported for TGI models try: raw_response = response._hidden_params.get("original_response", None) transformed_logprobs = litellm.utils.transform_logprobs(raw_response) except Exception as e: print_verbose(f"LiteLLM non blocking exception: {e}") ## TRANSLATE CHAT TO TEXT FORMAT ## if isinstance(response, TextCompletionResponse): return response elif asyncio.iscoroutine(response): response = await response text_completion_response = TextCompletionResponse() text_completion_response["id"] = response.get("id", None) text_completion_response["object"] = "text_completion" text_completion_response["created"] = response.get("created", None) text_completion_response["model"] = response.get("model", None) text_choices = TextChoices() text_choices["text"] = response["choices"][0]["message"]["content"] text_choices["index"] = response["choices"][0]["index"] text_choices["logprobs"] = transformed_logprobs text_choices["finish_reason"] = response["choices"][0]["finish_reason"] text_completion_response["choices"] = [text_choices] text_completion_response["usage"] = response.get("usage", None) return text_completion_response except Exception as e: custom_llm_provider = custom_llm_provider or "openai" raise exception_type( model=model, custom_llm_provider=custom_llm_provider, original_exception=e, completion_kwargs=args, extra_kwargs=kwargs, )
(*args, **kwargs)
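A minimal async sketch of atext_completion, which per the code above forwards to text_completion and handles streaming; the model name and prompt keyword are placeholders consistent with the text-completion path shown.

import asyncio
import litellm

async def main():
    response = await litellm.atext_completion(
        model="gpt-3.5-turbo-instruct",  # placeholder text-completion model
        prompt="Once upon a time",
        max_tokens=16,
    )
    print(response.choices[0].text)

asyncio.run(main())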
64,259
litellm.main
atranscription
Calls openai + azure whisper endpoints. Allows router to load balance between them
@client async def atext_completion(*args, **kwargs): """ Implemented to handle async streaming for the text completion endpoint """ loop = asyncio.get_event_loop() model = args[0] if len(args) > 0 else kwargs["model"] ### PASS ARGS TO COMPLETION ### kwargs["acompletion"] = True custom_llm_provider = None try: # Use a partial function to pass your keyword arguments func = partial(text_completion, *args, **kwargs) # Add the context to the function ctx = contextvars.copy_context() func_with_context = partial(ctx.run, func) _, custom_llm_provider, _, _ = get_llm_provider( model=model, api_base=kwargs.get("api_base", None) ) if ( custom_llm_provider == "openai" or custom_llm_provider == "azure" or custom_llm_provider == "azure_text" or custom_llm_provider == "custom_openai" or custom_llm_provider == "anyscale" or custom_llm_provider == "mistral" or custom_llm_provider == "openrouter" or custom_llm_provider == "deepinfra" or custom_llm_provider == "perplexity" or custom_llm_provider == "groq" or custom_llm_provider == "deepseek" or custom_llm_provider == "fireworks_ai" or custom_llm_provider == "text-completion-openai" or custom_llm_provider == "huggingface" or custom_llm_provider == "ollama" or custom_llm_provider == "vertex_ai" or custom_llm_provider in litellm.openai_compatible_providers ): # currently implemented aiohttp calls for just azure and openai, soon all. # Await normally response = await loop.run_in_executor(None, func_with_context) if asyncio.iscoroutine(response): response = await response else: # Call the synchronous function using run_in_executor response = await loop.run_in_executor(None, func_with_context) if kwargs.get("stream", False) == True: # return an async generator return TextCompletionStreamWrapper( completion_stream=_async_streaming( response=response, model=model, custom_llm_provider=custom_llm_provider, args=args, ), model=model, ) else: transformed_logprobs = None # only supported for TGI models try: raw_response = response._hidden_params.get("original_response", None) transformed_logprobs = litellm.utils.transform_logprobs(raw_response) except Exception as e: print_verbose(f"LiteLLM non blocking exception: {e}") ## TRANSLATE CHAT TO TEXT FORMAT ## if isinstance(response, TextCompletionResponse): return response elif asyncio.iscoroutine(response): response = await response text_completion_response = TextCompletionResponse() text_completion_response["id"] = response.get("id", None) text_completion_response["object"] = "text_completion" text_completion_response["created"] = response.get("created", None) text_completion_response["model"] = response.get("model", None) text_choices = TextChoices() text_choices["text"] = response["choices"][0]["message"]["content"] text_choices["index"] = response["choices"][0]["index"] text_choices["logprobs"] = transformed_logprobs text_choices["finish_reason"] = response["choices"][0]["finish_reason"] text_completion_response["choices"] = [text_choices] text_completion_response["usage"] = response.get("usage", None) return text_completion_response except Exception as e: custom_llm_provider = custom_llm_provider or "openai" raise exception_type( model=model, custom_llm_provider=custom_llm_provider, original_exception=e, completion_kwargs=args, extra_kwargs=kwargs, )
(*args, **kwargs)
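An async sketch of atranscription; the model name and audio path are placeholders, and the file and model parameters follow the transcription helper shown earlier under the ahealth_check record.

import asyncio
import litellm

async def main():
    with open("speech.wav", "rb") as audio_file:  # placeholder audio file
        response = await litellm.atranscription(
            model="whisper-1",  # placeholder model
            file=audio_file,
        )
    print(response.text)

asyncio.run(main())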
64,261
litellm.main
batch_completion
Batch litellm.completion function for a given model. Args: model (str): The model to use for generating completions. messages (List, optional): List of messages to use as input for generating completions. Defaults to []. functions (List, optional): List of functions to use as input for generating completions. Defaults to []. function_call (str, optional): The function call to use as input for generating completions. Defaults to "". temperature (float, optional): The temperature parameter for generating completions. Defaults to None. top_p (float, optional): The top-p parameter for generating completions. Defaults to None. n (int, optional): The number of completions to generate. Defaults to None. stream (bool, optional): Whether to stream completions or not. Defaults to None. stop (optional): The stop parameter for generating completions. Defaults to None. max_tokens (float, optional): The maximum number of tokens to generate. Defaults to None. presence_penalty (float, optional): The presence penalty for generating completions. Defaults to None. frequency_penalty (float, optional): The frequency penalty for generating completions. Defaults to None. logit_bias (dict, optional): The logit bias for generating completions. Defaults to {}. user (str, optional): The user string for generating completions. Defaults to "". deployment_id (optional): The deployment ID for generating completions. Defaults to None. request_timeout (int, optional): The request timeout for generating completions. Defaults to None. Returns: list: A list of completion results.
def batch_completion( model: str, # Optional OpenAI params: see https://platform.openai.com/docs/api-reference/chat/create messages: List = [], functions: Optional[List] = None, function_call: Optional[str] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, n: Optional[int] = None, stream: Optional[bool] = None, stop=None, max_tokens: Optional[int] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, logit_bias: Optional[dict] = None, user: Optional[str] = None, deployment_id=None, request_timeout: Optional[int] = None, timeout: Optional[int] = 600, # Optional liteLLM function params **kwargs, ): """ Batch litellm.completion function for a given model. Args: model (str): The model to use for generating completions. messages (List, optional): List of messages to use as input for generating completions. Defaults to []. functions (List, optional): List of functions to use as input for generating completions. Defaults to []. function_call (str, optional): The function call to use as input for generating completions. Defaults to "". temperature (float, optional): The temperature parameter for generating completions. Defaults to None. top_p (float, optional): The top-p parameter for generating completions. Defaults to None. n (int, optional): The number of completions to generate. Defaults to None. stream (bool, optional): Whether to stream completions or not. Defaults to None. stop (optional): The stop parameter for generating completions. Defaults to None. max_tokens (float, optional): The maximum number of tokens to generate. Defaults to None. presence_penalty (float, optional): The presence penalty for generating completions. Defaults to None. frequency_penalty (float, optional): The frequency penalty for generating completions. Defaults to None. logit_bias (dict, optional): The logit bias for generating completions. Defaults to {}. user (str, optional): The user string for generating completions. Defaults to "". deployment_id (optional): The deployment ID for generating completions. Defaults to None. request_timeout (int, optional): The request timeout for generating completions. Defaults to None. Returns: list: A list of completion results. 
""" args = locals() batch_messages = messages completions = [] model = model custom_llm_provider = None if model.split("/", 1)[0] in litellm.provider_list: custom_llm_provider = model.split("/", 1)[0] model = model.split("/", 1)[1] if custom_llm_provider == "vllm": optional_params = get_optional_params( functions=functions, function_call=function_call, temperature=temperature, top_p=top_p, n=n, stream=stream, stop=stop, max_tokens=max_tokens, presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, logit_bias=logit_bias, user=user, # params to identify the model model=model, custom_llm_provider=custom_llm_provider, ) results = vllm.batch_completions( model=model, messages=batch_messages, custom_prompt_dict=litellm.custom_prompt_dict, optional_params=optional_params, ) # all non VLLM models for batch completion models else: def chunks(lst, n): """Yield successive n-sized chunks from lst.""" for i in range(0, len(lst), n): yield lst[i : i + n] with ThreadPoolExecutor(max_workers=100) as executor: for sub_batch in chunks(batch_messages, 100): for message_list in sub_batch: kwargs_modified = args.copy() kwargs_modified["messages"] = message_list original_kwargs = {} if "kwargs" in kwargs_modified: original_kwargs = kwargs_modified.pop("kwargs") future = executor.submit( completion, **kwargs_modified, **original_kwargs ) completions.append(future) # Retrieve the results from the futures results = [future.result() for future in completions] return results
(model: str, messages: List = [], functions: Optional[List] = None, function_call: Optional[str] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, n: Optional[int] = None, stream: Optional[bool] = None, stop=None, max_tokens: Optional[int] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, logit_bias: Optional[dict] = None, user: Optional[str] = None, deployment_id=None, request_timeout: Optional[int] = None, timeout: Optional[int] = 600, **kwargs)
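A usage sketch for batch_completion grounded in the parameters above: messages is a list of conversations, one per completion. The model name and prompts are placeholders.

import litellm

conversations = [
    [{"role": "user", "content": "Summarize the plot of Hamlet in one line."}],
    [{"role": "user", "content": "Summarize the plot of Macbeth in one line."}],
]

results = litellm.batch_completion(
    model="gpt-3.5-turbo",   # placeholder model
    messages=conversations,  # one message list per completion
    max_tokens=40,
)
for r in results:
    print(r.choices[0].message.content)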
64,262
litellm.main
batch_completion_models
Send a request to multiple language models concurrently and return the response as soon as one of the models responds. Args: *args: Variable-length positional arguments passed to the completion function. **kwargs: Additional keyword arguments: - models (str or list of str): The language models to send requests to. - Other keyword arguments to be passed to the completion function. Returns: str or None: The response from one of the language models, or None if no response is received. Note: This function utilizes a ThreadPoolExecutor to parallelize requests to multiple models. It sends requests concurrently and returns the response from the first model that responds.
def batch_completion_models(*args, **kwargs): """ Send a request to multiple language models concurrently and return the response as soon as one of the models responds. Args: *args: Variable-length positional arguments passed to the completion function. **kwargs: Additional keyword arguments: - models (str or list of str): The language models to send requests to. - Other keyword arguments to be passed to the completion function. Returns: str or None: The response from one of the language models, or None if no response is received. Note: This function utilizes a ThreadPoolExecutor to parallelize requests to multiple models. It sends requests concurrently and returns the response from the first model that responds. """ import concurrent if "model" in kwargs: kwargs.pop("model") if "models" in kwargs: models = kwargs["models"] kwargs.pop("models") futures = {} with concurrent.futures.ThreadPoolExecutor(max_workers=len(models)) as executor: for model in models: futures[model] = executor.submit( completion, *args, model=model, **kwargs ) for model, future in sorted( futures.items(), key=lambda x: models.index(x[0]) ): if future.result() is not None: return future.result() elif "deployments" in kwargs: deployments = kwargs["deployments"] kwargs.pop("deployments") kwargs.pop("model_list") nested_kwargs = kwargs.pop("kwargs", {}) futures = {} with concurrent.futures.ThreadPoolExecutor( max_workers=len(deployments) ) as executor: for deployment in deployments: for key in kwargs.keys(): if ( key not in deployment ): # don't override deployment values e.g. model name, api base, etc. deployment[key] = kwargs[key] kwargs = {**deployment, **nested_kwargs} futures[deployment["model"]] = executor.submit(completion, **kwargs) while futures: # wait for the first returned future print_verbose("\n\n waiting for next result\n\n") done, _ = concurrent.futures.wait( futures.values(), return_when=concurrent.futures.FIRST_COMPLETED ) print_verbose(f"done list\n{done}") for future in done: try: result = future.result() return result except Exception as e: # if model 1 fails, continue with response from model 2, model3 print_verbose( f"\n\ngot an exception, ignoring, removing from futures" ) print_verbose(futures) new_futures = {} for key, value in futures.items(): if future == value: print_verbose(f"removing key{key}") continue else: new_futures[key] = value futures = new_futures print_verbose(f"new futures{futures}") continue print_verbose("\n\ndone looping through futures\n\n") print_verbose(futures) return None # If no response is received from any model
(*args, **kwargs)
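A sketch of batch_completion_models, which per the code above fans the same request out to several models and returns the first response received; the model names are placeholders.

import litellm

response = litellm.batch_completion_models(
    models=["gpt-3.5-turbo", "claude-3-haiku-20240307"],  # placeholder models
    messages=[{"role": "user", "content": "What is the capital of France?"}],
)
print(response.choices[0].message.content)  # the first model to answer wins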
64,263
litellm.main
batch_completion_models_all_responses
Send a request to multiple language models concurrently and return a list of responses from all models that respond. Args: *args: Variable-length positional arguments passed to the completion function. **kwargs: Additional keyword arguments: - models (str or list of str): The language models to send requests to. - Other keyword arguments to be passed to the completion function. Returns: list: A list of responses from the language models that responded. Note: This function utilizes a ThreadPoolExecutor to parallelize requests to multiple models. It sends requests concurrently and collects responses from all models that respond.
def batch_completion_models_all_responses(*args, **kwargs):
    """
    Send a request to multiple language models concurrently and return a list of responses
    from all models that respond.

    Args:
        *args: Variable-length positional arguments passed to the completion function.
        **kwargs: Additional keyword arguments:
            - models (str or list of str): The language models to send requests to.
            - Other keyword arguments to be passed to the completion function.

    Returns:
        list: A list of responses from the language models that responded.

    Note:
        This function utilizes a ThreadPoolExecutor to parallelize requests to multiple models.
        It sends requests concurrently and collects responses from all models that respond.
    """
    import concurrent.futures

    # ANSI escape codes for colored output
    GREEN = "\033[92m"
    RED = "\033[91m"
    RESET = "\033[0m"

    if "model" in kwargs:
        kwargs.pop("model")
    if "models" in kwargs:
        models = kwargs["models"]
        kwargs.pop("models")

    responses = []

    with concurrent.futures.ThreadPoolExecutor(max_workers=len(models)) as executor:
        for idx, model in enumerate(models):
            future = executor.submit(completion, *args, model=model, **kwargs)
            if future.result() is not None:
                responses.append(future.result())

    return responses
(*args, **kwargs)
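The all-responses variant collects an answer from every model instead of returning only the first; the sketch below mirrors the previous example with placeholder model names.

import litellm

responses = litellm.batch_completion_models_all_responses(
    models=["gpt-3.5-turbo", "claude-3-haiku-20240307"],  # placeholder models
    messages=[{"role": "user", "content": "Give me one fun fact about otters."}],
)
for r in responses:
    print(r.model, "->", r.choices[0].message.content)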
64,267
litellm.utils
check_valid_key
Checks if a given API key is valid for a specific model by making a litellm.completion call with max_tokens=10 Args: model (str): The name of the model to check the API key against. api_key (str): The API key to be checked. Returns: bool: True if the API key is valid for the model, False otherwise.
def check_valid_key(model: str, api_key: str):
    """
    Checks if a given API key is valid for a specific model by making a litellm.completion call with max_tokens=10

    Args:
        model (str): The name of the model to check the API key against.
        api_key (str): The API key to be checked.

    Returns:
        bool: True if the API key is valid for the model, False otherwise.
    """
    messages = [{"role": "user", "content": "Hey, how's it going?"}]
    try:
        litellm.completion(
            model=model, messages=messages, api_key=api_key, max_tokens=10
        )
        return True
    except AuthenticationError as e:
        return False
    except Exception as e:
        return False
(model: str, api_key: str)
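A sketch of check_valid_key as shown above; the key value is a placeholder and the import path follows this record's package field. A ten-token test completion is sent under the hood.

from litellm.utils import check_valid_key

if check_valid_key(model="gpt-3.5-turbo", api_key="sk-placeholder"):  # placeholder key
    print("key works for gpt-3.5-turbo")
else:
    print("key rejected")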
64,270
litellm.utils
client
null
def client(original_function): global liteDebuggerClient, get_all_keys rules_obj = Rules() def check_coroutine(value) -> bool: if inspect.iscoroutine(value): return True elif inspect.iscoroutinefunction(value): return True else: return False def post_call_processing(original_response, model): try: if original_response is None: pass else: call_type = original_function.__name__ if ( call_type == CallTypes.completion.value or call_type == CallTypes.acompletion.value ): is_coroutine = check_coroutine(original_function) if is_coroutine == True: pass else: if isinstance(original_response, ModelResponse): model_response = original_response["choices"][0]["message"][ "content" ] ### POST-CALL RULES ### rules_obj.post_call_rules(input=model_response, model=model) except Exception as e: raise e @wraps(original_function) def wrapper(*args, **kwargs): # DO NOT MOVE THIS. It always needs to run first # Check if this is an async function. If so only execute the async function if ( kwargs.get("acompletion", False) == True or kwargs.get("aembedding", False) == True or kwargs.get("aimg_generation", False) == True or kwargs.get("amoderation", False) == True or kwargs.get("atext_completion", False) == True or kwargs.get("atranscription", False) == True ): # [OPTIONAL] CHECK MAX RETRIES / REQUEST if litellm.num_retries_per_request is not None: # check if previous_models passed in as ['litellm_params']['metadata]['previous_models'] previous_models = kwargs.get("metadata", {}).get( "previous_models", None ) if previous_models is not None: if litellm.num_retries_per_request <= len(previous_models): raise Exception(f"Max retries per request hit!") # MODEL CALL result = original_function(*args, **kwargs) if "stream" in kwargs and kwargs["stream"] == True: if ( "complete_response" in kwargs and kwargs["complete_response"] == True ): chunks = [] for idx, chunk in enumerate(result): chunks.append(chunk) return litellm.stream_chunk_builder( chunks, messages=kwargs.get("messages", None) ) else: return result return result # Prints Exactly what was passed to litellm function - don't execute any logic here - it should just print print_args_passed_to_litellm(original_function, args, kwargs) start_time = datetime.datetime.now() result = None logging_obj = kwargs.get("litellm_logging_obj", None) # only set litellm_call_id if its not in kwargs call_type = original_function.__name__ if "litellm_call_id" not in kwargs: kwargs["litellm_call_id"] = str(uuid.uuid4()) try: model = args[0] if len(args) > 0 else kwargs["model"] except: model = None if ( call_type != CallTypes.image_generation.value and call_type != CallTypes.text_completion.value ): raise ValueError("model param not passed in.") try: if logging_obj is None: logging_obj, kwargs = function_setup( original_function.__name__, rules_obj, start_time, *args, **kwargs ) kwargs["litellm_logging_obj"] = logging_obj # CHECK FOR 'os.environ/' in kwargs for k, v in kwargs.items(): if v is not None and isinstance(v, str) and v.startswith("os.environ/"): kwargs[k] = litellm.get_secret(v) # [OPTIONAL] CHECK BUDGET if litellm.max_budget: if litellm._current_cost > litellm.max_budget: raise BudgetExceededError( current_cost=litellm._current_cost, max_budget=litellm.max_budget, ) # [OPTIONAL] CHECK MAX RETRIES / REQUEST if litellm.num_retries_per_request is not None: # check if previous_models passed in as ['litellm_params']['metadata]['previous_models'] previous_models = kwargs.get("metadata", {}).get( "previous_models", None ) if previous_models is not None: if 
litellm.num_retries_per_request <= len(previous_models): raise Exception(f"Max retries per request hit!") # [OPTIONAL] CHECK CACHE print_verbose( f"SYNC kwargs[caching]: {kwargs.get('caching', False)}; litellm.cache: {litellm.cache}; kwargs.get('cache')['no-cache']: {kwargs.get('cache', {}).get('no-cache', False)}" ) # if caching is false or cache["no-cache"]==True, don't run this if ( ( ( ( kwargs.get("caching", None) is None and litellm.cache is not None ) or kwargs.get("caching", False) == True ) and kwargs.get("cache", {}).get("no-cache", False) != True ) and kwargs.get("aembedding", False) != True and kwargs.get("atext_completion", False) != True and kwargs.get("acompletion", False) != True and kwargs.get("aimg_generation", False) != True and kwargs.get("atranscription", False) != True ): # allow users to control returning cached responses from the completion function # checking cache print_verbose(f"INSIDE CHECKING CACHE") if ( litellm.cache is not None and str(original_function.__name__) in litellm.cache.supported_call_types ): print_verbose(f"Checking Cache") preset_cache_key = litellm.cache.get_cache_key(*args, **kwargs) kwargs["preset_cache_key"] = ( preset_cache_key # for streaming calls, we need to pass the preset_cache_key ) cached_result = litellm.cache.get_cache(*args, **kwargs) if cached_result != None: if "detail" in cached_result: # implies an error occurred pass else: call_type = original_function.__name__ print_verbose( f"Cache Response Object routing: call_type - {call_type}; cached_result instace: {type(cached_result)}" ) if call_type == CallTypes.completion.value and isinstance( cached_result, dict ): cached_result = convert_to_model_response_object( response_object=cached_result, model_response_object=ModelResponse(), stream=kwargs.get("stream", False), ) if kwargs.get("stream", False) == True: cached_result = CustomStreamWrapper( completion_stream=cached_result, model=model, custom_llm_provider="cached_response", logging_obj=logging_obj, ) elif call_type == CallTypes.embedding.value and isinstance( cached_result, dict ): cached_result = convert_to_model_response_object( response_object=cached_result, response_type="embedding", ) # LOG SUCCESS cache_hit = True end_time = datetime.datetime.now() ( model, custom_llm_provider, dynamic_api_key, api_base, ) = litellm.get_llm_provider( model=model, custom_llm_provider=kwargs.get( "custom_llm_provider", None ), api_base=kwargs.get("api_base", None), api_key=kwargs.get("api_key", None), ) print_verbose( f"Async Wrapper: Completed Call, calling async_success_handler: {logging_obj.async_success_handler}" ) logging_obj.update_environment_variables( model=model, user=kwargs.get("user", None), optional_params={}, litellm_params={ "logger_fn": kwargs.get("logger_fn", None), "acompletion": False, "metadata": kwargs.get("metadata", {}), "model_info": kwargs.get("model_info", {}), "proxy_server_request": kwargs.get( "proxy_server_request", None ), "preset_cache_key": kwargs.get( "preset_cache_key", None ), "stream_response": kwargs.get( "stream_response", {} ), }, input=kwargs.get("messages", ""), api_key=kwargs.get("api_key", None), original_response=str(cached_result), additional_args=None, stream=kwargs.get("stream", False), ) threading.Thread( target=logging_obj.success_handler, args=(cached_result, start_time, end_time, cache_hit), ).start() return cached_result # CHECK MAX TOKENS if ( kwargs.get("max_tokens", None) is not None and model is not None and litellm.modify_params == True # user is okay with params being modified and 
( call_type == CallTypes.acompletion.value or call_type == CallTypes.completion.value ) ): try: base_model = model if kwargs.get("hf_model_name", None) is not None: base_model = f"huggingface/{kwargs.get('hf_model_name')}" max_output_tokens = ( get_max_tokens(model=base_model) or 4096 ) # assume min context window is 4k tokens user_max_tokens = kwargs.get("max_tokens") ## Scenario 1: User limit + prompt > model limit messages = None if len(args) > 1: messages = args[1] elif kwargs.get("messages", None): messages = kwargs["messages"] input_tokens = token_counter(model=base_model, messages=messages) input_tokens += max( 0.1 * input_tokens, 10 ) # give at least a 10 token buffer. token counting can be imprecise. if input_tokens > max_output_tokens: pass # allow call to fail normally elif user_max_tokens + input_tokens > max_output_tokens: user_max_tokens = max_output_tokens - input_tokens print_verbose(f"user_max_tokens: {user_max_tokens}") kwargs["max_tokens"] = int( round(user_max_tokens) ) # make sure max tokens is always an int except Exception as e: print_verbose(f"Error while checking max token limit: {str(e)}") # MODEL CALL result = original_function(*args, **kwargs) end_time = datetime.datetime.now() if "stream" in kwargs and kwargs["stream"] == True: if ( "complete_response" in kwargs and kwargs["complete_response"] == True ): chunks = [] for idx, chunk in enumerate(result): chunks.append(chunk) return litellm.stream_chunk_builder( chunks, messages=kwargs.get("messages", None) ) else: return result elif "acompletion" in kwargs and kwargs["acompletion"] == True: return result elif "aembedding" in kwargs and kwargs["aembedding"] == True: return result elif "aimg_generation" in kwargs and kwargs["aimg_generation"] == True: return result elif "atranscription" in kwargs and kwargs["atranscription"] == True: return result ### POST-CALL RULES ### post_call_processing(original_response=result, model=model or None) # [OPTIONAL] ADD TO CACHE if ( litellm.cache is not None and str(original_function.__name__) in litellm.cache.supported_call_types ) and (kwargs.get("cache", {}).get("no-store", False) != True): litellm.cache.add_cache(result, *args, **kwargs) # LOG SUCCESS - handle streaming success logging in the _next_ object, remove `handle_success` once it's deprecated verbose_logger.info(f"Wrapper: Completed Call, calling success_handler") threading.Thread( target=logging_obj.success_handler, args=(result, start_time, end_time) ).start() # RETURN RESULT if hasattr(result, "_hidden_params"): result._hidden_params["model_id"] = kwargs.get("model_info", {}).get( "id", None ) result._hidden_params["api_base"] = get_api_base( model=model, optional_params=getattr(logging_obj, "optional_params", {}), ) result._response_ms = ( end_time - start_time ).total_seconds() * 1000 # return response latency in ms like openai return result except Exception as e: call_type = original_function.__name__ if call_type == CallTypes.completion.value: num_retries = ( kwargs.get("num_retries", None) or litellm.num_retries or None ) litellm.num_retries = ( None # set retries to None to prevent infinite loops ) context_window_fallback_dict = kwargs.get( "context_window_fallback_dict", {} ) _is_litellm_router_call = "model_group" in kwargs.get( "metadata", {} ) # check if call from litellm.router/proxy if ( num_retries and not _is_litellm_router_call ): # only enter this if call is not from litellm router/proxy. 
router has it's own logic for retrying if ( isinstance(e, openai.APIError) or isinstance(e, openai.Timeout) or isinstance(e, openai.APIConnectionError) ): kwargs["num_retries"] = num_retries return litellm.completion_with_retries(*args, **kwargs) elif ( isinstance(e, litellm.exceptions.ContextWindowExceededError) and context_window_fallback_dict and model in context_window_fallback_dict ): if len(args) > 0: args[0] = context_window_fallback_dict[model] else: kwargs["model"] = context_window_fallback_dict[model] return original_function(*args, **kwargs) traceback_exception = traceback.format_exc() end_time = datetime.datetime.now() # LOG FAILURE - handle streaming failure logging in the _next_ object, remove `handle_failure` once it's deprecated if logging_obj: logging_obj.failure_handler( e, traceback_exception, start_time, end_time ) # DO NOT MAKE THREADED - router retry fallback relies on this! my_thread = threading.Thread( target=handle_failure, args=(e, traceback_exception, start_time, end_time, args, kwargs), ) # don't interrupt execution of main thread my_thread.start() if hasattr(e, "message"): if ( liteDebuggerClient and liteDebuggerClient.dashboard_url != None ): # make it easy to get to the debugger logs if you've initialized it e.message += f"\n Check the log in your dashboard - {liteDebuggerClient.dashboard_url}" raise e @wraps(original_function) async def wrapper_async(*args, **kwargs): print_args_passed_to_litellm(original_function, args, kwargs) start_time = datetime.datetime.now() result = None logging_obj = kwargs.get("litellm_logging_obj", None) # only set litellm_call_id if its not in kwargs call_type = original_function.__name__ if "litellm_call_id" not in kwargs: kwargs["litellm_call_id"] = str(uuid.uuid4()) model = "" try: model = args[0] if len(args) > 0 else kwargs["model"] except: if ( call_type != CallTypes.aimage_generation.value # model optional and call_type != CallTypes.atext_completion.value # can also be engine ): raise ValueError("model param not passed in.") try: if logging_obj is None: logging_obj, kwargs = function_setup( original_function.__name__, rules_obj, start_time, *args, **kwargs ) kwargs["litellm_logging_obj"] = logging_obj # [OPTIONAL] CHECK BUDGET if litellm.max_budget: if litellm._current_cost > litellm.max_budget: raise BudgetExceededError( current_cost=litellm._current_cost, max_budget=litellm.max_budget, ) # [OPTIONAL] CHECK CACHE print_verbose( f"ASYNC kwargs[caching]: {kwargs.get('caching', False)}; litellm.cache: {litellm.cache}; kwargs.get('cache'): {kwargs.get('cache', None)}" ) # if caching is false, don't run this final_embedding_cached_response = None if ( (kwargs.get("caching", None) is None and litellm.cache is not None) or kwargs.get("caching", False) == True ) and ( kwargs.get("cache", {}).get("no-cache", False) != True ): # allow users to control returning cached responses from the completion function # checking cache print_verbose("INSIDE CHECKING CACHE") if ( litellm.cache is not None and str(original_function.__name__) in litellm.cache.supported_call_types ): print_verbose(f"Checking Cache") if call_type == CallTypes.aembedding.value and isinstance( kwargs["input"], list ): tasks = [] for idx, i in enumerate(kwargs["input"]): preset_cache_key = litellm.cache.get_cache_key( *args, **{**kwargs, "input": i} ) tasks.append( litellm.cache.async_get_cache( cache_key=preset_cache_key ) ) cached_result = await asyncio.gather(*tasks) ## check if cached result is None ## if cached_result is not None and isinstance( cached_result, 
list ): if len(cached_result) == 1 and cached_result[0] is None: cached_result = None elif isinstance( litellm.cache.cache, RedisSemanticCache ) or isinstance(litellm.cache.cache, RedisCache): preset_cache_key = litellm.cache.get_cache_key(*args, **kwargs) kwargs["preset_cache_key"] = ( preset_cache_key # for streaming calls, we need to pass the preset_cache_key ) cached_result = await litellm.cache.async_get_cache( *args, **kwargs ) else: # for s3 caching. [NOT RECOMMENDED IN PROD - this will slow down responses since boto3 is sync] preset_cache_key = litellm.cache.get_cache_key(*args, **kwargs) kwargs["preset_cache_key"] = ( preset_cache_key # for streaming calls, we need to pass the preset_cache_key ) cached_result = litellm.cache.get_cache(*args, **kwargs) if cached_result is not None and not isinstance( cached_result, list ): print_verbose(f"Cache Hit!") cache_hit = True end_time = datetime.datetime.now() ( model, custom_llm_provider, dynamic_api_key, api_base, ) = litellm.get_llm_provider( model=model, custom_llm_provider=kwargs.get("custom_llm_provider", None), api_base=kwargs.get("api_base", None), api_key=kwargs.get("api_key", None), ) print_verbose( f"Async Wrapper: Completed Call, calling async_success_handler: {logging_obj.async_success_handler}" ) logging_obj.update_environment_variables( model=model, user=kwargs.get("user", None), optional_params={}, litellm_params={ "logger_fn": kwargs.get("logger_fn", None), "acompletion": True, "metadata": kwargs.get("metadata", {}), "model_info": kwargs.get("model_info", {}), "proxy_server_request": kwargs.get( "proxy_server_request", None ), "preset_cache_key": kwargs.get( "preset_cache_key", None ), "stream_response": kwargs.get("stream_response", {}), "api_base": kwargs.get("api_base", ""), }, input=kwargs.get("messages", ""), api_key=kwargs.get("api_key", None), original_response=str(cached_result), additional_args=None, stream=kwargs.get("stream", False), ) call_type = original_function.__name__ if call_type == CallTypes.acompletion.value and isinstance( cached_result, dict ): if kwargs.get("stream", False) == True: cached_result = convert_to_streaming_response_async( response_object=cached_result, ) cached_result = CustomStreamWrapper( completion_stream=cached_result, model=model, custom_llm_provider="cached_response", logging_obj=logging_obj, ) else: cached_result = convert_to_model_response_object( response_object=cached_result, model_response_object=ModelResponse(), ) if ( call_type == CallTypes.atext_completion.value and isinstance(cached_result, dict) ): if kwargs.get("stream", False) == True: cached_result = convert_to_streaming_response_async( response_object=cached_result, ) cached_result = CustomStreamWrapper( completion_stream=cached_result, model=model, custom_llm_provider="cached_response", logging_obj=logging_obj, ) else: cached_result = TextCompletionResponse(**cached_result) elif call_type == CallTypes.aembedding.value and isinstance( cached_result, dict ): cached_result = convert_to_model_response_object( response_object=cached_result, model_response_object=EmbeddingResponse(), response_type="embedding", ) elif call_type == CallTypes.atranscription.value and isinstance( cached_result, dict ): hidden_params = { "model": "whisper-1", "custom_llm_provider": custom_llm_provider, } cached_result = convert_to_model_response_object( response_object=cached_result, model_response_object=TranscriptionResponse(), response_type="audio_transcription", hidden_params=hidden_params, ) if kwargs.get("stream", False) == False: # LOG 
SUCCESS asyncio.create_task( logging_obj.async_success_handler( cached_result, start_time, end_time, cache_hit ) ) threading.Thread( target=logging_obj.success_handler, args=(cached_result, start_time, end_time, cache_hit), ).start() cache_key = kwargs.get("preset_cache_key", None) cached_result._hidden_params["cache_key"] = cache_key return cached_result elif ( call_type == CallTypes.aembedding.value and cached_result is not None and isinstance(cached_result, list) and litellm.cache is not None and not isinstance( litellm.cache.cache, S3Cache ) # s3 doesn't support bulk writing. Exclude. ): remaining_list = [] non_null_list = [] for idx, cr in enumerate(cached_result): if cr is None: remaining_list.append(kwargs["input"][idx]) else: non_null_list.append((idx, cr)) original_kwargs_input = kwargs["input"] kwargs["input"] = remaining_list if len(non_null_list) > 0: print_verbose( f"EMBEDDING CACHE HIT! - {len(non_null_list)}" ) final_embedding_cached_response = EmbeddingResponse( model=kwargs.get("model"), data=[None] * len(original_kwargs_input), ) final_embedding_cached_response._hidden_params[ "cache_hit" ] = True for val in non_null_list: idx, cr = val # (idx, cr) tuple if cr is not None: final_embedding_cached_response.data[idx] = ( Embedding( embedding=cr["embedding"], index=idx, object="embedding", ) ) if len(remaining_list) == 0: # LOG SUCCESS cache_hit = True end_time = datetime.datetime.now() ( model, custom_llm_provider, dynamic_api_key, api_base, ) = litellm.get_llm_provider( model=model, custom_llm_provider=kwargs.get( "custom_llm_provider", None ), api_base=kwargs.get("api_base", None), api_key=kwargs.get("api_key", None), ) print_verbose( f"Async Wrapper: Completed Call, calling async_success_handler: {logging_obj.async_success_handler}" ) logging_obj.update_environment_variables( model=model, user=kwargs.get("user", None), optional_params={}, litellm_params={ "logger_fn": kwargs.get("logger_fn", None), "acompletion": True, "metadata": kwargs.get("metadata", {}), "model_info": kwargs.get("model_info", {}), "proxy_server_request": kwargs.get( "proxy_server_request", None ), "preset_cache_key": kwargs.get( "preset_cache_key", None ), "stream_response": kwargs.get( "stream_response", {} ), "api_base": "", }, input=kwargs.get("messages", ""), api_key=kwargs.get("api_key", None), original_response=str(final_embedding_cached_response), additional_args=None, stream=kwargs.get("stream", False), ) asyncio.create_task( logging_obj.async_success_handler( final_embedding_cached_response, start_time, end_time, cache_hit, ) ) threading.Thread( target=logging_obj.success_handler, args=( final_embedding_cached_response, start_time, end_time, cache_hit, ), ).start() return final_embedding_cached_response # MODEL CALL result = await original_function(*args, **kwargs) end_time = datetime.datetime.now() if "stream" in kwargs and kwargs["stream"] == True: if ( "complete_response" in kwargs and kwargs["complete_response"] == True ): chunks = [] for idx, chunk in enumerate(result): chunks.append(chunk) return litellm.stream_chunk_builder( chunks, messages=kwargs.get("messages", None) ) else: return result # ADD HIDDEN PARAMS - additional call metadata if hasattr(result, "_hidden_params"): result._hidden_params["model_id"] = kwargs.get("model_info", {}).get( "id", None ) result._hidden_params["api_base"] = get_api_base( model=model, optional_params=kwargs, ) if ( isinstance(result, ModelResponse) or isinstance(result, EmbeddingResponse) or isinstance(result, TranscriptionResponse) ): 
result._response_ms = ( end_time - start_time ).total_seconds() * 1000 # return response latency in ms like openai ### POST-CALL RULES ### post_call_processing(original_response=result, model=model) # [OPTIONAL] ADD TO CACHE if ( (litellm.cache is not None) and ( str(original_function.__name__) in litellm.cache.supported_call_types ) and (kwargs.get("cache", {}).get("no-store", False) != True) ): if ( isinstance(result, litellm.ModelResponse) or isinstance(result, litellm.EmbeddingResponse) or isinstance(result, TranscriptionResponse) ): if ( isinstance(result, EmbeddingResponse) and isinstance(kwargs["input"], list) and litellm.cache is not None and not isinstance( litellm.cache.cache, S3Cache ) # s3 doesn't support bulk writing. Exclude. ): asyncio.create_task( litellm.cache.async_add_cache_pipeline( result, *args, **kwargs ) ) elif isinstance(litellm.cache.cache, S3Cache): threading.Thread( target=litellm.cache.add_cache, args=(result,) + args, kwargs=kwargs, ).start() else: asyncio.create_task( litellm.cache.async_add_cache( result.json(), *args, **kwargs ) ) else: asyncio.create_task( litellm.cache.async_add_cache(result, *args, **kwargs) ) # LOG SUCCESS - handle streaming success logging in the _next_ object print_verbose( f"Async Wrapper: Completed Call, calling async_success_handler: {logging_obj.async_success_handler}" ) # check if user does not want this to be logged asyncio.create_task( logging_obj.async_success_handler(result, start_time, end_time) ) threading.Thread( target=logging_obj.success_handler, args=(result, start_time, end_time), ).start() # REBUILD EMBEDDING CACHING if ( isinstance(result, EmbeddingResponse) and final_embedding_cached_response is not None ): idx = 0 final_data_list = [] for item in final_embedding_cached_response.data: if item is None: final_data_list.append(result.data[idx]) idx += 1 else: final_data_list.append(item) final_embedding_cached_response.data = final_data_list final_embedding_cached_response._hidden_params["cache_hit"] = True final_embedding_cached_response._response_ms = ( end_time - start_time ).total_seconds() * 1000 return final_embedding_cached_response return result except Exception as e: traceback_exception = traceback.format_exc() end_time = datetime.datetime.now() if logging_obj: try: logging_obj.failure_handler( e, traceback_exception, start_time, end_time ) # DO NOT MAKE THREADED - router retry fallback relies on this! except Exception as e: raise e try: await logging_obj.async_failure_handler( e, traceback_exception, start_time, end_time ) except Exception as e: raise e call_type = original_function.__name__ if call_type == CallTypes.acompletion.value: num_retries = ( kwargs.get("num_retries", None) or litellm.num_retries or None ) litellm.num_retries = ( None # set retries to None to prevent infinite loops ) context_window_fallback_dict = kwargs.get( "context_window_fallback_dict", {} ) _is_litellm_router_call = "model_group" in kwargs.get( "metadata", {} ) # check if call from litellm.router/proxy if ( num_retries and not _is_litellm_router_call ): # only enter this if call is not from litellm router/proxy. 
router has it's own logic for retrying try: kwargs["num_retries"] = num_retries kwargs["original_function"] = original_function if isinstance( e, openai.RateLimitError ): # rate limiting specific error kwargs["retry_strategy"] = "exponential_backoff_retry" elif isinstance(e, openai.APIError): # generic api error kwargs["retry_strategy"] = "constant_retry" return await litellm.acompletion_with_retries(*args, **kwargs) except: pass elif ( isinstance(e, litellm.exceptions.ContextWindowExceededError) and context_window_fallback_dict and model in context_window_fallback_dict ): if len(args) > 0: args[0] = context_window_fallback_dict[model] else: kwargs["model"] = context_window_fallback_dict[model] return await original_function(*args, **kwargs) raise e is_coroutine = inspect.iscoroutinefunction(original_function) # Return the appropriate wrapper based on the original function type if is_coroutine: return wrapper_async else: return wrapper
(original_function)
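A minimal sketch of the retry-then-fallback pattern that the async wrapper above implements; the exception types, parameter names, and backoff policy below are illustrative placeholders, not litellm internals:
```
import asyncio
import functools

# Illustrative stand-ins for "transient" errors worth retrying.
TRANSIENT_ERRORS = (TimeoutError, ConnectionError)

def with_retries_and_fallback(num_retries=2, fallback_model=None):
    def decorator(fn):
        @functools.wraps(fn)
        async def wrapper(*args, **kwargs):
            last_exc = None
            for attempt in range(num_retries + 1):
                try:
                    return await fn(*args, **kwargs)
                except TRANSIENT_ERRORS as e:
                    last_exc = e
                    await asyncio.sleep(2 ** attempt)  # exponential backoff between attempts
            if fallback_model is not None:
                kwargs["model"] = fallback_model  # swap to the fallback model once retries are exhausted
                return await fn(*args, **kwargs)
            raise last_exc
        return wrapper
    return decorator
```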
64,274
litellm.main
completion
Perform a completion() using any of litellm supported llms (example gpt-4, gpt-3.5-turbo, claude-2, command-nightly) Parameters: model (str): The name of the language model to use for text completion. see all supported LLMs: https://docs.litellm.ai/docs/providers/ messages (List): A list of message objects representing the conversation context (default is an empty list). OPTIONAL PARAMS functions (List, optional): A list of functions to apply to the conversation messages (default is an empty list). function_call (str, optional): The name of the function to call within the conversation (default is an empty string). temperature (float, optional): The temperature parameter for controlling the randomness of the output (default is 1.0). top_p (float, optional): The top-p parameter for nucleus sampling (default is 1.0). n (int, optional): The number of completions to generate (default is 1). stream (bool, optional): If True, return a streaming response (default is False). stream_options (dict, optional): A dictionary containing options for the streaming response. Only set this when you set stream: true. stop(string/list, optional): - Up to 4 sequences where the LLM API will stop generating further tokens. max_tokens (integer, optional): The maximum number of tokens in the generated completion (default is infinity). presence_penalty (float, optional): It is used to penalize new tokens based on their existence in the text so far. frequency_penalty: It is used to penalize new tokens based on their frequency in the text so far. logit_bias (dict, optional): Used to modify the probability of specific tokens appearing in the completion. user (str, optional): A unique identifier representing your end-user. This can help the LLM provider to monitor and detect abuse. logprobs (bool, optional): Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message top_logprobs (int, optional): An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used. metadata (dict, optional): Pass in additional metadata to tag your completion calls - eg. prompt version, details, etc. api_base (str, optional): Base URL for the API (default is None). api_version (str, optional): API version (default is None). api_key (str, optional): API key (default is None). model_list (list, optional): List of api base, version, keys extra_headers (dict, optional): Additional headers to include in the request. LITELLM Specific Params mock_response (str, optional): If provided, return a mock completion response for testing or debugging purposes (default is None). custom_llm_provider (str, optional): Used for Non-OpenAI LLMs, Example usage for bedrock, set model="amazon.titan-tg1-large" and custom_llm_provider="bedrock" max_retries (int, optional): The number of retries to attempt (default is 0). Returns: ModelResponse: A response object containing the generated completion and associated metadata. Note: - This function is used to perform completions() using the specified language model. - It supports various optional parameters for customizing the completion behavior. - If 'mock_response' is provided, a mock completion response is returned for testing or debugging.
def embedding( model, input=[], # Optional params dimensions: Optional[int] = None, timeout=600, # default to 10 minutes # set api_base, api_version, api_key api_base: Optional[str] = None, api_version: Optional[str] = None, api_key: Optional[str] = None, api_type: Optional[str] = None, caching: bool = False, user: Optional[str] = None, custom_llm_provider=None, litellm_call_id=None, litellm_logging_obj=None, logger_fn=None, **kwargs, ): """ Embedding function that calls an API to generate embeddings for the given input. Parameters: - model: The embedding model to use. - input: The input for which embeddings are to be generated. - dimensions: The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3 and later models. - timeout: The timeout value for the API call, default 10 mins - litellm_call_id: The call ID for litellm logging. - litellm_logging_obj: The litellm logging object. - logger_fn: The logger function. - api_base: Optional. The base URL for the API. - api_version: Optional. The version of the API. - api_key: Optional. The API key to use. - api_type: Optional. The type of the API. - caching: A boolean indicating whether to enable caching. - custom_llm_provider: The custom llm provider. Returns: - response: The response received from the API call. Raises: - exception_type: If an exception occurs during the API call. """ azure = kwargs.get("azure", None) client = kwargs.pop("client", None) rpm = kwargs.pop("rpm", None) tpm = kwargs.pop("tpm", None) max_parallel_requests = kwargs.pop("max_parallel_requests", None) model_info = kwargs.get("model_info", None) metadata = kwargs.get("metadata", None) encoding_format = kwargs.get("encoding_format", None) proxy_server_request = kwargs.get("proxy_server_request", None) aembedding = kwargs.get("aembedding", None) ### CUSTOM MODEL COST ### input_cost_per_token = kwargs.get("input_cost_per_token", None) output_cost_per_token = kwargs.get("output_cost_per_token", None) input_cost_per_second = kwargs.get("input_cost_per_second", None) output_cost_per_second = kwargs.get("output_cost_per_second", None) openai_params = [ "user", "dimensions", "request_timeout", "api_base", "api_version", "api_key", "deployment_id", "organization", "base_url", "default_headers", "timeout", "max_retries", "encoding_format", ] litellm_params = [ "metadata", "aembedding", "caching", "mock_response", "api_key", "api_version", "api_base", "force_timeout", "logger_fn", "verbose", "custom_llm_provider", "litellm_logging_obj", "litellm_call_id", "use_client", "id", "fallbacks", "azure", "headers", "model_list", "num_retries", "context_window_fallback_dict", "retry_policy", "roles", "final_prompt_value", "bos_token", "eos_token", "request_timeout", "complete_response", "self", "client", "rpm", "tpm", "max_parallel_requests", "input_cost_per_token", "output_cost_per_token", "input_cost_per_second", "output_cost_per_second", "hf_model_name", "proxy_server_request", "model_info", "preset_cache_key", "caching_groups", "ttl", "cache", "no-log", "region_name", "allowed_model_region", ] default_params = openai_params + litellm_params non_default_params = { k: v for k, v in kwargs.items() if k not in default_params } # model-specific params - pass them straight to the model/provider model, custom_llm_provider, dynamic_api_key, api_base = get_llm_provider( model=model, custom_llm_provider=custom_llm_provider, api_base=api_base, api_key=api_key, ) optional_params = get_optional_params_embeddings( model=model, user=user, 
dimensions=dimensions, encoding_format=encoding_format, custom_llm_provider=custom_llm_provider, **non_default_params, ) ### REGISTER CUSTOM MODEL PRICING -- IF GIVEN ### if input_cost_per_token is not None and output_cost_per_token is not None: litellm.register_model( { model: { "input_cost_per_token": input_cost_per_token, "output_cost_per_token": output_cost_per_token, "litellm_provider": custom_llm_provider, } } ) if input_cost_per_second is not None: # time based pricing just needs cost in place output_cost_per_second = output_cost_per_second or 0.0 litellm.register_model( { model: { "input_cost_per_second": input_cost_per_second, "output_cost_per_second": output_cost_per_second, "litellm_provider": custom_llm_provider, } } ) try: response = None logging = litellm_logging_obj logging.update_environment_variables( model=model, user=user, optional_params=optional_params, litellm_params={ "timeout": timeout, "azure": azure, "litellm_call_id": litellm_call_id, "logger_fn": logger_fn, "proxy_server_request": proxy_server_request, "model_info": model_info, "metadata": metadata, "aembedding": aembedding, "preset_cache_key": None, "stream_response": {}, }, ) if azure == True or custom_llm_provider == "azure": # azure configs api_type = get_secret("AZURE_API_TYPE") or "azure" api_base = api_base or litellm.api_base or get_secret("AZURE_API_BASE") api_version = ( api_version or litellm.api_version or get_secret("AZURE_API_VERSION") ) azure_ad_token = optional_params.pop("azure_ad_token", None) or get_secret( "AZURE_AD_TOKEN" ) api_key = ( api_key or litellm.api_key or litellm.azure_key or get_secret("AZURE_API_KEY") ) ## EMBEDDING CALL response = azure_chat_completions.embedding( model=model, input=input, api_base=api_base, api_key=api_key, api_version=api_version, azure_ad_token=azure_ad_token, logging_obj=logging, timeout=timeout, model_response=EmbeddingResponse(), optional_params=optional_params, client=client, aembedding=aembedding, ) elif ( model in litellm.open_ai_embedding_models or custom_llm_provider == "openai" ): api_base = ( api_base or litellm.api_base or get_secret("OPENAI_API_BASE") or "https://api.openai.com/v1" ) openai.organization = ( litellm.organization or get_secret("OPENAI_ORGANIZATION") or None # default - https://github.com/openai/openai-python/blob/284c1799070c723c6a553337134148a7ab088dd8/openai/util.py#L105 ) # set API KEY api_key = ( api_key or litellm.api_key or litellm.openai_key or get_secret("OPENAI_API_KEY") ) api_type = "openai" api_version = None ## EMBEDDING CALL response = openai_chat_completions.embedding( model=model, input=input, api_base=api_base, api_key=api_key, logging_obj=logging, timeout=timeout, model_response=EmbeddingResponse(), optional_params=optional_params, client=client, aembedding=aembedding, ) elif custom_llm_provider == "cohere": cohere_key = ( api_key or litellm.cohere_key or get_secret("COHERE_API_KEY") or get_secret("CO_API_KEY") or litellm.api_key ) response = cohere.embedding( model=model, input=input, optional_params=optional_params, encoding=encoding, api_key=cohere_key, logging_obj=logging, model_response=EmbeddingResponse(), ) elif custom_llm_provider == "huggingface": api_key = ( api_key or litellm.huggingface_key or get_secret("HUGGINGFACE_API_KEY") or litellm.api_key ) response = huggingface.embedding( model=model, input=input, encoding=encoding, api_key=api_key, api_base=api_base, logging_obj=logging, model_response=EmbeddingResponse(), ) elif custom_llm_provider == "bedrock": response = bedrock.embedding( model=model, 
input=input, encoding=encoding, logging_obj=logging, optional_params=optional_params, model_response=EmbeddingResponse(), ) elif custom_llm_provider == "triton": if api_base is None: raise ValueError( "api_base is required for triton. Please pass `api_base`" ) response = triton_chat_completions.embedding( model=model, input=input, api_base=api_base, api_key=api_key, logging_obj=logging, timeout=timeout, model_response=EmbeddingResponse(), optional_params=optional_params, client=client, aembedding=aembedding, ) elif custom_llm_provider == "vertex_ai": vertex_ai_project = ( optional_params.pop("vertex_project", None) or optional_params.pop("vertex_ai_project", None) or litellm.vertex_project or get_secret("VERTEXAI_PROJECT") or get_secret("VERTEX_PROJECT") ) vertex_ai_location = ( optional_params.pop("vertex_location", None) or optional_params.pop("vertex_ai_location", None) or litellm.vertex_location or get_secret("VERTEXAI_LOCATION") or get_secret("VERTEX_LOCATION") ) vertex_credentials = ( optional_params.pop("vertex_credentials", None) or optional_params.pop("vertex_ai_credentials", None) or get_secret("VERTEXAI_CREDENTIALS") or get_secret("VERTEX_CREDENTIALS") ) response = vertex_ai.embedding( model=model, input=input, encoding=encoding, logging_obj=logging, optional_params=optional_params, model_response=EmbeddingResponse(), vertex_project=vertex_ai_project, vertex_location=vertex_ai_location, vertex_credentials=vertex_credentials, aembedding=aembedding, print_verbose=print_verbose, ) elif custom_llm_provider == "oobabooga": response = oobabooga.embedding( model=model, input=input, encoding=encoding, api_base=api_base, logging_obj=logging, optional_params=optional_params, model_response=EmbeddingResponse(), ) elif custom_llm_provider == "ollama": api_base = ( litellm.api_base or api_base or get_secret("OLLAMA_API_BASE") or "http://localhost:11434" ) if isinstance(input, str): input = [input] if not all(isinstance(item, str) for item in input): raise litellm.BadRequestError( message=f"Invalid input for ollama embeddings. 
input={input}", model=model, # type: ignore llm_provider="ollama", # type: ignore ) ollama_embeddings_fn = ( ollama.ollama_aembeddings if aembedding else ollama.ollama_embeddings ) response = ollama_embeddings_fn( api_base=api_base, model=model, prompts=input, encoding=encoding, logging_obj=logging, optional_params=optional_params, model_response=EmbeddingResponse(), ) elif custom_llm_provider == "sagemaker": response = sagemaker.embedding( model=model, input=input, encoding=encoding, logging_obj=logging, optional_params=optional_params, model_response=EmbeddingResponse(), print_verbose=print_verbose, ) elif custom_llm_provider == "mistral": api_key = api_key or litellm.api_key or get_secret("MISTRAL_API_KEY") response = openai_chat_completions.embedding( model=model, input=input, api_base=api_base, api_key=api_key, logging_obj=logging, timeout=timeout, model_response=EmbeddingResponse(), optional_params=optional_params, client=client, aembedding=aembedding, ) elif custom_llm_provider == "voyage": api_key = api_key or litellm.api_key or get_secret("VOYAGE_API_KEY") response = openai_chat_completions.embedding( model=model, input=input, api_base=api_base, api_key=api_key, logging_obj=logging, timeout=timeout, model_response=EmbeddingResponse(), optional_params=optional_params, client=client, aembedding=aembedding, ) elif custom_llm_provider == "xinference": api_key = ( api_key or litellm.api_key or get_secret("XINFERENCE_API_KEY") or "stub-xinference-key" ) # xinference does not need an api key, pass a stub key if user did not set one api_base = ( api_base or litellm.api_base or get_secret("XINFERENCE_API_BASE") or "http://127.0.0.1:9997/v1" ) response = openai_chat_completions.embedding( model=model, input=input, api_base=api_base, api_key=api_key, logging_obj=logging, timeout=timeout, model_response=EmbeddingResponse(), optional_params=optional_params, client=client, aembedding=aembedding, ) elif custom_llm_provider == "watsonx": response = watsonx.IBMWatsonXAI().embedding( model=model, input=input, encoding=encoding, logging_obj=logging, optional_params=optional_params, model_response=EmbeddingResponse(), ) else: args = locals() raise ValueError(f"No valid embedding model args passed in - {args}") if response is not None and hasattr(response, "_hidden_params"): response._hidden_params["custom_llm_provider"] = custom_llm_provider return response except Exception as e: ## LOGGING logging.post_call( input=input, api_key=api_key, original_response=str(e), ) ## Map to OpenAI Exception raise exception_type( model=model, original_exception=e, custom_llm_provider=custom_llm_provider, extra_kwargs=kwargs, )
(model: str, messages: List = [], timeout: Union[float, str, openai.Timeout, NoneType] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, n: Optional[int] = None, stream: Optional[bool] = None, stream_options: Optional[dict] = None, stop=None, max_tokens: Optional[int] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, logit_bias: Optional[dict] = None, user: Optional[str] = None, response_format: Optional[dict] = None, seed: Optional[int] = None, tools: Optional[List] = None, tool_choice: Optional[str] = None, logprobs: Optional[bool] = None, top_logprobs: Optional[int] = None, deployment_id=None, extra_headers: Optional[dict] = None, functions: Optional[List] = None, function_call: Optional[str] = None, base_url: Optional[str] = None, api_version: Optional[str] = None, api_key: Optional[str] = None, model_list: Optional[list] = None, **kwargs) -> Union[litellm.utils.ModelResponse, litellm.utils.CustomStreamWrapper]
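A short usage sketch for the completion() call described by the docstring and signature above; it assumes an OPENAI_API_KEY is set in the environment, and the model names are illustrative:
```
import litellm

# Basic call - blocks until the full response is returned.
response = litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Say hello in one word."}],
    temperature=0.2,
    max_tokens=16,
)
print(response.choices[0].message.content)

# Streaming variant: iterate over chunks instead of a single response object.
for chunk in litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Count to three."}],
    stream=True,
):
    print(chunk.choices[0].delta)
```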
64,275
litellm.utils
completion_cost
Calculate the cost of a given completion call for GPT-3.5-turbo, llama2, or any litellm-supported llm. Parameters: completion_response (litellm.ModelResponse): [Required] The response received from a LiteLLM completion request. [OPTIONAL PARAMS] model (str): Optional. The name of the language model used in the completion call prompt (str): Optional. The input prompt passed to the llm completion (str): Optional. The output completion text from the llm total_time (float): Optional. (Only used for Replicate LLMs) The total time used for the request, in seconds Returns: float: The cost in USD for the completion, based on the provided parameters. Note: - If completion_response is provided, the function extracts token information and the model name from it. - If completion_response is not provided, the function calculates token counts based on the model and input text. - The cost is calculated based on the model, prompt tokens, and completion tokens. - For certain models containing "togethercomputer" in the name, prices are based on the model size. - For Replicate models, the cost is calculated based on the total time used for the request. Exceptions: - If an error occurs during execution, the function returns 0.0 without blocking the user's execution path.
def completion_cost( completion_response=None, model=None, prompt="", messages: List = [], completion="", total_time=0.0, # used for replicate, sagemaker call_type: Literal[ "completion", "acompletion", "embedding", "aembedding", "atext_completion", "text_completion", "image_generation", "aimage_generation", "transcription", "atranscription", ] = "completion", ### REGION ### custom_llm_provider=None, region_name=None, # used for bedrock pricing ### IMAGE GEN ### size=None, quality=None, n=None, # number of images ): """ Calculate the cost of a given completion call fot GPT-3.5-turbo, llama2, any litellm supported llm. Parameters: completion_response (litellm.ModelResponses): [Required] The response received from a LiteLLM completion request. [OPTIONAL PARAMS] model (str): Optional. The name of the language model used in the completion calls prompt (str): Optional. The input prompt passed to the llm completion (str): Optional. The output completion text from the llm total_time (float): Optional. (Only used for Replicate LLMs) The total time used for the request in seconds Returns: float: The cost in USD dollars for the completion based on the provided parameters. Note: - If completion_response is provided, the function extracts token information and the model name from it. - If completion_response is not provided, the function calculates token counts based on the model and input text. - The cost is calculated based on the model, prompt tokens, and completion tokens. - For certain models containing "togethercomputer" in the name, prices are based on the model size. - For Replicate models, the cost is calculated based on the total time used for the request. Exceptions: - If an error occurs during execution, the function returns 0.0 without blocking the user's execution path. 
""" try: if ( (call_type == "aimage_generation" or call_type == "image_generation") and model is not None and isinstance(model, str) and len(model) == 0 and custom_llm_provider == "azure" ): model = "dall-e-2" # for dall-e-2, azure expects an empty model name # Handle Inputs to completion_cost prompt_tokens = 0 completion_tokens = 0 custom_llm_provider = None if completion_response is not None: # get input/output tokens from completion_response prompt_tokens = completion_response.get("usage", {}).get("prompt_tokens", 0) completion_tokens = completion_response.get("usage", {}).get( "completion_tokens", 0 ) total_time = completion_response.get("_response_ms", 0) verbose_logger.debug( f"completion_response response ms: {completion_response.get('_response_ms')} " ) model = model or completion_response.get( "model", None ) # check if user passed an override for model, if it's none check completion_response['model'] if hasattr(completion_response, "_hidden_params"): if ( completion_response._hidden_params.get("model", None) is not None and len(completion_response._hidden_params["model"]) > 0 ): model = completion_response._hidden_params.get("model", model) custom_llm_provider = completion_response._hidden_params.get( "custom_llm_provider", "" ) region_name = completion_response._hidden_params.get( "region_name", region_name ) size = completion_response._hidden_params.get( "optional_params", {} ).get( "size", "1024-x-1024" ) # openai default quality = completion_response._hidden_params.get( "optional_params", {} ).get( "quality", "standard" ) # openai default n = completion_response._hidden_params.get("optional_params", {}).get( "n", 1 ) # openai default else: if len(messages) > 0: prompt_tokens = token_counter(model=model, messages=messages) elif len(prompt) > 0: prompt_tokens = token_counter(model=model, text=prompt) completion_tokens = token_counter(model=model, text=completion) if model == None: raise ValueError( f"Model is None and does not exist in passed completion_response. Passed completion_response={completion_response}, model={model}" ) if ( call_type == CallTypes.image_generation.value or call_type == CallTypes.aimage_generation.value ): ### IMAGE GENERATION COST CALCULATION ### # fix size to match naming convention if "x" in size and "-x-" not in size: size = size.replace("x", "-x-") image_gen_model_name = f"{size}/{model}" image_gen_model_name_with_quality = image_gen_model_name if quality is not None: image_gen_model_name_with_quality = f"{quality}/{image_gen_model_name}" size = size.split("-x-") height = int(size[0]) # if it's 1024-x-1024 vs. 
1024x1024 width = int(size[1]) verbose_logger.debug(f"image_gen_model_name: {image_gen_model_name}") verbose_logger.debug( f"image_gen_model_name_with_quality: {image_gen_model_name_with_quality}" ) if image_gen_model_name in litellm.model_cost: return ( litellm.model_cost[image_gen_model_name]["input_cost_per_pixel"] * height * width * n ) elif image_gen_model_name_with_quality in litellm.model_cost: return ( litellm.model_cost[image_gen_model_name_with_quality][ "input_cost_per_pixel" ] * height * width * n ) else: raise Exception( f"Model={image_gen_model_name} not found in completion cost model map" ) # Calculate cost based on prompt_tokens, completion_tokens if ( "togethercomputer" in model or "together_ai" in model or custom_llm_provider == "together_ai" ): # together ai prices based on size of llm # get_model_params_and_category takes a model name and returns the category of LLM size it is in model_prices_and_context_window.json model = get_model_params_and_category(model) # replicate llms are calculate based on time for request running # see https://replicate.com/pricing elif ( model in litellm.replicate_models or "replicate" in model ) and model not in litellm.model_cost: # for unmapped replicate model, default to replicate's time tracking logic return get_replicate_completion_pricing(completion_response, total_time) ( prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar, ) = cost_per_token( model=model, prompt_tokens=prompt_tokens, completion_tokens=completion_tokens, custom_llm_provider=custom_llm_provider, response_time_ms=total_time, region_name=region_name, ) _final_cost = prompt_tokens_cost_usd_dollar + completion_tokens_cost_usd_dollar print_verbose( f"final cost: {_final_cost}; prompt_tokens_cost_usd_dollar: {prompt_tokens_cost_usd_dollar}; completion_tokens_cost_usd_dollar: {completion_tokens_cost_usd_dollar}" ) return _final_cost except Exception as e: raise e
(completion_response=None, model=None, prompt='', messages: List = [], completion='', total_time=0.0, call_type: Literal['completion', 'acompletion', 'embedding', 'aembedding', 'atext_completion', 'text_completion', 'image_generation', 'aimage_generation', 'transcription', 'atranscription'] = 'completion', custom_llm_provider=None, region_name=None, size=None, quality=None, n=None)
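A small usage sketch for completion_cost(), building the estimate from raw prompt/completion text; the model name is illustrative and must exist in litellm's pricing map:
```
from litellm import completion_cost

cost = completion_cost(
    model="gpt-3.5-turbo",
    prompt="Hello, how are you?",
    completion="I'm doing well, thank you!",
)
print(f"estimated cost: ${cost:.6f}")
```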
64,276
litellm.utils
completion_with_fallbacks
null
def completion_with_fallbacks(**kwargs): nested_kwargs = kwargs.pop("kwargs", {}) response = None rate_limited_models = set() model_expiration_times = {} start_time = time.time() original_model = kwargs["model"] fallbacks = [kwargs["model"]] + nested_kwargs.get("fallbacks", []) if "fallbacks" in nested_kwargs: del nested_kwargs["fallbacks"] # remove fallbacks so it's not recursive litellm_call_id = str(uuid.uuid4()) # max time to process a request with fallbacks: default 45s while response == None and time.time() - start_time < 45: for model in fallbacks: # loop thru all models try: # check if it's dict or new model string if isinstance( model, dict ): # completion(model="gpt-4", fallbacks=[{"api_key": "", "api_base": ""}, {"api_key": "", "api_base": ""}]) kwargs["api_key"] = model.get("api_key", None) kwargs["api_base"] = model.get("api_base", None) model = model.get("model", original_model) elif ( model in rate_limited_models ): # check if model is currently cooling down if ( model_expiration_times.get(model) and time.time() >= model_expiration_times[model] ): rate_limited_models.remove( model ) # check if it's been 60s of cool down and remove model else: continue # skip model # delete model from kwargs if it exists if kwargs.get("model"): del kwargs["model"] print_verbose(f"trying to make completion call with model: {model}") kwargs["litellm_call_id"] = litellm_call_id kwargs = { **kwargs, **nested_kwargs, } # combine the openai + litellm params at the same level response = litellm.completion(**kwargs, model=model) print_verbose(f"response: {response}") if response != None: return response except Exception as e: print_verbose(e) rate_limited_models.add(model) model_expiration_times[model] = ( time.time() + 60 ) # cool down this selected model pass return response
(**kwargs)
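An illustrative direct call, mirroring how the helper above reads its arguments (the fallback list is nested under a "kwargs" dict); the model names and key are placeholders:
```
from litellm.utils import completion_with_fallbacks

response = completion_with_fallbacks(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
    kwargs={"fallbacks": ["gpt-4", {"model": "claude-2", "api_key": "sk-..."}]},
)
print(response)
```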
64,278
litellm.main
config_completion
null
def config_completion(**kwargs): if litellm.config_path != None: config_args = read_config_args(litellm.config_path) # overwrite any args passed in with config args return completion(**kwargs, **config_args) else: raise ValueError( "No config path set, please set a config path using `litellm.config_path = 'path/to/config.json'`" )
(**kwargs)
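A usage sketch, assuming a local litellm_config.json exists, holds default completion arguments, and does not duplicate the arguments passed in directly; the import path follows the package listed above:
```
import litellm
from litellm.main import config_completion

litellm.config_path = "litellm_config.json"  # assumed config file with default args
response = config_completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
)
```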
64,280
litellm.utils
convert_to_model_response_object
null
def convert_to_model_response_object( response_object: Optional[dict] = None, model_response_object: Optional[ Union[ModelResponse, EmbeddingResponse, ImageResponse, TranscriptionResponse] ] = None, response_type: Literal[ "completion", "embedding", "image_generation", "audio_transcription" ] = "completion", stream=False, start_time=None, end_time=None, hidden_params: Optional[dict] = None, ): received_args = locals() try: if response_type == "completion" and ( model_response_object is None or isinstance(model_response_object, ModelResponse) ): if response_object is None or model_response_object is None: raise Exception("Error in response object format") if stream == True: # for returning cached responses, we need to yield a generator return convert_to_streaming_response(response_object=response_object) choice_list = [] assert response_object["choices"] is not None and isinstance( response_object["choices"], Iterable ) for idx, choice in enumerate(response_object["choices"]): message = Message( content=choice["message"].get("content", None), role=choice["message"]["role"], function_call=choice["message"].get("function_call", None), tool_calls=choice["message"].get("tool_calls", None), ) finish_reason = choice.get("finish_reason", None) if finish_reason == None: # gpt-4 vision can return 'finish_reason' or 'finish_details' finish_reason = choice.get("finish_details") logprobs = choice.get("logprobs", None) enhancements = choice.get("enhancements", None) choice = Choices( finish_reason=finish_reason, index=idx, message=message, logprobs=logprobs, enhancements=enhancements, ) choice_list.append(choice) model_response_object.choices = choice_list if "usage" in response_object and response_object["usage"] is not None: model_response_object.usage.completion_tokens = response_object["usage"].get("completion_tokens", 0) # type: ignore model_response_object.usage.prompt_tokens = response_object["usage"].get("prompt_tokens", 0) # type: ignore model_response_object.usage.total_tokens = response_object["usage"].get("total_tokens", 0) # type: ignore if "created" in response_object: model_response_object.created = response_object["created"] if "id" in response_object: model_response_object.id = response_object["id"] if "system_fingerprint" in response_object: model_response_object.system_fingerprint = response_object[ "system_fingerprint" ] if "model" in response_object and model_response_object.model is None: model_response_object.model = response_object["model"] if start_time is not None and end_time is not None: if isinstance(start_time, type(end_time)): model_response_object._response_ms = ( # type: ignore end_time - start_time ).total_seconds() * 1000 if hidden_params is not None: model_response_object._hidden_params = hidden_params return model_response_object elif response_type == "embedding" and ( model_response_object is None or isinstance(model_response_object, EmbeddingResponse) ): if response_object is None: raise Exception("Error in response object format") if model_response_object is None: model_response_object = EmbeddingResponse() if "model" in response_object: model_response_object.model = response_object["model"] if "object" in response_object: model_response_object.object = response_object["object"] model_response_object.data = response_object["data"] if "usage" in response_object and response_object["usage"] is not None: model_response_object.usage.completion_tokens = response_object["usage"].get("completion_tokens", 0) # type: ignore model_response_object.usage.prompt_tokens = 
response_object["usage"].get("prompt_tokens", 0) # type: ignore model_response_object.usage.total_tokens = response_object["usage"].get("total_tokens", 0) # type: ignore if start_time is not None and end_time is not None: model_response_object._response_ms = ( # type: ignore end_time - start_time ).total_seconds() * 1000 # return response latency in ms like openai if hidden_params is not None: model_response_object._hidden_params = hidden_params return model_response_object elif response_type == "image_generation" and ( model_response_object is None or isinstance(model_response_object, ImageResponse) ): if response_object is None: raise Exception("Error in response object format") if model_response_object is None: model_response_object = ImageResponse() if "created" in response_object: model_response_object.created = response_object["created"] if "data" in response_object: model_response_object.data = response_object["data"] if hidden_params is not None: model_response_object._hidden_params = hidden_params return model_response_object elif response_type == "audio_transcription" and ( model_response_object is None or isinstance(model_response_object, TranscriptionResponse) ): if response_object is None: raise Exception("Error in response object format") if model_response_object is None: model_response_object = TranscriptionResponse() if "text" in response_object: model_response_object.text = response_object["text"] if hidden_params is not None: model_response_object._hidden_params = hidden_params return model_response_object except Exception as e: raise Exception( f"Invalid response object {traceback.format_exc()}\n\nreceived_args={received_args}" )
(response_object: Optional[dict] = None, model_response_object: Union[litellm.utils.ModelResponse, litellm.utils.EmbeddingResponse, litellm.utils.ImageResponse, litellm.utils.TranscriptionResponse, NoneType] = None, response_type: Literal['completion', 'embedding', 'image_generation', 'audio_transcription'] = 'completion', stream=False, start_time=None, end_time=None, hidden_params: Optional[dict] = None)
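A usage sketch that converts an illustrative OpenAI-shaped payload into a ModelResponse; all field values below are made up for the example:
```
from litellm.utils import ModelResponse, convert_to_model_response_object

raw = {
    "id": "chatcmpl-123",
    "created": 1700000000,
    "model": "gpt-3.5-turbo",
    "choices": [
        {
            "index": 0,
            "finish_reason": "stop",
            "message": {"role": "assistant", "content": "Hi there!"},
        }
    ],
    "usage": {"prompt_tokens": 5, "completion_tokens": 3, "total_tokens": 8},
}

response = convert_to_model_response_object(
    response_object=raw,
    model_response_object=ModelResponse(),
)
print(response.choices[0].message.content)
```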
64,281
litellm.utils
cost_per_token
Calculates the cost per token for a given model, prompt tokens, and completion tokens. Parameters: model (str): The name of the model to use. Default is "" prompt_tokens (int): The number of tokens in the prompt. completion_tokens (int): The number of tokens in the completion. Returns: tuple: A tuple containing the cost in USD dollars for prompt tokens and completion tokens, respectively.
def cost_per_token( model="", prompt_tokens=0, completion_tokens=0, response_time_ms=None, custom_llm_provider=None, region_name=None, ): """ Calculates the cost per token for a given model, prompt tokens, and completion tokens. Parameters: model (str): The name of the model to use. Default is "" prompt_tokens (int): The number of tokens in the prompt. completion_tokens (int): The number of tokens in the completion. Returns: tuple: A tuple containing the cost in USD dollars for prompt tokens and completion tokens, respectively. """ # given prompt_tokens_cost_usd_dollar = 0 completion_tokens_cost_usd_dollar = 0 model_cost_ref = litellm.model_cost model_with_provider = model if custom_llm_provider is not None: model_with_provider = custom_llm_provider + "/" + model if region_name is not None: model_with_provider_and_region = ( f"{custom_llm_provider}/{region_name}/{model}" ) if ( model_with_provider_and_region in model_cost_ref ): # use region based pricing, if it's available model_with_provider = model_with_provider_and_region model_without_prefix = model model_parts = model.split("/") if len(model_parts) > 1: model_without_prefix = model_parts[1] else: model_without_prefix = model """ Code block that formats model to lookup in litellm.model_cost Option1. model = "bedrock/ap-northeast-1/anthropic.claude-instant-v1". This is the most accurate since it is region based. Should always be option 1 Option2. model = "openai/gpt-4" - model = provider/model Option3. model = "anthropic.claude-3" - model = model """ if ( model_with_provider in model_cost_ref ): # Option 2. use model with provider, model = "openai/gpt-4" model = model_with_provider elif model in model_cost_ref: # Option 1. use model passed, model="gpt-4" model = model elif ( model_without_prefix in model_cost_ref ): # Option 3. 
if user passed model="bedrock/anthropic.claude-3", use model="anthropic.claude-3" model = model_without_prefix # see this https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models print_verbose(f"Looking up model={model} in model_cost_map") if model in model_cost_ref: print_verbose(f"Success: model={model} in model_cost_map") print_verbose( f"prompt_tokens={prompt_tokens}; completion_tokens={completion_tokens}" ) if ( model_cost_ref[model].get("input_cost_per_token", None) is not None and model_cost_ref[model].get("output_cost_per_token", None) is not None ): ## COST PER TOKEN ## prompt_tokens_cost_usd_dollar = ( model_cost_ref[model]["input_cost_per_token"] * prompt_tokens ) completion_tokens_cost_usd_dollar = ( model_cost_ref[model]["output_cost_per_token"] * completion_tokens ) elif ( model_cost_ref[model].get("output_cost_per_second", None) is not None and response_time_ms is not None ): print_verbose( f"For model={model} - output_cost_per_second: {model_cost_ref[model].get('output_cost_per_second')}; response time: {response_time_ms}" ) ## COST PER SECOND ## prompt_tokens_cost_usd_dollar = 0 completion_tokens_cost_usd_dollar = ( model_cost_ref[model]["output_cost_per_second"] * response_time_ms / 1000 ) elif ( model_cost_ref[model].get("input_cost_per_second", None) is not None and response_time_ms is not None ): print_verbose( f"For model={model} - input_cost_per_second: {model_cost_ref[model].get('input_cost_per_second')}; response time: {response_time_ms}" ) ## COST PER SECOND ## prompt_tokens_cost_usd_dollar = ( model_cost_ref[model]["input_cost_per_second"] * response_time_ms / 1000 ) completion_tokens_cost_usd_dollar = 0.0 print_verbose( f"Returned custom cost for model={model} - prompt_tokens_cost_usd_dollar: {prompt_tokens_cost_usd_dollar}, completion_tokens_cost_usd_dollar: {completion_tokens_cost_usd_dollar}" ) return prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar elif "ft:gpt-3.5-turbo" in model: print_verbose(f"Cost Tracking: {model} is an OpenAI FinteTuned LLM") # fuzzy match ft:gpt-3.5-turbo:abcd-id-cool-litellm prompt_tokens_cost_usd_dollar = ( model_cost_ref["ft:gpt-3.5-turbo"]["input_cost_per_token"] * prompt_tokens ) completion_tokens_cost_usd_dollar = ( model_cost_ref["ft:gpt-3.5-turbo"]["output_cost_per_token"] * completion_tokens ) return prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar elif model in litellm.azure_llms: verbose_logger.debug(f"Cost Tracking: {model} is an Azure LLM") model = litellm.azure_llms[model] verbose_logger.debug( f"applying cost={model_cost_ref[model]['input_cost_per_token']} for prompt_tokens={prompt_tokens}" ) prompt_tokens_cost_usd_dollar = ( model_cost_ref[model]["input_cost_per_token"] * prompt_tokens ) verbose_logger.debug( f"applying cost={model_cost_ref[model]['output_cost_per_token']} for completion_tokens={completion_tokens}" ) completion_tokens_cost_usd_dollar = ( model_cost_ref[model]["output_cost_per_token"] * completion_tokens ) return prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar elif model in litellm.azure_embedding_models: verbose_logger.debug(f"Cost Tracking: {model} is an Azure Embedding Model") model = litellm.azure_embedding_models[model] prompt_tokens_cost_usd_dollar = ( model_cost_ref[model]["input_cost_per_token"] * prompt_tokens ) completion_tokens_cost_usd_dollar = ( model_cost_ref[model]["output_cost_per_token"] * completion_tokens ) return prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar else: # if model is not in 
model_prices_and_context_window.json. Raise an exception-let users know error_str = f"Model not in model_prices_and_context_window.json. You passed model={model}. Register pricing for model - https://docs.litellm.ai/docs/proxy/custom_pricing\n" raise litellm.exceptions.NotFoundError( # type: ignore message=error_str, model=model, response=httpx.Response( status_code=404, content=error_str, request=httpx.Request(method="cost_per_token", url="https://github.com/BerriAI/litellm"), # type: ignore ), llm_provider="", )
(model='', prompt_tokens=0, completion_tokens=0, response_time_ms=None, custom_llm_provider=None, region_name=None)
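A usage sketch; the token counts are illustrative and the model must be present in litellm's pricing map:
```
from litellm import cost_per_token

# Returns a (prompt_cost, completion_cost) tuple in USD.
prompt_cost_usd, completion_cost_usd = cost_per_token(
    model="gpt-3.5-turbo",
    prompt_tokens=120,
    completion_tokens=45,
)
print(prompt_cost_usd + completion_cost_usd)
```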
64,282
litellm.utils
create_pretrained_tokenizer
Creates a tokenizer from an existing file on a Hugging Face repository, to be used with `token_counter`. Args: identifier (str): The identifier of a model on the Hugging Face Hub that contains a tokenizer.json file revision (str, defaults to main): A branch or commit id auth_token (str, optional, defaults to None): An optional auth token used to access private repositories on the Hugging Face Hub Returns: dict: A dictionary with the tokenizer and its type.
def create_pretrained_tokenizer( identifier: str, revision="main", auth_token: Optional[str] = None ): """ Creates a tokenizer from an existing file on a HuggingFace repository to be used with `token_counter`. Args: identifier (str): The identifier of a Model on the Hugging Face Hub, that contains a tokenizer.json file revision (str, defaults to main): A branch or commit id auth_token (str, optional, defaults to None): An optional auth token used to access private repositories on the Hugging Face Hub Returns: dict: A dictionary with the tokenizer and its type. """ tokenizer = Tokenizer.from_pretrained( identifier, revision=revision, auth_token=auth_token ) return {"type": "huggingface_tokenizer", "tokenizer": tokenizer}
(identifier: str, revision='main', auth_token: Optional[str] = None)
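A usage sketch, assuming token_counter accepts the returned dict via its custom_tokenizer parameter; the repository id is illustrative and fetching it requires network access:
```
from litellm.utils import create_pretrained_tokenizer, token_counter

tokenizer = create_pretrained_tokenizer("bert-base-uncased")  # illustrative public repo with tokenizer.json
n_tokens = token_counter(
    model="",  # model can be left empty when a custom tokenizer is supplied
    custom_tokenizer=tokenizer,
    text="How many tokens is this sentence?",
)
print(n_tokens)
```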
64,283
litellm.assistants.main
create_thread
- get the llm provider - if openai - route it there - pass through relevant params ``` from litellm import create_thread create_thread( custom_llm_provider="openai", ### OPTIONAL ### messages = [{ "role": "user", "content": "Hello, what is AI?" }, { "role": "user", "content": "How does AI work? Explain it in simple terms." }] ) ```
def create_thread( custom_llm_provider: Literal["openai"], messages: Optional[Iterable[OpenAICreateThreadParamsMessage]] = None, metadata: Optional[dict] = None, tool_resources: Optional[OpenAICreateThreadParamsToolResources] = None, client: Optional[OpenAI] = None, **kwargs, ) -> Thread: """ - get the llm provider - if openai - route it there - pass through relevant params ``` from litellm import create_thread create_thread( custom_llm_provider="openai", ### OPTIONAL ### messages = { "role": "user", "content": "Hello, what is AI?" }, { "role": "user", "content": "How does AI work? Explain it in simple terms." }] ) ``` """ optional_params = GenericLiteLLMParams(**kwargs) ### TIMEOUT LOGIC ### timeout = optional_params.timeout or kwargs.get("request_timeout", 600) or 600 # set timeout for 10 minutes by default if ( timeout is not None and isinstance(timeout, httpx.Timeout) and supports_httpx_timeout(custom_llm_provider) == False ): read_timeout = timeout.read or 600 timeout = read_timeout # default 10 min timeout elif timeout is not None and not isinstance(timeout, httpx.Timeout): timeout = float(timeout) # type: ignore elif timeout is None: timeout = 600.0 response: Optional[Thread] = None if custom_llm_provider == "openai": api_base = ( optional_params.api_base # for deepinfra/perplexity/anyscale/groq we check in get_llm_provider and pass in the api base from there or litellm.api_base or os.getenv("OPENAI_API_BASE") or "https://api.openai.com/v1" ) organization = ( optional_params.organization or litellm.organization or os.getenv("OPENAI_ORGANIZATION", None) or None # default - https://github.com/openai/openai-python/blob/284c1799070c723c6a553337134148a7ab088dd8/openai/util.py#L105 ) # set API KEY api_key = ( optional_params.api_key or litellm.api_key # for deepinfra/perplexity/anyscale we check in get_llm_provider and pass in the api key from there or litellm.openai_key or os.getenv("OPENAI_API_KEY") ) response = openai_assistants_api.create_thread( messages=messages, metadata=metadata, api_base=api_base, api_key=api_key, timeout=timeout, max_retries=optional_params.max_retries, organization=organization, client=client, ) else: raise litellm.exceptions.BadRequestError( message="LiteLLM doesn't support {} for 'create_thread'. Only 'openai' is supported.".format( custom_llm_provider ), model="n/a", llm_provider=custom_llm_provider, response=httpx.Response( status_code=400, content="Unsupported provider", request=httpx.Request(method="create_thread", url="https://github.com/BerriAI/litellm"), # type: ignore ), ) return response
(custom_llm_provider: Literal['openai'], messages: Optional[Iterable[openai.types.beta.thread_create_params.Message]] = None, metadata: Optional[dict] = None, tool_resources: Optional[litellm.types.llms.openai.OpenAICreateThreadParamsToolResources] = None, client: Optional[openai.OpenAI] = None, **kwargs) -> litellm.types.llms.openai.Thread
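A usage sketch; it assumes an OPENAI_API_KEY is set in the environment, and the message content is illustrative:
```
from litellm import create_thread

thread = create_thread(
    custom_llm_provider="openai",
    messages=[{"role": "user", "content": "Hello, what is AI?"}],
)
print(thread.id)
```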
64,284
litellm.utils
create_tokenizer
Creates a tokenizer from a valid JSON string for use with `token_counter`. Args: json (str): A valid JSON string representing a previously serialized tokenizer Returns: dict: A dictionary with the tokenizer and its type.
def create_tokenizer(json: str): """ Creates a tokenizer from a valid JSON string for use with `token_counter`. Args: json (str): A valid JSON string representing a previously serialized tokenizer Returns: dict: A dictionary with the tokenizer and its type. """ tokenizer = Tokenizer.from_str(json) return {"type": "huggingface_tokenizer", "tokenizer": tokenizer}
(json: str)
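A round-trip sketch, assuming the `tokenizers` library is installed and that token_counter accepts the returned dict via custom_tokenizer; the repository id is illustrative and fetching it requires network access:
```
from tokenizers import Tokenizer
from litellm.utils import create_tokenizer, token_counter

# Serialize a Hugging Face tokenizer to JSON, then rebuild it for token_counter.
hf_tokenizer = Tokenizer.from_pretrained("bert-base-uncased")
tokenizer = create_tokenizer(hf_tokenizer.to_str())
print(token_counter(model="", custom_tokenizer=tokenizer, text="Hello world"))
```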
64,286
litellm.llms.prompt_templates.factory
custom_prompt
null
def custom_prompt( role_dict: dict, messages: list, initial_prompt_value: str = "", final_prompt_value: str = "", bos_token: str = "", eos_token: str = "", ): prompt = bos_token + initial_prompt_value bos_open = True ## a bos token is at the start of a system / human message ## an eos token is at the end of the assistant response to the message for message in messages: role = message["role"] if role in ["system", "human"] and not bos_open: prompt += bos_token bos_open = True pre_message_str = ( role_dict[role]["pre_message"] if role in role_dict and "pre_message" in role_dict[role] else "" ) post_message_str = ( role_dict[role]["post_message"] if role in role_dict and "post_message" in role_dict[role] else "" ) prompt += pre_message_str + message["content"] + post_message_str if role == "assistant": prompt += eos_token bos_open = False prompt += final_prompt_value return prompt
(role_dict: dict, messages: list, initial_prompt_value: str = '', final_prompt_value: str = '', bos_token: str = '', eos_token: str = '')
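A usage sketch with an illustrative, Llama-2-style role_dict; the role markers and tokens below are placeholders, not required values:
```
from litellm.llms.prompt_templates.factory import custom_prompt

role_dict = {
    "system": {"pre_message": "<<SYS>>\n", "post_message": "\n<</SYS>>\n"},
    "user": {"pre_message": "[INST] ", "post_message": " [/INST]"},
    "assistant": {"pre_message": " ", "post_message": " "},
}
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is the capital of France?"},
]

prompt = custom_prompt(
    role_dict=role_dict,
    messages=messages,
    bos_token="<s>",
    eos_token="</s>",
)
print(prompt)
```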
64,289
litellm.utils
decode
null
def decode(model="", tokens: List[int] = [], custom_tokenizer: Optional[dict] = None): tokenizer_json = custom_tokenizer or _select_tokenizer(model=model) dec = tokenizer_json["tokenizer"].decode(tokens) return dec
(model='', tokens: List[int] = [], custom_tokenizer: Optional[dict] = None)
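A round-trip sketch, assuming the companion encode() helper in litellm.utils; the model name is illustrative:
```
from litellm.utils import encode, decode

# Encode then decode with the tokenizer litellm selects for the given model name.
tokens = encode(model="gpt-3.5-turbo", text="Hello world")
print(decode(model="gpt-3.5-turbo", tokens=tokens))
```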
64,291
litellm.caching
disable_cache
Disable the cache used by LiteLLM. This function disables the cache used by the LiteLLM module. It removes the cache-related callbacks from the input_callback, success_callback, and _async_success_callback lists. It also sets the litellm.cache attribute to None. Parameters: None Returns: None
def disable_cache(): """ Disable the cache used by LiteLLM. This function disables the cache used by the LiteLLM module. It removes the cache-related callbacks from the input_callback, success_callback, and _async_success_callback lists. It also sets the litellm.cache attribute to None. Parameters: None Returns: None """ from contextlib import suppress print_verbose("LiteLLM: Disabling Cache") with suppress(ValueError): litellm.input_callback.remove("cache") litellm.success_callback.remove("cache") litellm._async_success_callback.remove("cache") litellm.cache = None print_verbose(f"LiteLLM: Cache disabled, litellm.cache={litellm.cache}")
()
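A usage sketch that configures an in-memory cache directly and then disables it; the Cache constructor argument is illustrative:
```
import litellm
from litellm.caching import Cache, disable_cache

litellm.cache = Cache(type="local")  # in-memory cache
disable_cache()
assert litellm.cache is None
```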
64,294
litellm.main
embedding
Embedding function that calls an API to generate embeddings for the given input. Parameters: - model: The embedding model to use. - input: The input for which embeddings are to be generated. - dimensions: The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3 and later models. - timeout: The timeout value for the API call, default 10 mins - litellm_call_id: The call ID for litellm logging. - litellm_logging_obj: The litellm logging object. - logger_fn: The logger function. - api_base: Optional. The base URL for the API. - api_version: Optional. The version of the API. - api_key: Optional. The API key to use. - api_type: Optional. The type of the API. - caching: A boolean indicating whether to enable caching. - custom_llm_provider: The custom llm provider. Returns: - response: The response received from the API call. Raises: - exception_type: If an exception occurs during the API call.
def embedding( model, input=[], # Optional params dimensions: Optional[int] = None, timeout=600, # default to 10 minutes # set api_base, api_version, api_key api_base: Optional[str] = None, api_version: Optional[str] = None, api_key: Optional[str] = None, api_type: Optional[str] = None, caching: bool = False, user: Optional[str] = None, custom_llm_provider=None, litellm_call_id=None, litellm_logging_obj=None, logger_fn=None, **kwargs, ): """ Embedding function that calls an API to generate embeddings for the given input. Parameters: - model: The embedding model to use. - input: The input for which embeddings are to be generated. - dimensions: The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3 and later models. - timeout: The timeout value for the API call, default 10 mins - litellm_call_id: The call ID for litellm logging. - litellm_logging_obj: The litellm logging object. - logger_fn: The logger function. - api_base: Optional. The base URL for the API. - api_version: Optional. The version of the API. - api_key: Optional. The API key to use. - api_type: Optional. The type of the API. - caching: A boolean indicating whether to enable caching. - custom_llm_provider: The custom llm provider. Returns: - response: The response received from the API call. Raises: - exception_type: If an exception occurs during the API call. """ azure = kwargs.get("azure", None) client = kwargs.pop("client", None) rpm = kwargs.pop("rpm", None) tpm = kwargs.pop("tpm", None) max_parallel_requests = kwargs.pop("max_parallel_requests", None) model_info = kwargs.get("model_info", None) metadata = kwargs.get("metadata", None) encoding_format = kwargs.get("encoding_format", None) proxy_server_request = kwargs.get("proxy_server_request", None) aembedding = kwargs.get("aembedding", None) ### CUSTOM MODEL COST ### input_cost_per_token = kwargs.get("input_cost_per_token", None) output_cost_per_token = kwargs.get("output_cost_per_token", None) input_cost_per_second = kwargs.get("input_cost_per_second", None) output_cost_per_second = kwargs.get("output_cost_per_second", None) openai_params = [ "user", "dimensions", "request_timeout", "api_base", "api_version", "api_key", "deployment_id", "organization", "base_url", "default_headers", "timeout", "max_retries", "encoding_format", ] litellm_params = [ "metadata", "aembedding", "caching", "mock_response", "api_key", "api_version", "api_base", "force_timeout", "logger_fn", "verbose", "custom_llm_provider", "litellm_logging_obj", "litellm_call_id", "use_client", "id", "fallbacks", "azure", "headers", "model_list", "num_retries", "context_window_fallback_dict", "retry_policy", "roles", "final_prompt_value", "bos_token", "eos_token", "request_timeout", "complete_response", "self", "client", "rpm", "tpm", "max_parallel_requests", "input_cost_per_token", "output_cost_per_token", "input_cost_per_second", "output_cost_per_second", "hf_model_name", "proxy_server_request", "model_info", "preset_cache_key", "caching_groups", "ttl", "cache", "no-log", "region_name", "allowed_model_region", ] default_params = openai_params + litellm_params non_default_params = { k: v for k, v in kwargs.items() if k not in default_params } # model-specific params - pass them straight to the model/provider model, custom_llm_provider, dynamic_api_key, api_base = get_llm_provider( model=model, custom_llm_provider=custom_llm_provider, api_base=api_base, api_key=api_key, ) optional_params = get_optional_params_embeddings( model=model, user=user, 
dimensions=dimensions, encoding_format=encoding_format, custom_llm_provider=custom_llm_provider, **non_default_params, ) ### REGISTER CUSTOM MODEL PRICING -- IF GIVEN ### if input_cost_per_token is not None and output_cost_per_token is not None: litellm.register_model( { model: { "input_cost_per_token": input_cost_per_token, "output_cost_per_token": output_cost_per_token, "litellm_provider": custom_llm_provider, } } ) if input_cost_per_second is not None: # time based pricing just needs cost in place output_cost_per_second = output_cost_per_second or 0.0 litellm.register_model( { model: { "input_cost_per_second": input_cost_per_second, "output_cost_per_second": output_cost_per_second, "litellm_provider": custom_llm_provider, } } ) try: response = None logging = litellm_logging_obj logging.update_environment_variables( model=model, user=user, optional_params=optional_params, litellm_params={ "timeout": timeout, "azure": azure, "litellm_call_id": litellm_call_id, "logger_fn": logger_fn, "proxy_server_request": proxy_server_request, "model_info": model_info, "metadata": metadata, "aembedding": aembedding, "preset_cache_key": None, "stream_response": {}, }, ) if azure == True or custom_llm_provider == "azure": # azure configs api_type = get_secret("AZURE_API_TYPE") or "azure" api_base = api_base or litellm.api_base or get_secret("AZURE_API_BASE") api_version = ( api_version or litellm.api_version or get_secret("AZURE_API_VERSION") ) azure_ad_token = optional_params.pop("azure_ad_token", None) or get_secret( "AZURE_AD_TOKEN" ) api_key = ( api_key or litellm.api_key or litellm.azure_key or get_secret("AZURE_API_KEY") ) ## EMBEDDING CALL response = azure_chat_completions.embedding( model=model, input=input, api_base=api_base, api_key=api_key, api_version=api_version, azure_ad_token=azure_ad_token, logging_obj=logging, timeout=timeout, model_response=EmbeddingResponse(), optional_params=optional_params, client=client, aembedding=aembedding, ) elif ( model in litellm.open_ai_embedding_models or custom_llm_provider == "openai" ): api_base = ( api_base or litellm.api_base or get_secret("OPENAI_API_BASE") or "https://api.openai.com/v1" ) openai.organization = ( litellm.organization or get_secret("OPENAI_ORGANIZATION") or None # default - https://github.com/openai/openai-python/blob/284c1799070c723c6a553337134148a7ab088dd8/openai/util.py#L105 ) # set API KEY api_key = ( api_key or litellm.api_key or litellm.openai_key or get_secret("OPENAI_API_KEY") ) api_type = "openai" api_version = None ## EMBEDDING CALL response = openai_chat_completions.embedding( model=model, input=input, api_base=api_base, api_key=api_key, logging_obj=logging, timeout=timeout, model_response=EmbeddingResponse(), optional_params=optional_params, client=client, aembedding=aembedding, ) elif custom_llm_provider == "cohere": cohere_key = ( api_key or litellm.cohere_key or get_secret("COHERE_API_KEY") or get_secret("CO_API_KEY") or litellm.api_key ) response = cohere.embedding( model=model, input=input, optional_params=optional_params, encoding=encoding, api_key=cohere_key, logging_obj=logging, model_response=EmbeddingResponse(), ) elif custom_llm_provider == "huggingface": api_key = ( api_key or litellm.huggingface_key or get_secret("HUGGINGFACE_API_KEY") or litellm.api_key ) response = huggingface.embedding( model=model, input=input, encoding=encoding, api_key=api_key, api_base=api_base, logging_obj=logging, model_response=EmbeddingResponse(), ) elif custom_llm_provider == "bedrock": response = bedrock.embedding( model=model, 
input=input, encoding=encoding, logging_obj=logging, optional_params=optional_params, model_response=EmbeddingResponse(), ) elif custom_llm_provider == "triton": if api_base is None: raise ValueError( "api_base is required for triton. Please pass `api_base`" ) response = triton_chat_completions.embedding( model=model, input=input, api_base=api_base, api_key=api_key, logging_obj=logging, timeout=timeout, model_response=EmbeddingResponse(), optional_params=optional_params, client=client, aembedding=aembedding, ) elif custom_llm_provider == "vertex_ai": vertex_ai_project = ( optional_params.pop("vertex_project", None) or optional_params.pop("vertex_ai_project", None) or litellm.vertex_project or get_secret("VERTEXAI_PROJECT") or get_secret("VERTEX_PROJECT") ) vertex_ai_location = ( optional_params.pop("vertex_location", None) or optional_params.pop("vertex_ai_location", None) or litellm.vertex_location or get_secret("VERTEXAI_LOCATION") or get_secret("VERTEX_LOCATION") ) vertex_credentials = ( optional_params.pop("vertex_credentials", None) or optional_params.pop("vertex_ai_credentials", None) or get_secret("VERTEXAI_CREDENTIALS") or get_secret("VERTEX_CREDENTIALS") ) response = vertex_ai.embedding( model=model, input=input, encoding=encoding, logging_obj=logging, optional_params=optional_params, model_response=EmbeddingResponse(), vertex_project=vertex_ai_project, vertex_location=vertex_ai_location, vertex_credentials=vertex_credentials, aembedding=aembedding, print_verbose=print_verbose, ) elif custom_llm_provider == "oobabooga": response = oobabooga.embedding( model=model, input=input, encoding=encoding, api_base=api_base, logging_obj=logging, optional_params=optional_params, model_response=EmbeddingResponse(), ) elif custom_llm_provider == "ollama": api_base = ( litellm.api_base or api_base or get_secret("OLLAMA_API_BASE") or "http://localhost:11434" ) if isinstance(input, str): input = [input] if not all(isinstance(item, str) for item in input): raise litellm.BadRequestError( message=f"Invalid input for ollama embeddings. 
input={input}", model=model, # type: ignore llm_provider="ollama", # type: ignore ) ollama_embeddings_fn = ( ollama.ollama_aembeddings if aembedding else ollama.ollama_embeddings ) response = ollama_embeddings_fn( api_base=api_base, model=model, prompts=input, encoding=encoding, logging_obj=logging, optional_params=optional_params, model_response=EmbeddingResponse(), ) elif custom_llm_provider == "sagemaker": response = sagemaker.embedding( model=model, input=input, encoding=encoding, logging_obj=logging, optional_params=optional_params, model_response=EmbeddingResponse(), print_verbose=print_verbose, ) elif custom_llm_provider == "mistral": api_key = api_key or litellm.api_key or get_secret("MISTRAL_API_KEY") response = openai_chat_completions.embedding( model=model, input=input, api_base=api_base, api_key=api_key, logging_obj=logging, timeout=timeout, model_response=EmbeddingResponse(), optional_params=optional_params, client=client, aembedding=aembedding, ) elif custom_llm_provider == "voyage": api_key = api_key or litellm.api_key or get_secret("VOYAGE_API_KEY") response = openai_chat_completions.embedding( model=model, input=input, api_base=api_base, api_key=api_key, logging_obj=logging, timeout=timeout, model_response=EmbeddingResponse(), optional_params=optional_params, client=client, aembedding=aembedding, ) elif custom_llm_provider == "xinference": api_key = ( api_key or litellm.api_key or get_secret("XINFERENCE_API_KEY") or "stub-xinference-key" ) # xinference does not need an api key, pass a stub key if user did not set one api_base = ( api_base or litellm.api_base or get_secret("XINFERENCE_API_BASE") or "http://127.0.0.1:9997/v1" ) response = openai_chat_completions.embedding( model=model, input=input, api_base=api_base, api_key=api_key, logging_obj=logging, timeout=timeout, model_response=EmbeddingResponse(), optional_params=optional_params, client=client, aembedding=aembedding, ) elif custom_llm_provider == "watsonx": response = watsonx.IBMWatsonXAI().embedding( model=model, input=input, encoding=encoding, logging_obj=logging, optional_params=optional_params, model_response=EmbeddingResponse(), ) else: args = locals() raise ValueError(f"No valid embedding model args passed in - {args}") if response is not None and hasattr(response, "_hidden_params"): response._hidden_params["custom_llm_provider"] = custom_llm_provider return response except Exception as e: ## LOGGING logging.post_call( input=input, api_key=api_key, original_response=str(e), ) ## Map to OpenAI Exception raise exception_type( model=model, original_exception=e, custom_llm_provider=custom_llm_provider, extra_kwargs=kwargs, )
(model, input=[], dimensions: Optional[int] = None, timeout=600, api_base: Optional[str] = None, api_version: Optional[str] = None, api_key: Optional[str] = None, api_type: Optional[str] = None, caching: bool = False, user: Optional[str] = None, custom_llm_provider=None, litellm_call_id=None, litellm_logging_obj=None, logger_fn=None, **kwargs)
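A minimal usage sketch for the embedding entry above, based only on the signature shown. It assumes litellm is installed and OPENAI_API_KEY is set; the model name and inputs are illustrative, and the response layout is assumed to follow the OpenAI-compatible embedding format.

```
import litellm

# Provider is inferred from the model name; the key falls back to the
# OPENAI_API_KEY environment variable when api_key is not passed.
response = litellm.embedding(
    model="text-embedding-3-small",   # illustrative model name
    input=["good morning from litellm", "a second item"],
    dimensions=256,                   # only honored by text-embedding-3+ models
)

print(response.data[0]["embedding"][:5])  # first floats of the first vector
print(response.usage)                     # token usage reported by the provider
```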
64,295
litellm.caching
enable_cache
Enable cache with the specified configuration.

Args:
    type (Optional[Literal["local", "redis"]]): The type of cache to enable. Defaults to "local".
    host (Optional[str]): The host address of the cache server. Defaults to None.
    port (Optional[str]): The port number of the cache server. Defaults to None.
    password (Optional[str]): The password for the cache server. Defaults to None.
    supported_call_types (Optional[List[Literal["completion", "acompletion", "embedding", "aembedding"]]]):
        The supported call types for the cache. Defaults to ["completion", "acompletion", "embedding", "aembedding"].
    **kwargs: Additional keyword arguments.

Returns:
    None

Raises:
    None
def enable_cache(
    type: Optional[Literal["local", "redis", "s3"]] = "local",
    host: Optional[str] = None,
    port: Optional[str] = None,
    password: Optional[str] = None,
    supported_call_types: Optional[
        List[
            Literal[
                "completion",
                "acompletion",
                "embedding",
                "aembedding",
                "atranscription",
                "transcription",
            ]
        ]
    ] = [
        "completion",
        "acompletion",
        "embedding",
        "aembedding",
        "atranscription",
        "transcription",
    ],
    **kwargs,
):
    """
    Enable cache with the specified configuration.

    Args:
        type (Optional[Literal["local", "redis"]]): The type of cache to enable. Defaults to "local".
        host (Optional[str]): The host address of the cache server. Defaults to None.
        port (Optional[str]): The port number of the cache server. Defaults to None.
        password (Optional[str]): The password for the cache server. Defaults to None.
        supported_call_types (Optional[List[Literal["completion", "acompletion", "embedding", "aembedding"]]]):
            The supported call types for the cache. Defaults to ["completion", "acompletion", "embedding", "aembedding"].
        **kwargs: Additional keyword arguments.

    Returns:
        None

    Raises:
        None
    """
    print_verbose("LiteLLM: Enabling Cache")
    if "cache" not in litellm.input_callback:
        litellm.input_callback.append("cache")
    if "cache" not in litellm.success_callback:
        litellm.success_callback.append("cache")
    if "cache" not in litellm._async_success_callback:
        litellm._async_success_callback.append("cache")

    if litellm.cache == None:
        litellm.cache = Cache(
            type=type,
            host=host,
            port=port,
            password=password,
            supported_call_types=supported_call_types,
            **kwargs,
        )
    print_verbose(f"LiteLLM: Cache enabled, litellm.cache={litellm.cache}")
    print_verbose(f"LiteLLM Cache: {vars(litellm.cache)}")
(type: Optional[Literal['local', 'redis', 's3']] = 'local', host: Optional[str] = None, port: Optional[str] = None, password: Optional[str] = None, supported_call_types: Optional[List[Literal['completion', 'acompletion', 'embedding', 'aembedding', 'atranscription', 'transcription']]] = ['completion', 'acompletion', 'embedding', 'aembedding', 'atranscription', 'transcription'], **kwargs)
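A brief usage sketch for enable_cache, assuming a reachable Redis instance; the host and port values are placeholders.

```
import litellm
from litellm import enable_cache

# Cache completion and embedding calls in Redis; identical requests are
# served from the cache instead of hitting the provider again.
enable_cache(
    type="redis",
    host="localhost",   # placeholder - point at your Redis instance
    port="6379",
    password=None,
    supported_call_types=["completion", "acompletion", "embedding", "aembedding"],
)

print(litellm.cache is not None)  # True once the cache object is configured
```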
64,296
litellm.utils
encode
Encodes the given text using the specified model.

Args:
    model (str): The name of the model to use for tokenization.
    custom_tokenizer (Optional[dict]): A custom tokenizer created with the `create_pretrained_tokenizer` or `create_tokenizer` method. Must be a dictionary with a string value for `type` and Tokenizer for `tokenizer`. Default is None.
    text (str): The text to be encoded.

Returns:
    enc: The encoded text.
def encode(model="", text="", custom_tokenizer: Optional[dict] = None): """ Encodes the given text using the specified model. Args: model (str): The name of the model to use for tokenization. custom_tokenizer (Optional[dict]): A custom tokenizer created with the `create_pretrained_tokenizer` or `create_tokenizer` method. Must be a dictionary with a string value for `type` and Tokenizer for `tokenizer`. Default is None. text (str): The text to be encoded. Returns: enc: The encoded text. """ tokenizer_json = custom_tokenizer or _select_tokenizer(model=model) enc = tokenizer_json["tokenizer"].encode(text) return enc
(model='', text='', custom_tokenizer: Optional[dict] = None)
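A short sketch of using encode for client-side token counting; the model name is illustrative, and the exact return type depends on the tokenizer litellm selects for that model.

```
from litellm import encode

# Count the tokens a prompt occupies before sending it.
tokens = encode(model="gpt-3.5-turbo", text="Hello, how are you?")
print(len(tokens))
```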
64,298
litellm.utils
exception_type
null
def exception_type( model, original_exception, custom_llm_provider, completion_kwargs={}, extra_kwargs={}, ): global user_logger_fn, liteDebuggerClient exception_mapping_worked = False if litellm.suppress_debug_info is False: print() # noqa print( # noqa "\033[1;31mGive Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new\033[0m" # noqa ) # noqa print( # noqa "LiteLLM.Info: If you need to debug this error, use `litellm.set_verbose=True'." # noqa ) # noqa print() # noqa try: if model: error_str = str(original_exception) if isinstance(original_exception, BaseException): exception_type = type(original_exception).__name__ else: exception_type = "" ################################################################################ # Common Extra information needed for all providers # We pass num retries, api_base, vertex_deployment etc to the exception here ################################################################################ _api_base = litellm.get_api_base(model=model, optional_params=extra_kwargs) messages = litellm.get_first_chars_messages(kwargs=completion_kwargs) _vertex_project = extra_kwargs.get("vertex_project") _vertex_location = extra_kwargs.get("vertex_location") _metadata = extra_kwargs.get("metadata", {}) or {} _model_group = _metadata.get("model_group") _deployment = _metadata.get("deployment") extra_information = f"\nModel: {model}" if _api_base: extra_information += f"\nAPI Base: {_api_base}" if messages and len(messages) > 0: extra_information += f"\nMessages: {messages}" if _model_group is not None: extra_information += f"\nmodel_group: {_model_group}\n" if _deployment is not None: extra_information += f"\ndeployment: {_deployment}\n" if _vertex_project is not None: extra_information += f"\nvertex_project: {_vertex_project}\n" if _vertex_location is not None: extra_information += f"\nvertex_location: {_vertex_location}\n" # on litellm proxy add key name + team to exceptions extra_information = _add_key_name_and_team_to_alert( request_info=extra_information, metadata=_metadata ) ################################################################################ # End of Common Extra information Needed for all providers ################################################################################ ################################################################################ #################### Start of Provider Exception mapping #################### ################################################################################ if "Request Timeout Error" in error_str or "Request timed out" in error_str: exception_mapping_worked = True raise Timeout( message=f"APITimeoutError - Request timed out. 
{extra_information} \n error_str: {error_str}", model=model, llm_provider=custom_llm_provider, ) if ( custom_llm_provider == "openai" or custom_llm_provider == "text-completion-openai" or custom_llm_provider == "custom_openai" or custom_llm_provider in litellm.openai_compatible_providers ): # custom_llm_provider is openai, make it OpenAI if hasattr(original_exception, "message"): message = original_exception.message else: message = str(original_exception) if message is not None and isinstance(message, str): message = message.replace("OPENAI", custom_llm_provider.upper()) message = message.replace("openai", custom_llm_provider) message = message.replace("OpenAI", custom_llm_provider) if custom_llm_provider == "openai": exception_provider = "OpenAI" + "Exception" else: exception_provider = ( custom_llm_provider[0].upper() + custom_llm_provider[1:] + "Exception" ) if "This model's maximum context length is" in error_str: exception_mapping_worked = True raise ContextWindowExceededError( message=f"{exception_provider} - {message} {extra_information}", llm_provider=custom_llm_provider, model=model, response=original_exception.response, ) elif ( "invalid_request_error" in error_str and "model_not_found" in error_str ): exception_mapping_worked = True raise NotFoundError( message=f"{exception_provider} - {message} {extra_information}", llm_provider=custom_llm_provider, model=model, response=original_exception.response, ) elif ( "invalid_request_error" in error_str and "content_policy_violation" in error_str ): exception_mapping_worked = True raise ContentPolicyViolationError( message=f"{exception_provider} - {message} {extra_information}", llm_provider=custom_llm_provider, model=model, response=original_exception.response, ) elif ( "invalid_request_error" in error_str and "Incorrect API key provided" not in error_str ): exception_mapping_worked = True raise BadRequestError( message=f"{exception_provider} - {message} {extra_information}", llm_provider=custom_llm_provider, model=model, response=original_exception.response, ) elif "Request too large" in error_str: raise RateLimitError( message=f"{exception_provider} - {message} {extra_information}", model=model, llm_provider=custom_llm_provider, response=original_exception.response, ) elif ( "The api_key client option must be set either by passing api_key to the client or by setting the OPENAI_API_KEY environment variable" in error_str ): exception_mapping_worked = True raise AuthenticationError( message=f"{exception_provider} - {message} {extra_information}", llm_provider=custom_llm_provider, model=model, response=original_exception.response, ) elif "Mistral API raised a streaming error" in error_str: exception_mapping_worked = True _request = httpx.Request( method="POST", url="https://api.openai.com/v1" ) raise APIError( status_code=500, message=f"{exception_provider} - {message} {extra_information}", llm_provider=custom_llm_provider, model=model, request=_request, ) elif hasattr(original_exception, "status_code"): exception_mapping_worked = True if original_exception.status_code == 401: exception_mapping_worked = True raise AuthenticationError( message=f"{exception_provider} - {message} {extra_information}", llm_provider=custom_llm_provider, model=model, response=original_exception.response, ) elif original_exception.status_code == 404: exception_mapping_worked = True raise NotFoundError( message=f"{exception_provider} - {message} {extra_information}", model=model, llm_provider=custom_llm_provider, response=original_exception.response, ) elif 
original_exception.status_code == 408: exception_mapping_worked = True raise Timeout( message=f"{exception_provider} - {message} {extra_information}", model=model, llm_provider=custom_llm_provider, ) elif original_exception.status_code == 422: exception_mapping_worked = True raise BadRequestError( message=f"{exception_provider} - {message} {extra_information}", model=model, llm_provider=custom_llm_provider, response=original_exception.response, ) elif original_exception.status_code == 429: exception_mapping_worked = True raise RateLimitError( message=f"{exception_provider} - {message} {extra_information}", model=model, llm_provider=custom_llm_provider, response=original_exception.response, ) elif original_exception.status_code == 503: exception_mapping_worked = True raise ServiceUnavailableError( message=f"{exception_provider} - {message} {extra_information}", model=model, llm_provider=custom_llm_provider, response=original_exception.response, ) elif original_exception.status_code == 504: # gateway timeout error exception_mapping_worked = True raise Timeout( message=f"{exception_provider} - {message} {extra_information}", model=model, llm_provider=custom_llm_provider, ) else: exception_mapping_worked = True raise APIError( status_code=original_exception.status_code, message=f"{exception_provider} - {message} {extra_information}", llm_provider=custom_llm_provider, model=model, request=original_exception.request, ) else: # if no status code then it is an APIConnectionError: https://github.com/openai/openai-python#handling-errors raise APIConnectionError( message=f"{exception_provider} - {message} {extra_information}", llm_provider=custom_llm_provider, model=model, request=httpx.Request( method="POST", url="https://api.openai.com/v1/" ), ) elif custom_llm_provider == "anthropic": # one of the anthropics if hasattr(original_exception, "message"): if ( "prompt is too long" in original_exception.message or "prompt: length" in original_exception.message ): exception_mapping_worked = True raise ContextWindowExceededError( message=original_exception.message, model=model, llm_provider="anthropic", response=original_exception.response, ) if "Invalid API Key" in original_exception.message: exception_mapping_worked = True raise AuthenticationError( message=original_exception.message, model=model, llm_provider="anthropic", response=original_exception.response, ) if hasattr(original_exception, "status_code"): print_verbose(f"status_code: {original_exception.status_code}") if original_exception.status_code == 401: exception_mapping_worked = True raise AuthenticationError( message=f"AnthropicException - {original_exception.message}", llm_provider="anthropic", model=model, response=original_exception.response, ) elif ( original_exception.status_code == 400 or original_exception.status_code == 413 ): exception_mapping_worked = True raise BadRequestError( message=f"AnthropicException - {original_exception.message}", model=model, llm_provider="anthropic", response=original_exception.response, ) elif original_exception.status_code == 408: exception_mapping_worked = True raise Timeout( message=f"AnthropicException - {original_exception.message}", model=model, llm_provider="anthropic", ) elif original_exception.status_code == 429: exception_mapping_worked = True raise RateLimitError( message=f"AnthropicException - {original_exception.message}", llm_provider="anthropic", model=model, response=original_exception.response, ) elif original_exception.status_code == 500: exception_mapping_worked = True raise APIError( 
status_code=500, message=f"AnthropicException - {original_exception.message}. Handle with `litellm.APIError`.", llm_provider="anthropic", model=model, request=original_exception.request, ) elif custom_llm_provider == "replicate": if "Incorrect authentication token" in error_str: exception_mapping_worked = True raise AuthenticationError( message=f"ReplicateException - {error_str}", llm_provider="replicate", model=model, response=original_exception.response, ) elif "input is too long" in error_str: exception_mapping_worked = True raise ContextWindowExceededError( message=f"ReplicateException - {error_str}", model=model, llm_provider="replicate", response=original_exception.response, ) elif exception_type == "ModelError": exception_mapping_worked = True raise BadRequestError( message=f"ReplicateException - {error_str}", model=model, llm_provider="replicate", response=original_exception.response, ) elif "Request was throttled" in error_str: exception_mapping_worked = True raise RateLimitError( message=f"ReplicateException - {error_str}", llm_provider="replicate", model=model, response=original_exception.response, ) elif hasattr(original_exception, "status_code"): if original_exception.status_code == 401: exception_mapping_worked = True raise AuthenticationError( message=f"ReplicateException - {original_exception.message}", llm_provider="replicate", model=model, response=original_exception.response, ) elif ( original_exception.status_code == 400 or original_exception.status_code == 422 or original_exception.status_code == 413 ): exception_mapping_worked = True raise BadRequestError( message=f"ReplicateException - {original_exception.message}", model=model, llm_provider="replicate", response=original_exception.response, ) elif original_exception.status_code == 408: exception_mapping_worked = True raise Timeout( message=f"ReplicateException - {original_exception.message}", model=model, llm_provider="replicate", ) elif original_exception.status_code == 429: exception_mapping_worked = True raise RateLimitError( message=f"ReplicateException - {original_exception.message}", llm_provider="replicate", model=model, response=original_exception.response, ) elif original_exception.status_code == 500: exception_mapping_worked = True raise ServiceUnavailableError( message=f"ReplicateException - {original_exception.message}", llm_provider="replicate", model=model, response=original_exception.response, ) exception_mapping_worked = True raise APIError( status_code=500, message=f"ReplicateException - {str(original_exception)}", llm_provider="replicate", model=model, request=original_exception.request, ) elif custom_llm_provider == "bedrock": if ( "too many tokens" in error_str or "expected maxLength:" in error_str or "Input is too long" in error_str or "prompt: length: 1.." 
in error_str or "Too many input tokens" in error_str ): exception_mapping_worked = True raise ContextWindowExceededError( message=f"BedrockException: Context Window Error - {error_str}", model=model, llm_provider="bedrock", response=original_exception.response, ) if "Malformed input request" in error_str: exception_mapping_worked = True raise BadRequestError( message=f"BedrockException - {error_str}", model=model, llm_provider="bedrock", response=original_exception.response, ) if ( "Unable to locate credentials" in error_str or "The security token included in the request is invalid" in error_str ): exception_mapping_worked = True raise AuthenticationError( message=f"BedrockException Invalid Authentication - {error_str}", model=model, llm_provider="bedrock", response=original_exception.response, ) if "AccessDeniedException" in error_str: exception_mapping_worked = True raise PermissionDeniedError( message=f"BedrockException PermissionDeniedError - {error_str}", model=model, llm_provider="bedrock", response=original_exception.response, ) if ( "throttlingException" in error_str or "ThrottlingException" in error_str ): exception_mapping_worked = True raise RateLimitError( message=f"BedrockException: Rate Limit Error - {error_str}", model=model, llm_provider="bedrock", response=original_exception.response, ) if "Connect timeout on endpoint URL" in error_str: exception_mapping_worked = True raise Timeout( message=f"BedrockException: Timeout Error - {error_str}", model=model, llm_provider="bedrock", ) if hasattr(original_exception, "status_code"): if original_exception.status_code == 500: exception_mapping_worked = True raise ServiceUnavailableError( message=f"BedrockException - {original_exception.message}", llm_provider="bedrock", model=model, response=httpx.Response( status_code=500, request=httpx.Request( method="POST", url="https://api.openai.com/v1/" ), ), ) elif original_exception.status_code == 401: exception_mapping_worked = True raise AuthenticationError( message=f"BedrockException - {original_exception.message}", llm_provider="bedrock", model=model, response=original_exception.response, ) elif original_exception.status_code == 400: exception_mapping_worked = True raise BadRequestError( message=f"BedrockException - {original_exception.message}", llm_provider="bedrock", model=model, response=original_exception.response, ) elif original_exception.status_code == 404: exception_mapping_worked = True raise NotFoundError( message=f"BedrockException - {original_exception.message}", llm_provider="bedrock", model=model, response=original_exception.response, ) elif custom_llm_provider == "sagemaker": if "Unable to locate credentials" in error_str: exception_mapping_worked = True raise BadRequestError( message=f"SagemakerException - {error_str}", model=model, llm_provider="sagemaker", response=original_exception.response, ) elif ( "Input validation error: `best_of` must be > 0 and <= 2" in error_str ): exception_mapping_worked = True raise BadRequestError( message=f"SagemakerException - the value of 'n' must be > 0 and <= 2 for sagemaker endpoints", model=model, llm_provider="sagemaker", response=original_exception.response, ) elif ( "`inputs` tokens + `max_new_tokens` must be <=" in error_str or "instance type with more CPU capacity or memory" in error_str ): exception_mapping_worked = True raise ContextWindowExceededError( message=f"SagemakerException - {error_str}", model=model, llm_provider="sagemaker", response=original_exception.response, ) elif custom_llm_provider == "vertex_ai": if ( 
"Vertex AI API has not been used in project" in error_str or "Unable to find your project" in error_str ): exception_mapping_worked = True raise BadRequestError( message=f"VertexAIException - {error_str} {extra_information}", model=model, llm_provider="vertex_ai", response=original_exception.response, ) elif ( "None Unknown Error." in error_str or "Content has no parts." in error_str ): exception_mapping_worked = True raise APIError( message=f"VertexAIException - {error_str} {extra_information}", status_code=500, model=model, llm_provider="vertex_ai", request=original_exception.request, ) elif "403" in error_str: exception_mapping_worked = True raise BadRequestError( message=f"VertexAIException - {error_str} {extra_information}", model=model, llm_provider="vertex_ai", response=original_exception.response, ) elif "The response was blocked." in error_str: exception_mapping_worked = True raise UnprocessableEntityError( message=f"VertexAIException - {error_str} {extra_information}", model=model, llm_provider="vertex_ai", response=httpx.Response( status_code=429, request=httpx.Request( method="POST", url=" https://cloud.google.com/vertex-ai/", ), ), ) elif ( "429 Quota exceeded" in error_str or "IndexError: list index out of range" in error_str or "429 Unable to submit request because the service is temporarily out of capacity." in error_str ): exception_mapping_worked = True raise RateLimitError( message=f"VertexAIException - {error_str} {extra_information}", model=model, llm_provider="vertex_ai", response=httpx.Response( status_code=429, request=httpx.Request( method="POST", url=" https://cloud.google.com/vertex-ai/", ), ), ) if hasattr(original_exception, "status_code"): if original_exception.status_code == 400: exception_mapping_worked = True raise BadRequestError( message=f"VertexAIException - {error_str} {extra_information}", model=model, llm_provider="vertex_ai", response=original_exception.response, ) if original_exception.status_code == 500: exception_mapping_worked = True raise APIError( message=f"VertexAIException - {error_str} {extra_information}", status_code=500, model=model, llm_provider="vertex_ai", request=original_exception.request, ) elif custom_llm_provider == "palm" or custom_llm_provider == "gemini": if "503 Getting metadata" in error_str: # auth errors look like this # 503 Getting metadata from plugin failed with error: Reauthentication is needed. Please run `gcloud auth application-default login` to reauthenticate. exception_mapping_worked = True raise BadRequestError( message=f"GeminiException - Invalid api key", model=model, llm_provider="palm", response=original_exception.response, ) if ( "504 Deadline expired before operation could complete." in error_str or "504 Deadline Exceeded" in error_str ): exception_mapping_worked = True raise Timeout( message=f"GeminiException - {original_exception.message}", model=model, llm_provider="palm", ) if "400 Request payload size exceeds" in error_str: exception_mapping_worked = True raise ContextWindowExceededError( message=f"GeminiException - {error_str}", model=model, llm_provider="palm", response=original_exception.response, ) if ( "500 An internal error has occurred." 
in error_str or "list index out of range" in error_str ): exception_mapping_worked = True raise APIError( status_code=getattr(original_exception, "status_code", 500), message=f"GeminiException - {original_exception.message}", llm_provider="palm", model=model, request=httpx.Response( status_code=429, request=httpx.Request( method="POST", url=" https://cloud.google.com/vertex-ai/", ), ), ) if hasattr(original_exception, "status_code"): if original_exception.status_code == 400: exception_mapping_worked = True raise BadRequestError( message=f"GeminiException - {error_str}", model=model, llm_provider="palm", response=original_exception.response, ) # Dailed: Error occurred: 400 Request payload size exceeds the limit: 20000 bytes elif custom_llm_provider == "cloudflare": if "Authentication error" in error_str: exception_mapping_worked = True raise AuthenticationError( message=f"Cloudflare Exception - {original_exception.message}", llm_provider="cloudflare", model=model, response=original_exception.response, ) if "must have required property" in error_str: exception_mapping_worked = True raise BadRequestError( message=f"Cloudflare Exception - {original_exception.message}", llm_provider="cloudflare", model=model, response=original_exception.response, ) elif ( custom_llm_provider == "cohere" or custom_llm_provider == "cohere_chat" ): # Cohere if ( "invalid api token" in error_str or "No API key provided." in error_str ): exception_mapping_worked = True raise AuthenticationError( message=f"CohereException - {original_exception.message}", llm_provider="cohere", model=model, response=original_exception.response, ) elif "too many tokens" in error_str: exception_mapping_worked = True raise ContextWindowExceededError( message=f"CohereException - {original_exception.message}", model=model, llm_provider="cohere", response=original_exception.response, ) elif hasattr(original_exception, "status_code"): if ( original_exception.status_code == 400 or original_exception.status_code == 498 ): exception_mapping_worked = True raise BadRequestError( message=f"CohereException - {original_exception.message}", llm_provider="cohere", model=model, response=original_exception.response, ) elif original_exception.status_code == 500: exception_mapping_worked = True raise ServiceUnavailableError( message=f"CohereException - {original_exception.message}", llm_provider="cohere", model=model, response=original_exception.response, ) elif ( "CohereConnectionError" in exception_type ): # cohere seems to fire these errors when we load test it (1k+ messages / min) exception_mapping_worked = True raise RateLimitError( message=f"CohereException - {original_exception.message}", llm_provider="cohere", model=model, response=original_exception.response, ) elif "invalid type:" in error_str: exception_mapping_worked = True raise BadRequestError( message=f"CohereException - {original_exception.message}", llm_provider="cohere", model=model, response=original_exception.response, ) elif "Unexpected server error" in error_str: exception_mapping_worked = True raise ServiceUnavailableError( message=f"CohereException - {original_exception.message}", llm_provider="cohere", model=model, response=original_exception.response, ) else: if hasattr(original_exception, "status_code"): exception_mapping_worked = True raise APIError( status_code=original_exception.status_code, message=f"CohereException - {original_exception.message}", llm_provider="cohere", model=model, request=original_exception.request, ) raise original_exception elif custom_llm_provider == 
"huggingface": if "length limit exceeded" in error_str: exception_mapping_worked = True raise ContextWindowExceededError( message=error_str, model=model, llm_provider="huggingface", response=original_exception.response, ) elif "A valid user token is required" in error_str: exception_mapping_worked = True raise BadRequestError( message=error_str, llm_provider="huggingface", model=model, response=original_exception.response, ) if hasattr(original_exception, "status_code"): if original_exception.status_code == 401: exception_mapping_worked = True raise AuthenticationError( message=f"HuggingfaceException - {original_exception.message}", llm_provider="huggingface", model=model, response=original_exception.response, ) elif original_exception.status_code == 400: exception_mapping_worked = True raise BadRequestError( message=f"HuggingfaceException - {original_exception.message}", model=model, llm_provider="huggingface", response=original_exception.response, ) elif original_exception.status_code == 408: exception_mapping_worked = True raise Timeout( message=f"HuggingfaceException - {original_exception.message}", model=model, llm_provider="huggingface", ) elif original_exception.status_code == 429: exception_mapping_worked = True raise RateLimitError( message=f"HuggingfaceException - {original_exception.message}", llm_provider="huggingface", model=model, response=original_exception.response, ) elif original_exception.status_code == 503: exception_mapping_worked = True raise ServiceUnavailableError( message=f"HuggingfaceException - {original_exception.message}", llm_provider="huggingface", model=model, response=original_exception.response, ) else: exception_mapping_worked = True raise APIError( status_code=original_exception.status_code, message=f"HuggingfaceException - {original_exception.message}", llm_provider="huggingface", model=model, request=original_exception.request, ) elif custom_llm_provider == "ai21": if hasattr(original_exception, "message"): if "Prompt has too many tokens" in original_exception.message: exception_mapping_worked = True raise ContextWindowExceededError( message=f"AI21Exception - {original_exception.message}", model=model, llm_provider="ai21", response=original_exception.response, ) if "Bad or missing API token." 
in original_exception.message: exception_mapping_worked = True raise BadRequestError( message=f"AI21Exception - {original_exception.message}", model=model, llm_provider="ai21", response=original_exception.response, ) if hasattr(original_exception, "status_code"): if original_exception.status_code == 401: exception_mapping_worked = True raise AuthenticationError( message=f"AI21Exception - {original_exception.message}", llm_provider="ai21", model=model, response=original_exception.response, ) elif original_exception.status_code == 408: exception_mapping_worked = True raise Timeout( message=f"AI21Exception - {original_exception.message}", model=model, llm_provider="ai21", ) if original_exception.status_code == 422: exception_mapping_worked = True raise BadRequestError( message=f"AI21Exception - {original_exception.message}", model=model, llm_provider="ai21", response=original_exception.response, ) elif original_exception.status_code == 429: exception_mapping_worked = True raise RateLimitError( message=f"AI21Exception - {original_exception.message}", llm_provider="ai21", model=model, response=original_exception.response, ) else: exception_mapping_worked = True raise APIError( status_code=original_exception.status_code, message=f"AI21Exception - {original_exception.message}", llm_provider="ai21", model=model, request=original_exception.request, ) elif custom_llm_provider == "nlp_cloud": if "detail" in error_str: if "Input text length should not exceed" in error_str: exception_mapping_worked = True raise ContextWindowExceededError( message=f"NLPCloudException - {error_str}", model=model, llm_provider="nlp_cloud", response=original_exception.response, ) elif "value is not a valid" in error_str: exception_mapping_worked = True raise BadRequestError( message=f"NLPCloudException - {error_str}", model=model, llm_provider="nlp_cloud", response=original_exception.response, ) else: exception_mapping_worked = True raise APIError( status_code=500, message=f"NLPCloudException - {error_str}", model=model, llm_provider="nlp_cloud", request=original_exception.request, ) if hasattr( original_exception, "status_code" ): # https://docs.nlpcloud.com/?shell#errors if ( original_exception.status_code == 400 or original_exception.status_code == 406 or original_exception.status_code == 413 or original_exception.status_code == 422 ): exception_mapping_worked = True raise BadRequestError( message=f"NLPCloudException - {original_exception.message}", llm_provider="nlp_cloud", model=model, response=original_exception.response, ) elif ( original_exception.status_code == 401 or original_exception.status_code == 403 ): exception_mapping_worked = True raise AuthenticationError( message=f"NLPCloudException - {original_exception.message}", llm_provider="nlp_cloud", model=model, response=original_exception.response, ) elif ( original_exception.status_code == 522 or original_exception.status_code == 524 ): exception_mapping_worked = True raise Timeout( message=f"NLPCloudException - {original_exception.message}", model=model, llm_provider="nlp_cloud", ) elif ( original_exception.status_code == 429 or original_exception.status_code == 402 ): exception_mapping_worked = True raise RateLimitError( message=f"NLPCloudException - {original_exception.message}", llm_provider="nlp_cloud", model=model, response=original_exception.response, ) elif ( original_exception.status_code == 500 or original_exception.status_code == 503 ): exception_mapping_worked = True raise APIError( status_code=original_exception.status_code, 
message=f"NLPCloudException - {original_exception.message}", llm_provider="nlp_cloud", model=model, request=original_exception.request, ) elif ( original_exception.status_code == 504 or original_exception.status_code == 520 ): exception_mapping_worked = True raise ServiceUnavailableError( message=f"NLPCloudException - {original_exception.message}", model=model, llm_provider="nlp_cloud", response=original_exception.response, ) else: exception_mapping_worked = True raise APIError( status_code=original_exception.status_code, message=f"NLPCloudException - {original_exception.message}", llm_provider="nlp_cloud", model=model, request=original_exception.request, ) elif custom_llm_provider == "together_ai": import json try: error_response = json.loads(error_str) except: error_response = {"error": error_str} if ( "error" in error_response and "`inputs` tokens + `max_new_tokens` must be <=" in error_response["error"] ): exception_mapping_worked = True raise ContextWindowExceededError( message=f"TogetherAIException - {error_response['error']}", model=model, llm_provider="together_ai", response=original_exception.response, ) elif ( "error" in error_response and "invalid private key" in error_response["error"] ): exception_mapping_worked = True raise AuthenticationError( message=f"TogetherAIException - {error_response['error']}", llm_provider="together_ai", model=model, response=original_exception.response, ) elif ( "error" in error_response and "INVALID_ARGUMENT" in error_response["error"] ): exception_mapping_worked = True raise BadRequestError( message=f"TogetherAIException - {error_response['error']}", model=model, llm_provider="together_ai", response=original_exception.response, ) elif ( "error" in error_response and "API key doesn't match expected format." 
in error_response["error"] ): exception_mapping_worked = True raise BadRequestError( message=f"TogetherAIException - {error_response['error']}", model=model, llm_provider="together_ai", response=original_exception.response, ) elif ( "error_type" in error_response and error_response["error_type"] == "validation" ): exception_mapping_worked = True raise BadRequestError( message=f"TogetherAIException - {error_response['error']}", model=model, llm_provider="together_ai", response=original_exception.response, ) if hasattr(original_exception, "status_code"): if original_exception.status_code == 408: exception_mapping_worked = True raise Timeout( message=f"TogetherAIException - {original_exception.message}", model=model, llm_provider="together_ai", ) elif original_exception.status_code == 422: exception_mapping_worked = True raise BadRequestError( message=f"TogetherAIException - {error_response['error']}", model=model, llm_provider="together_ai", response=original_exception.response, ) elif original_exception.status_code == 429: exception_mapping_worked = True raise RateLimitError( message=f"TogetherAIException - {original_exception.message}", llm_provider="together_ai", model=model, response=original_exception.response, ) elif original_exception.status_code == 524: exception_mapping_worked = True raise Timeout( message=f"TogetherAIException - {original_exception.message}", llm_provider="together_ai", model=model, ) else: exception_mapping_worked = True raise APIError( status_code=original_exception.status_code, message=f"TogetherAIException - {original_exception.message}", llm_provider="together_ai", model=model, request=original_exception.request, ) elif custom_llm_provider == "aleph_alpha": if ( "This is longer than the model's maximum context length" in error_str ): exception_mapping_worked = True raise ContextWindowExceededError( message=f"AlephAlphaException - {original_exception.message}", llm_provider="aleph_alpha", model=model, response=original_exception.response, ) elif "InvalidToken" in error_str or "No token provided" in error_str: exception_mapping_worked = True raise BadRequestError( message=f"AlephAlphaException - {original_exception.message}", llm_provider="aleph_alpha", model=model, response=original_exception.response, ) elif hasattr(original_exception, "status_code"): print_verbose(f"status code: {original_exception.status_code}") if original_exception.status_code == 401: exception_mapping_worked = True raise AuthenticationError( message=f"AlephAlphaException - {original_exception.message}", llm_provider="aleph_alpha", model=model, ) elif original_exception.status_code == 400: exception_mapping_worked = True raise BadRequestError( message=f"AlephAlphaException - {original_exception.message}", llm_provider="aleph_alpha", model=model, response=original_exception.response, ) elif original_exception.status_code == 429: exception_mapping_worked = True raise RateLimitError( message=f"AlephAlphaException - {original_exception.message}", llm_provider="aleph_alpha", model=model, response=original_exception.response, ) elif original_exception.status_code == 500: exception_mapping_worked = True raise ServiceUnavailableError( message=f"AlephAlphaException - {original_exception.message}", llm_provider="aleph_alpha", model=model, response=original_exception.response, ) raise original_exception raise original_exception elif ( custom_llm_provider == "ollama" or custom_llm_provider == "ollama_chat" ): if isinstance(original_exception, dict): error_str = original_exception.get("error", "") 
else: error_str = str(original_exception) if "no such file or directory" in error_str: exception_mapping_worked = True raise BadRequestError( message=f"OllamaException: Invalid Model/Model not loaded - {original_exception}", model=model, llm_provider="ollama", response=original_exception.response, ) elif "Failed to establish a new connection" in error_str: exception_mapping_worked = True raise ServiceUnavailableError( message=f"OllamaException: {original_exception}", llm_provider="ollama", model=model, response=original_exception.response, ) elif "Invalid response object from API" in error_str: exception_mapping_worked = True raise BadRequestError( message=f"OllamaException: {original_exception}", llm_provider="ollama", model=model, response=original_exception.response, ) elif "Read timed out" in error_str: exception_mapping_worked = True raise Timeout( message=f"OllamaException: {original_exception}", llm_provider="ollama", model=model, ) elif custom_llm_provider == "vllm": if hasattr(original_exception, "status_code"): if original_exception.status_code == 0: exception_mapping_worked = True raise APIConnectionError( message=f"VLLMException - {original_exception.message}", llm_provider="vllm", model=model, request=original_exception.request, ) elif custom_llm_provider == "azure": if "Internal server error" in error_str: exception_mapping_worked = True raise APIError( status_code=500, message=f"AzureException - {original_exception.message} {extra_information}", llm_provider="azure", model=model, request=httpx.Request(method="POST", url="https://openai.com/"), ) elif "This model's maximum context length is" in error_str: exception_mapping_worked = True raise ContextWindowExceededError( message=f"AzureException - {original_exception.message} {extra_information}", llm_provider="azure", model=model, response=original_exception.response, ) elif "DeploymentNotFound" in error_str: exception_mapping_worked = True raise NotFoundError( message=f"AzureException - {original_exception.message} {extra_information}", llm_provider="azure", model=model, response=original_exception.response, ) elif ( "invalid_request_error" in error_str and "content_policy_violation" in error_str ) or ( "The response was filtered due to the prompt triggering Azure OpenAI's content management" in error_str ): exception_mapping_worked = True raise ContentPolicyViolationError( message=f"AzureException - {original_exception.message} {extra_information}", llm_provider="azure", model=model, response=original_exception.response, ) elif "invalid_request_error" in error_str: exception_mapping_worked = True raise BadRequestError( message=f"AzureException - {original_exception.message} {extra_information}", llm_provider="azure", model=model, response=original_exception.response, ) elif ( "The api_key client option must be set either by passing api_key to the client or by setting" in error_str ): exception_mapping_worked = True raise AuthenticationError( message=f"{exception_provider} - {original_exception.message} {extra_information}", llm_provider=custom_llm_provider, model=model, response=original_exception.response, ) elif hasattr(original_exception, "status_code"): exception_mapping_worked = True if original_exception.status_code == 401: exception_mapping_worked = True raise AuthenticationError( message=f"AzureException - {original_exception.message} {extra_information}", llm_provider="azure", model=model, response=original_exception.response, ) elif original_exception.status_code == 408: exception_mapping_worked = True raise 
Timeout( message=f"AzureException - {original_exception.message} {extra_information}", model=model, llm_provider="azure", ) if original_exception.status_code == 422: exception_mapping_worked = True raise BadRequestError( message=f"AzureException - {original_exception.message} {extra_information}", model=model, llm_provider="azure", response=original_exception.response, ) elif original_exception.status_code == 429: exception_mapping_worked = True raise RateLimitError( message=f"AzureException - {original_exception.message} {extra_information}", model=model, llm_provider="azure", response=original_exception.response, ) elif original_exception.status_code == 503: exception_mapping_worked = True raise ServiceUnavailableError( message=f"AzureException - {original_exception.message} {extra_information}", model=model, llm_provider="azure", response=original_exception.response, ) elif original_exception.status_code == 504: # gateway timeout error exception_mapping_worked = True raise Timeout( message=f"AzureException - {original_exception.message} {extra_information}", model=model, llm_provider="azure", ) else: exception_mapping_worked = True raise APIError( status_code=original_exception.status_code, message=f"AzureException - {original_exception.message} {extra_information}", llm_provider="azure", model=model, request=httpx.Request( method="POST", url="https://openai.com/" ), ) else: # if no status code then it is an APIConnectionError: https://github.com/openai/openai-python#handling-errors raise APIConnectionError( message=f"{exception_provider} - {message} {extra_information}", llm_provider="azure", model=model, request=httpx.Request(method="POST", url="https://openai.com/"), ) if ( "BadRequestError.__init__() missing 1 required positional argument: 'param'" in str(original_exception) ): # deal with edge-case invalid request error bug in openai-python sdk exception_mapping_worked = True raise BadRequestError( message=f"{exception_provider}: This can happen due to missing AZURE_API_VERSION: {str(original_exception)}", model=model, llm_provider=custom_llm_provider, response=original_exception.response, ) else: # ensure generic errors always return APIConnectionError= exception_mapping_worked = True if hasattr(original_exception, "request"): raise APIConnectionError( message=f"{str(original_exception)}", llm_provider=custom_llm_provider, model=model, request=original_exception.request, ) else: raise APIConnectionError( message=f"{str(original_exception)}", llm_provider=custom_llm_provider, model=model, request=httpx.Request( method="POST", url="https://api.openai.com/v1/" ), # stub the request ) except Exception as e: # LOGGING exception_logging( logger_fn=user_logger_fn, additional_args={ "exception_mapping_worked": exception_mapping_worked, "original_exception": original_exception, }, exception=e, ) ## AUTH ERROR if isinstance(e, AuthenticationError) and ( litellm.email or "LITELLM_EMAIL" in os.environ ): threading.Thread(target=get_all_keys, args=(e.llm_provider,)).start() # don't let an error with mapping interrupt the user from receiving an error from the llm api calls if exception_mapping_worked: raise e else: raise original_exception
(model, original_exception, custom_llm_provider, completion_kwargs={}, extra_kwargs={})
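exception_type is the internal mapper that converts raw provider errors into OpenAI-style exception classes. The sketch below shows the caller-side effect of that mapping: catching the mapped types around a completion call. It assumes the exception classes referenced in the code above (Timeout, RateLimitError, APIConnectionError) are exposed via litellm.exceptions; the model and prompt are illustrative.

```
import litellm

try:
    resp = litellm.completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "ping"}],
        timeout=5,
    )
except litellm.exceptions.Timeout as e:
    # mapped from provider timeouts (e.g. HTTP 408 / 504)
    print("request timed out:", e)
except litellm.exceptions.RateLimitError as e:
    # mapped from provider 429s / throttling errors
    print("rate limited:", e)
except litellm.exceptions.APIConnectionError as e:
    # generic fallback when no status code could be mapped
    print("connection problem:", e)
```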
64,300
litellm.llms.prompt_templates.factory
function_call_prompt
null
def function_call_prompt(messages: list, functions: list):
    function_prompt = """Produce JSON OUTPUT ONLY! Adhere to this format {"name": "function_name", "arguments":{"argument_name": "argument_value"}} The following functions are available to you:"""
    for function in functions:
        function_prompt += f"""\n{function}\n"""

    function_added_to_prompt = False
    for message in messages:
        if "system" in message["role"]:
            message["content"] += f""" {function_prompt}"""
            function_added_to_prompt = True

    if function_added_to_prompt == False:
        messages.append({"role": "system", "content": f"""{function_prompt}"""})

    return messages
(messages: list, functions: list)
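A small sketch of what this prompt-factory helper produces for providers without native function calling; the message and function schema are made up for illustration, and calling the helper directly (outside litellm's internal prompt pipeline) is only for inspection.

```
from litellm.llms.prompt_templates.factory import function_call_prompt

messages = [{"role": "user", "content": "What's the weather in Boston?"}]
functions = [
    {
        "name": "get_current_weather",
        "parameters": {"type": "object", "properties": {"location": {"type": "string"}}},
    }
]

# No system message exists, so the helper appends one that embeds the
# function schemas and instructs the model to reply with JSON only.
prepared = function_call_prompt(messages, functions)
print(prepared[-1]["role"])          # "system"
print(prepared[-1]["content"][:60])  # starts with the JSON-only instruction
```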
64,302
litellm.utils
get_api_base
Returns the api base used for calling the model.

Parameters:
- model: str - the model passed to litellm.completion()
- optional_params - the 'litellm_params' in router.completion *OR* additional params passed to litellm.completion - eg. api_base, api_key, etc. See `LiteLLM_Params` - https://github.com/BerriAI/litellm/blob/f09e6ba98d65e035a79f73bc069145002ceafd36/litellm/router.py#L67

Returns:
- string (api_base) or None

Example:
```
from litellm import get_api_base

get_api_base(model="gemini/gemini-pro")
```
def get_api_base(model: str, optional_params: dict) -> Optional[str]:
    """
    Returns the api base used for calling the model.

    Parameters:
    - model: str - the model passed to litellm.completion()
    - optional_params - the 'litellm_params' in router.completion *OR* additional params passed to litellm.completion - eg. api_base, api_key, etc. See `LiteLLM_Params` - https://github.com/BerriAI/litellm/blob/f09e6ba98d65e035a79f73bc069145002ceafd36/litellm/router.py#L67

    Returns:
    - string (api_base) or None

    Example:
    ```
    from litellm import get_api_base

    get_api_base(model="gemini/gemini-pro")
    ```
    """
    try:
        if "model" in optional_params:
            _optional_params = LiteLLM_Params(**optional_params)
        else:  # prevent needing to copy and pop the dict
            _optional_params = LiteLLM_Params(
                model=model, **optional_params
            )  # convert to pydantic object
    except Exception as e:
        verbose_logger.debug("Error occurred in getting api base - {}".format(str(e)))
        return None
    # get llm provider
    if _optional_params.api_base is not None:
        return _optional_params.api_base

    if litellm.model_alias_map and model in litellm.model_alias_map:
        model = litellm.model_alias_map[model]
    try:
        model, custom_llm_provider, dynamic_api_key, dynamic_api_base = (
            get_llm_provider(
                model=model,
                custom_llm_provider=_optional_params.custom_llm_provider,
                api_base=_optional_params.api_base,
                api_key=_optional_params.api_key,
            )
        )
    except Exception as e:
        verbose_logger.error("Error occurred in getting api base - {}".format(str(e)))
        custom_llm_provider = None
        dynamic_api_key = None
        dynamic_api_base = None
    if dynamic_api_base is not None:
        return dynamic_api_base

    if (
        _optional_params.vertex_location is not None
        and _optional_params.vertex_project is not None
    ):
        _api_base = "{}-aiplatform.googleapis.com/v1/projects/{}/locations/{}/publishers/google/models/{}:streamGenerateContent".format(
            _optional_params.vertex_location,
            _optional_params.vertex_project,
            _optional_params.vertex_location,
            model,
        )
        return _api_base

    if custom_llm_provider is None:
        return None

    if custom_llm_provider == "gemini":
        _api_base = "https://generativelanguage.googleapis.com/v1beta/models/{}:generateContent".format(
            model
        )
        return _api_base
    elif custom_llm_provider == "openai":
        _api_base = "https://api.openai.com"
        return _api_base
    return None
(model: str, optional_params: dict) -> Optional[str]
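The docstring's example, expanded into a slightly fuller sketch; the Azure endpoint below is a placeholder, not a real URL.
```
from litellm import get_api_base

# Provider with a well-known public endpoint -> resolved from the model name alone.
print(get_api_base(model="gemini/gemini-pro", optional_params={}))

# An explicit api_base in optional_params (e.g. router litellm_params) is returned as-is.
print(
    get_api_base(
        model="azure/chatgpt-v-2",
        optional_params={"api_base": "https://my-endpoint.openai.azure.com"},  # placeholder
    )
)
```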
64,303
litellm.utils
get_api_key
null
def get_api_key(llm_provider: str, dynamic_api_key: Optional[str]):
    api_key = dynamic_api_key or litellm.api_key
    # openai
    if llm_provider == "openai" or llm_provider == "text-completion-openai":
        api_key = api_key or litellm.openai_key or get_secret("OPENAI_API_KEY")
    # anthropic
    elif llm_provider == "anthropic":
        api_key = api_key or litellm.anthropic_key or get_secret("ANTHROPIC_API_KEY")
    # ai21
    elif llm_provider == "ai21":
        api_key = api_key or litellm.ai21_key or get_secret("AI211_API_KEY")
    # aleph_alpha
    elif llm_provider == "aleph_alpha":
        api_key = (
            api_key or litellm.aleph_alpha_key or get_secret("ALEPH_ALPHA_API_KEY")
        )
    # baseten
    elif llm_provider == "baseten":
        api_key = api_key or litellm.baseten_key or get_secret("BASETEN_API_KEY")
    # cohere
    elif llm_provider == "cohere" or llm_provider == "cohere_chat":
        api_key = api_key or litellm.cohere_key or get_secret("COHERE_API_KEY")
    # huggingface
    elif llm_provider == "huggingface":
        api_key = (
            api_key or litellm.huggingface_key or get_secret("HUGGINGFACE_API_KEY")
        )
    # nlp_cloud
    elif llm_provider == "nlp_cloud":
        api_key = api_key or litellm.nlp_cloud_key or get_secret("NLP_CLOUD_API_KEY")
    # replicate
    elif llm_provider == "replicate":
        api_key = api_key or litellm.replicate_key or get_secret("REPLICATE_API_KEY")
    # together_ai
    elif llm_provider == "together_ai":
        api_key = (
            api_key
            or litellm.togetherai_api_key
            or get_secret("TOGETHERAI_API_KEY")
            or get_secret("TOGETHER_AI_TOKEN")
        )
    return api_key
(llm_provider: str, dynamic_api_key: Optional[str])
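A short usage sketch. The key value shown is a placeholder; in practice the dynamic key would come from a router or per-request config.
```
from litellm.utils import get_api_key

# With no dynamic key, the helper falls back to litellm.<provider>_key and then
# the provider's environment variable (e.g. ANTHROPIC_API_KEY).
key = get_api_key(llm_provider="anthropic", dynamic_api_key=None)

# A dynamic key takes precedence over module-level and environment keys.
key = get_api_key(llm_provider="anthropic", dynamic_api_key="sk-ant-placeholder")
```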
64,304
litellm.assistants.main
get_assistants
null
def get_assistants( custom_llm_provider: Literal["openai"], client: Optional[OpenAI] = None, **kwargs, ) -> SyncCursorPage[Assistant]: optional_params = GenericLiteLLMParams(**kwargs) ### TIMEOUT LOGIC ### timeout = optional_params.timeout or kwargs.get("request_timeout", 600) or 600 # set timeout for 10 minutes by default if ( timeout is not None and isinstance(timeout, httpx.Timeout) and supports_httpx_timeout(custom_llm_provider) == False ): read_timeout = timeout.read or 600 timeout = read_timeout # default 10 min timeout elif timeout is not None and not isinstance(timeout, httpx.Timeout): timeout = float(timeout) # type: ignore elif timeout is None: timeout = 600.0 response: Optional[SyncCursorPage[Assistant]] = None if custom_llm_provider == "openai": api_base = ( optional_params.api_base # for deepinfra/perplexity/anyscale/groq we check in get_llm_provider and pass in the api base from there or litellm.api_base or os.getenv("OPENAI_API_BASE") or "https://api.openai.com/v1" ) organization = ( optional_params.organization or litellm.organization or os.getenv("OPENAI_ORGANIZATION", None) or None # default - https://github.com/openai/openai-python/blob/284c1799070c723c6a553337134148a7ab088dd8/openai/util.py#L105 ) # set API KEY api_key = ( optional_params.api_key or litellm.api_key # for deepinfra/perplexity/anyscale we check in get_llm_provider and pass in the api key from there or litellm.openai_key or os.getenv("OPENAI_API_KEY") ) response = openai_assistants_api.get_assistants( api_base=api_base, api_key=api_key, timeout=timeout, max_retries=optional_params.max_retries, organization=organization, client=client, ) else: raise litellm.exceptions.BadRequestError( message="LiteLLM doesn't support {} for 'get_assistants'. Only 'openai' is supported.".format( custom_llm_provider ), model="n/a", llm_provider=custom_llm_provider, response=httpx.Response( status_code=400, content="Unsupported provider", request=httpx.Request(method="create_thread", url="https://github.com/BerriAI/litellm"), # type: ignore ), ) return response
(custom_llm_provider: Literal['openai'], client: Optional[openai.OpenAI] = None, **kwargs) -> openai.pagination.SyncCursorPage[Assistant]
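A usage sketch, assuming OPENAI_API_KEY is set in the environment; only the "openai" provider is accepted by this helper.
```
from litellm.assistants.main import get_assistants

# Returns an openai.pagination.SyncCursorPage[Assistant]; iterating it yields
# Assistant objects and follows cursor pagination transparently.
assistants = get_assistants(custom_llm_provider="openai")
for assistant in assistants:
    print(assistant.id, assistant.name)
```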
64,305
litellm.utils
get_first_chars_messages
null
def get_first_chars_messages(kwargs: dict) -> str:
    try:
        _messages = kwargs.get("messages")
        _messages = str(_messages)[:100]
        return _messages
    except:
        return ""
(kwargs: dict) -> str
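A tiny sketch of the truncation behaviour, with made-up message content.
```
from litellm.utils import get_first_chars_messages

kwargs = {"messages": [{"role": "user", "content": "hello " * 50}]}
preview = get_first_chars_messages(kwargs)
print(len(preview) <= 100)  # True: the stringified messages are capped at 100 chars
```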
64,306
litellm.utils
get_litellm_params
null
def get_litellm_params(
    api_key=None,
    force_timeout=600,
    azure=False,
    logger_fn=None,
    verbose=False,
    hugging_face=False,
    replicate=False,
    together_ai=False,
    custom_llm_provider=None,
    api_base=None,
    litellm_call_id=None,
    model_alias_map=None,
    completion_call_id=None,
    metadata=None,
    model_info=None,
    proxy_server_request=None,
    acompletion=None,
    preset_cache_key=None,
    no_log=None,
):
    litellm_params = {
        "acompletion": acompletion,
        "api_key": api_key,
        "force_timeout": force_timeout,
        "logger_fn": logger_fn,
        "verbose": verbose,
        "custom_llm_provider": custom_llm_provider,
        "api_base": api_base,
        "litellm_call_id": litellm_call_id,
        "model_alias_map": model_alias_map,
        "completion_call_id": completion_call_id,
        "metadata": metadata,
        "model_info": model_info,
        "proxy_server_request": proxy_server_request,
        "preset_cache_key": preset_cache_key,
        "no-log": no_log,
        "stream_response": {},  # litellm_call_id: ModelResponse Dict
    }

    return litellm_params
(api_key=None, force_timeout=600, azure=False, logger_fn=None, verbose=False, hugging_face=False, replicate=False, together_ai=False, custom_llm_provider=None, api_base=None, litellm_call_id=None, model_alias_map=None, completion_call_id=None, metadata=None, model_info=None, proxy_server_request=None, acompletion=None, preset_cache_key=None, no_log=None)
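A sketch of building the params dict directly; the call id and metadata values are placeholders.
```
from litellm.utils import get_litellm_params

litellm_params = get_litellm_params(
    custom_llm_provider="openai",
    api_base="https://api.openai.com/v1",
    litellm_call_id="unique-call-id",  # placeholder id
    metadata={"user": "example"},      # arbitrary caller metadata
)
# Note the asymmetry preserved from the source: the kwarg is `no_log`,
# but it is stored under the "no-log" key.
print(litellm_params["stream_response"])  # {}
```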
64,307
litellm.utils
get_llm_provider
Returns the provider for a given model name - e.g. 'azure/chatgpt-v-2' -> 'azure'. For the router, the whole litellm param dict can also be passed in; this function will extract the relevant details.
def get_llm_provider( model: str, custom_llm_provider: Optional[str] = None, api_base: Optional[str] = None, api_key: Optional[str] = None, litellm_params: Optional[LiteLLM_Params] = None, ) -> Tuple[str, str, Optional[str], Optional[str]]: """ Returns the provider for a given model name - e.g. 'azure/chatgpt-v-2' -> 'azure' For router -> Can also give the whole litellm param dict -> this function will extract the relevant details """ try: ## IF LITELLM PARAMS GIVEN ## if litellm_params is not None: assert ( custom_llm_provider is None and api_base is None and api_key is None ), "Either pass in litellm_params or the custom_llm_provider/api_base/api_key. Otherwise, these values will be overriden." custom_llm_provider = litellm_params.custom_llm_provider api_base = litellm_params.api_base api_key = litellm_params.api_key dynamic_api_key = None # check if llm provider provided # AZURE AI-Studio Logic - Azure AI Studio supports AZURE/Cohere # If User passes azure/command-r-plus -> we should send it to cohere_chat/command-r-plus if model.split("/", 1)[0] == "azure": if _is_non_openai_azure_model(model): custom_llm_provider = "openai" return model, custom_llm_provider, dynamic_api_key, api_base if custom_llm_provider: return model, custom_llm_provider, dynamic_api_key, api_base if api_key and api_key.startswith("os.environ/"): dynamic_api_key = get_secret(api_key) # check if llm provider part of model name if ( model.split("/", 1)[0] in litellm.provider_list and model.split("/", 1)[0] not in litellm.model_list and len(model.split("/")) > 1 # handle edge case where user passes in `litellm --model mistral` https://github.com/BerriAI/litellm/issues/1351 ): custom_llm_provider = model.split("/", 1)[0] model = model.split("/", 1)[1] if custom_llm_provider == "perplexity": # perplexity is openai compatible, we just need to set this to custom_openai and have the api_base be https://api.perplexity.ai api_base = "https://api.perplexity.ai" dynamic_api_key = get_secret("PERPLEXITYAI_API_KEY") elif custom_llm_provider == "anyscale": # anyscale is openai compatible, we just need to set this to custom_openai and have the api_base be https://api.endpoints.anyscale.com/v1 api_base = "https://api.endpoints.anyscale.com/v1" dynamic_api_key = get_secret("ANYSCALE_API_KEY") elif custom_llm_provider == "deepinfra": # deepinfra is openai compatible, we just need to set this to custom_openai and have the api_base be https://api.endpoints.anyscale.com/v1 api_base = "https://api.deepinfra.com/v1/openai" dynamic_api_key = get_secret("DEEPINFRA_API_KEY") elif custom_llm_provider == "groq": # groq is openai compatible, we just need to set this to custom_openai and have the api_base be https://api.groq.com/openai/v1 api_base = "https://api.groq.com/openai/v1" dynamic_api_key = get_secret("GROQ_API_KEY") elif custom_llm_provider == "deepseek": # deepseek is openai compatible, we just need to set this to custom_openai and have the api_base be https://api.deepseek.com/v1 api_base = "https://api.deepseek.com/v1" dynamic_api_key = get_secret("DEEPSEEK_API_KEY") elif custom_llm_provider == "fireworks_ai": # fireworks is openai compatible, we just need to set this to custom_openai and have the api_base be https://api.fireworks.ai/inference/v1 if not model.startswith("accounts/fireworks/models"): model = f"accounts/fireworks/models/{model}" api_base = "https://api.fireworks.ai/inference/v1" dynamic_api_key = ( get_secret("FIREWORKS_API_KEY") or get_secret("FIREWORKS_AI_API_KEY") or get_secret("FIREWORKSAI_API_KEY") or 
get_secret("FIREWORKS_AI_TOKEN") ) elif custom_llm_provider == "mistral": # mistral is openai compatible, we just need to set this to custom_openai and have the api_base be https://api.mistral.ai api_base = ( api_base or get_secret("MISTRAL_AZURE_API_BASE") # for Azure AI Mistral or "https://api.mistral.ai/v1" ) # type: ignore # if api_base does not end with /v1 we add it if api_base is not None and not api_base.endswith( "/v1" ): # Mistral always needs a /v1 at the end api_base = api_base + "/v1" dynamic_api_key = ( api_key or get_secret("MISTRAL_AZURE_API_KEY") # for Azure AI Mistral or get_secret("MISTRAL_API_KEY") ) elif custom_llm_provider == "voyage": # voyage is openai compatible, we just need to set this to custom_openai and have the api_base be https://api.voyageai.com/v1 api_base = "https://api.voyageai.com/v1" dynamic_api_key = get_secret("VOYAGE_API_KEY") elif custom_llm_provider == "together_ai": api_base = "https://api.together.xyz/v1" dynamic_api_key = ( get_secret("TOGETHER_API_KEY") or get_secret("TOGETHER_AI_API_KEY") or get_secret("TOGETHERAI_API_KEY") or get_secret("TOGETHER_AI_TOKEN") ) if api_base is not None and not isinstance(api_base, str): raise Exception( "api base needs to be a string. api_base={}".format(api_base) ) if dynamic_api_key is not None and not isinstance(dynamic_api_key, str): raise Exception( "dynamic_api_key needs to be a string. dynamic_api_key={}".format( dynamic_api_key ) ) return model, custom_llm_provider, dynamic_api_key, api_base elif model.split("/", 1)[0] in litellm.provider_list: custom_llm_provider = model.split("/", 1)[0] model = model.split("/", 1)[1] if api_base is not None and not isinstance(api_base, str): raise Exception( "api base needs to be a string. api_base={}".format(api_base) ) if dynamic_api_key is not None and not isinstance(dynamic_api_key, str): raise Exception( "dynamic_api_key needs to be a string. dynamic_api_key={}".format( dynamic_api_key ) ) return model, custom_llm_provider, dynamic_api_key, api_base # check if api base is a known openai compatible endpoint if api_base: for endpoint in litellm.openai_compatible_endpoints: if endpoint in api_base: if endpoint == "api.perplexity.ai": custom_llm_provider = "perplexity" dynamic_api_key = get_secret("PERPLEXITYAI_API_KEY") elif endpoint == "api.endpoints.anyscale.com/v1": custom_llm_provider = "anyscale" dynamic_api_key = get_secret("ANYSCALE_API_KEY") elif endpoint == "api.deepinfra.com/v1/openai": custom_llm_provider = "deepinfra" dynamic_api_key = get_secret("DEEPINFRA_API_KEY") elif endpoint == "api.mistral.ai/v1": custom_llm_provider = "mistral" dynamic_api_key = get_secret("MISTRAL_API_KEY") elif endpoint == "api.groq.com/openai/v1": custom_llm_provider = "groq" dynamic_api_key = get_secret("GROQ_API_KEY") elif endpoint == "api.deepseek.com/v1": custom_llm_provider = "deepseek" dynamic_api_key = get_secret("DEEPSEEK_API_KEY") if api_base is not None and not isinstance(api_base, str): raise Exception( "api base needs to be a string. api_base={}".format( api_base ) ) if dynamic_api_key is not None and not isinstance( dynamic_api_key, str ): raise Exception( "dynamic_api_key needs to be a string. dynamic_api_key={}".format( dynamic_api_key ) ) return model, custom_llm_provider, dynamic_api_key, api_base # type: ignore # check if model in known model provider list -> for huggingface models, raise exception as they don't have a fixed provider (can be togetherai, anyscale, baseten, runpod, et.) 
## openai - chatcompletion + text completion if ( model in litellm.open_ai_chat_completion_models or "ft:gpt-3.5-turbo" in model or model in litellm.openai_image_generation_models ): custom_llm_provider = "openai" elif model in litellm.open_ai_text_completion_models: custom_llm_provider = "text-completion-openai" ## anthropic elif model in litellm.anthropic_models: custom_llm_provider = "anthropic" ## cohere elif model in litellm.cohere_models or model in litellm.cohere_embedding_models: custom_llm_provider = "cohere" ## cohere chat models elif model in litellm.cohere_chat_models: custom_llm_provider = "cohere_chat" ## replicate elif model in litellm.replicate_models or (":" in model and len(model) > 64): model_parts = model.split(":") if ( len(model_parts) > 1 and len(model_parts[1]) == 64 ): ## checks if model name has a 64 digit code - e.g. "meta/llama-2-70b-chat:02e509c789964a7ea8736978a43525956ef40397be9033abf9fd2badfe68c9e3" custom_llm_provider = "replicate" elif model in litellm.replicate_models: custom_llm_provider = "replicate" ## openrouter elif model in litellm.openrouter_models: custom_llm_provider = "openrouter" ## openrouter elif model in litellm.maritalk_models: custom_llm_provider = "maritalk" ## vertex - text + chat + language (gemini) models elif ( model in litellm.vertex_chat_models or model in litellm.vertex_code_chat_models or model in litellm.vertex_text_models or model in litellm.vertex_code_text_models or model in litellm.vertex_language_models or model in litellm.vertex_embedding_models or model in litellm.vertex_vision_models ): custom_llm_provider = "vertex_ai" ## ai21 elif model in litellm.ai21_models: custom_llm_provider = "ai21" ## aleph_alpha elif model in litellm.aleph_alpha_models: custom_llm_provider = "aleph_alpha" ## baseten elif model in litellm.baseten_models: custom_llm_provider = "baseten" ## nlp_cloud elif model in litellm.nlp_cloud_models: custom_llm_provider = "nlp_cloud" ## petals elif model in litellm.petals_models: custom_llm_provider = "petals" ## bedrock elif ( model in litellm.bedrock_models or model in litellm.bedrock_embedding_models ): custom_llm_provider = "bedrock" elif model in litellm.watsonx_models: custom_llm_provider = "watsonx" # openai embeddings elif model in litellm.open_ai_embedding_models: custom_llm_provider = "openai" if custom_llm_provider is None or custom_llm_provider == "": if litellm.suppress_debug_info == False: print() # noqa print( # noqa "\033[1;31mProvider List: https://docs.litellm.ai/docs/providers\033[0m" # noqa ) # noqa print() # noqa error_str = f"LLM Provider NOT provided. Pass in the LLM provider you are trying to call. You passed model={model}\n Pass model as E.g. For 'Huggingface' inference endpoints pass in `completion(model='huggingface/starcoder',..)` Learn more: https://docs.litellm.ai/docs/providers" # maps to openai.NotFoundError, this is raised when openai does not recognize the llm raise litellm.exceptions.BadRequestError( # type: ignore message=error_str, model=model, response=httpx.Response( status_code=400, content=error_str, request=httpx.Request(method="completion", url="https://github.com/BerriAI/litellm"), # type: ignore ), llm_provider="", ) if api_base is not None and not isinstance(api_base, str): raise Exception( "api base needs to be a string. api_base={}".format(api_base) ) if dynamic_api_key is not None and not isinstance(dynamic_api_key, str): raise Exception( "dynamic_api_key needs to be a string. 
dynamic_api_key={}".format( dynamic_api_key ) ) return model, custom_llm_provider, dynamic_api_key, api_base except Exception as e: if isinstance(e, litellm.exceptions.BadRequestError): raise e else: error_str = ( f"GetLLMProvider Exception - {str(e)}\n\noriginal model: {model}" ) raise litellm.exceptions.BadRequestError( # type: ignore message=f"GetLLMProvider Exception - {str(e)}\n\noriginal model: {model}", model=model, response=httpx.Response( status_code=400, content=error_str, request=httpx.Request(method="completion", url="https://github.com/BerriAI/litellm"), # type: ignore ), llm_provider="", )
(model: str, custom_llm_provider: Optional[str] = None, api_base: Optional[str] = None, api_key: Optional[str] = None, litellm_params: Optional[litellm.types.router.LiteLLM_Params] = None) -> Tuple[str, str, Optional[str], Optional[str]]
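A sketch of unpacking the returned tuple, using the docstring's Azure example plus an OpenAI-compatible provider; the Groq model name is illustrative, and GROQ_API_KEY is assumed to be set.
```
from litellm.utils import get_llm_provider

# Prefixed model name -> the provider is split off the front.
model, provider, dynamic_api_key, api_base = get_llm_provider("azure/chatgpt-v-2")
# provider == "azure", model == "chatgpt-v-2"

# OpenAI-compatible providers also get a default api_base and an env-var API key.
model, provider, dynamic_api_key, api_base = get_llm_provider("groq/llama3-8b-8192")
# provider == "groq", api_base == "https://api.groq.com/openai/v1"
```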
64,308
litellm.utils
get_max_tokens
Get the maximum number of output tokens allowed for a given model.

Parameters:
model (str): The name of the model.

Returns:
    int: The maximum number of tokens allowed for the given model.

Raises:
    Exception: If the model is not mapped yet.

Example:
    >>> get_max_tokens("gpt-4")
    8192
def get_max_tokens(model: str):
    """
    Get the maximum number of output tokens allowed for a given model.

    Parameters:
    model (str): The name of the model.

    Returns:
        int: The maximum number of tokens allowed for the given model.

    Raises:
        Exception: If the model is not mapped yet.

    Example:
        >>> get_max_tokens("gpt-4")
        8192
    """

    def _get_max_position_embeddings(model_name):
        # Construct the URL for the config.json file
        config_url = f"https://huggingface.co/{model_name}/raw/main/config.json"

        try:
            # Make the HTTP request to get the raw JSON file
            response = requests.get(config_url)
            response.raise_for_status()  # Raise an exception for bad responses (4xx or 5xx)

            # Parse the JSON response
            config_json = response.json()

            # Extract and return the max_position_embeddings
            max_position_embeddings = config_json.get("max_position_embeddings")

            if max_position_embeddings is not None:
                return max_position_embeddings
            else:
                return None
        except requests.exceptions.RequestException as e:
            return None

    try:
        if model in litellm.model_cost:
            if "max_output_tokens" in litellm.model_cost[model]:
                return litellm.model_cost[model]["max_output_tokens"]
            elif "max_tokens" in litellm.model_cost[model]:
                return litellm.model_cost[model]["max_tokens"]

        model, custom_llm_provider, _, _ = get_llm_provider(model=model)
        if custom_llm_provider == "huggingface":
            max_tokens = _get_max_position_embeddings(model_name=model)
            return max_tokens
        else:
            raise Exception()
    except:
        raise Exception(
            "This model isn't mapped yet. Add it here - https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json"
        )
(model: str)
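A quick sketch of both lookup paths; the Hugging Face case assumes network access, and the exact numbers depend on the current litellm.model_cost map.
```
from litellm.utils import get_max_tokens

print(get_max_tokens("gpt-4"))  # e.g. 8192, per the docstring example (read from litellm.model_cost)

# Unmapped HF models fall back to fetching max_position_embeddings from the
# model's config.json on huggingface.co (requires network access).
print(get_max_tokens("huggingface/bigcode/starcoder"))
```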
64,309
litellm.assistants.main
get_messages
null
def get_messages( custom_llm_provider: Literal["openai"], thread_id: str, client: Optional[OpenAI] = None, **kwargs, ) -> SyncCursorPage[OpenAIMessage]: optional_params = GenericLiteLLMParams(**kwargs) ### TIMEOUT LOGIC ### timeout = optional_params.timeout or kwargs.get("request_timeout", 600) or 600 # set timeout for 10 minutes by default if ( timeout is not None and isinstance(timeout, httpx.Timeout) and supports_httpx_timeout(custom_llm_provider) == False ): read_timeout = timeout.read or 600 timeout = read_timeout # default 10 min timeout elif timeout is not None and not isinstance(timeout, httpx.Timeout): timeout = float(timeout) # type: ignore elif timeout is None: timeout = 600.0 response: Optional[SyncCursorPage[OpenAIMessage]] = None if custom_llm_provider == "openai": api_base = ( optional_params.api_base # for deepinfra/perplexity/anyscale/groq we check in get_llm_provider and pass in the api base from there or litellm.api_base or os.getenv("OPENAI_API_BASE") or "https://api.openai.com/v1" ) organization = ( optional_params.organization or litellm.organization or os.getenv("OPENAI_ORGANIZATION", None) or None # default - https://github.com/openai/openai-python/blob/284c1799070c723c6a553337134148a7ab088dd8/openai/util.py#L105 ) # set API KEY api_key = ( optional_params.api_key or litellm.api_key # for deepinfra/perplexity/anyscale we check in get_llm_provider and pass in the api key from there or litellm.openai_key or os.getenv("OPENAI_API_KEY") ) response = openai_assistants_api.get_messages( thread_id=thread_id, api_base=api_base, api_key=api_key, timeout=timeout, max_retries=optional_params.max_retries, organization=organization, client=client, ) else: raise litellm.exceptions.BadRequestError( message="LiteLLM doesn't support {} for 'get_messages'. Only 'openai' is supported.".format( custom_llm_provider ), model="n/a", llm_provider=custom_llm_provider, response=httpx.Response( status_code=400, content="Unsupported provider", request=httpx.Request(method="create_thread", url="https://github.com/BerriAI/litellm"), # type: ignore ), ) return response
(custom_llm_provider: Literal['openai'], thread_id: str, client: Optional[openai.OpenAI] = None, **kwargs) -> openai.pagination.SyncCursorPage[Message]
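A usage sketch, assuming OPENAI_API_KEY is set; the thread id is a placeholder that would normally come from a previously created thread.
```
from litellm.assistants.main import get_messages

messages = get_messages(custom_llm_provider="openai", thread_id="thread_abc123")  # placeholder id
for message in messages:
    print(message.role, message.content)
```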
64,310
litellm
get_model_cost_map
null
def get_model_cost_map(url: str):
    if (
        os.getenv("LITELLM_LOCAL_MODEL_COST_MAP", False) == True
        or os.getenv("LITELLM_LOCAL_MODEL_COST_MAP", False) == "True"
    ):
        import importlib.resources
        import json

        with importlib.resources.open_text(
            "litellm", "model_prices_and_context_window_backup.json"
        ) as f:
            content = json.load(f)
            return content

    try:
        with requests.get(
            url, timeout=5
        ) as response:  # set a 5 second timeout for the get request
            response.raise_for_status()  # Raise an exception if the request is unsuccessful
            content = response.json()
            return content
    except Exception as e:
        import importlib.resources
        import json

        with importlib.resources.open_text(
            "litellm", "model_prices_and_context_window_backup.json"
        ) as f:
            content = json.load(f)
            return content
(url: str)
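A sketch that forces the bundled backup JSON instead of a network fetch; the lookup key is assumed to exist in the packaged cost map.
```
import os

from litellm import get_model_cost_map

os.environ["LITELLM_LOCAL_MODEL_COST_MAP"] = "True"
cost_map = get_model_cost_map(url="")  # url is ignored when the env var is set

# e.g. inspect one entry (assuming "gpt-3.5-turbo" is present in the bundled map)
print(cost_map["gpt-3.5-turbo"]["max_tokens"])
```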
64,311
litellm.utils
get_model_info
Get a dict for the maximum tokens (context window), input_cost_per_token, output_cost_per_token for a given model.

Parameters:
model (str): The name of the model.

Returns:
    dict: A dictionary containing the following information:
        - max_tokens (int): The maximum number of tokens allowed for the given model.
        - input_cost_per_token (float): The cost per token for input.
        - output_cost_per_token (float): The cost per token for output.
        - litellm_provider (str): The provider of the model (e.g., "openai").
        - mode (str): The mode of the model (e.g., "chat" or "completion").

Raises:
    Exception: If the model is not mapped yet.

Example:
    >>> get_model_info("gpt-4")
    {
        "max_tokens": 8192,
        "input_cost_per_token": 0.00003,
        "output_cost_per_token": 0.00006,
        "litellm_provider": "openai",
        "mode": "chat"
    }
def get_model_info(model: str): """ Get a dict for the maximum tokens (context window), input_cost_per_token, output_cost_per_token for a given model. Parameters: model (str): The name of the model. Returns: dict: A dictionary containing the following information: - max_tokens (int): The maximum number of tokens allowed for the given model. - input_cost_per_token (float): The cost per token for input. - output_cost_per_token (float): The cost per token for output. - litellm_provider (str): The provider of the model (e.g., "openai"). - mode (str): The mode of the model (e.g., "chat" or "completion"). Raises: Exception: If the model is not mapped yet. Example: >>> get_model_info("gpt-4") { "max_tokens": 8192, "input_cost_per_token": 0.00003, "output_cost_per_token": 0.00006, "litellm_provider": "openai", "mode": "chat" } """ def _get_max_position_embeddings(model_name): # Construct the URL for the config.json file config_url = f"https://huggingface.co/{model_name}/raw/main/config.json" try: # Make the HTTP request to get the raw JSON file response = requests.get(config_url) response.raise_for_status() # Raise an exception for bad responses (4xx or 5xx) # Parse the JSON response config_json = response.json() # Extract and return the max_position_embeddings max_position_embeddings = config_json.get("max_position_embeddings") if max_position_embeddings is not None: return max_position_embeddings else: return None except requests.exceptions.RequestException as e: return None try: azure_llms = litellm.azure_llms if model in azure_llms: model = azure_llms[model] if model in litellm.model_cost: return litellm.model_cost[model] model, custom_llm_provider, _, _ = get_llm_provider(model=model) if custom_llm_provider == "huggingface": max_tokens = _get_max_position_embeddings(model_name=model) return { "max_tokens": max_tokens, "input_cost_per_token": 0, "output_cost_per_token": 0, "litellm_provider": "huggingface", "mode": "chat", } else: """ Check if model in model cost map """ if model in litellm.model_cost: return litellm.model_cost[model] else: raise Exception() except: raise Exception( "This model isn't mapped yet. Add it here - https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json" )
(model: str)
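The docstring's example as a runnable sketch.
```
from litellm.utils import get_model_info

info = get_model_info("gpt-4")
# Per the docstring, the dict includes max_tokens, input_cost_per_token,
# output_cost_per_token, litellm_provider and mode.
print(info["litellm_provider"], info["mode"])
```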
64,312
litellm.utils
get_model_list
null
def get_model_list():
    global last_fetched_at, print_verbose
    try:
        # if user is using hosted product -> get their updated model list
        user_email = (
            os.getenv("LITELLM_EMAIL")
            or litellm.email
            or litellm.token
            or os.getenv("LITELLM_TOKEN")
        )
        if user_email:
            # make the api call
            last_fetched_at = time.time()
            print_verbose(f"last_fetched_at: {last_fetched_at}")
            response = requests.post(
                url="http://api.litellm.ai/get_model_list",
                headers={"content-type": "application/json"},
                data=json.dumps({"user_email": user_email}),
            )
            print_verbose(f"get_model_list response: {response.text}")
            data = response.json()
            # update model list
            model_list = data["model_list"]
            # # check if all model providers are in environment
            # model_providers = data["model_providers"]
            # missing_llm_provider = None
            # for item in model_providers:
            #     if f"{item.upper()}_API_KEY" not in os.environ:
            #         missing_llm_provider = item
            #         break
            # # update environment - if required
            # threading.Thread(target=get_all_keys, args=(missing_llm_provider)).start()
            return model_list
        return []  # return empty list by default
    except:
        print_verbose(
            f"[Non-Blocking Error] get_model_list error - {traceback.format_exc()}"
        )
()
64,313
litellm.utils
get_optional_params
null
def get_optional_params( # use the openai defaults # https://platform.openai.com/docs/api-reference/chat/create model: str, functions=None, function_call=None, temperature=None, top_p=None, n=None, stream=False, stream_options=None, stop=None, max_tokens=None, presence_penalty=None, frequency_penalty=None, logit_bias=None, user=None, custom_llm_provider="", response_format=None, seed=None, tools=None, tool_choice=None, max_retries=None, logprobs=None, top_logprobs=None, extra_headers=None, **kwargs, ): # retrieve all parameters passed to the function passed_params = locals().copy() special_params = passed_params.pop("kwargs") for k, v in special_params.items(): if k.startswith("aws_") and ( custom_llm_provider != "bedrock" and custom_llm_provider != "sagemaker" ): # allow dynamically setting boto3 init logic continue elif k == "hf_model_name" and custom_llm_provider != "sagemaker": continue elif ( k.startswith("vertex_") and custom_llm_provider != "vertex_ai" ): # allow dynamically setting vertex ai init logic continue passed_params[k] = v optional_params: Dict = {} common_auth_dict = litellm.common_cloud_provider_auth_params if custom_llm_provider in common_auth_dict["providers"]: """ Check if params = ["project", "region_name", "token"] and correctly translate for = ["azure", "vertex_ai", "watsonx", "aws"] """ if custom_llm_provider == "azure": optional_params = litellm.AzureOpenAIConfig().map_special_auth_params( non_default_params=passed_params, optional_params=optional_params ) elif custom_llm_provider == "bedrock": optional_params = ( litellm.AmazonBedrockGlobalConfig().map_special_auth_params( non_default_params=passed_params, optional_params=optional_params ) ) elif custom_llm_provider == "vertex_ai": optional_params = litellm.VertexAIConfig().map_special_auth_params( non_default_params=passed_params, optional_params=optional_params ) elif custom_llm_provider == "watsonx": optional_params = litellm.IBMWatsonXAIConfig().map_special_auth_params( non_default_params=passed_params, optional_params=optional_params ) default_params = { "functions": None, "function_call": None, "temperature": None, "top_p": None, "n": None, "stream": None, "stream_options": None, "stop": None, "max_tokens": None, "presence_penalty": None, "frequency_penalty": None, "logit_bias": None, "user": None, "model": None, "custom_llm_provider": "", "response_format": None, "seed": None, "tools": None, "tool_choice": None, "max_retries": None, "logprobs": None, "top_logprobs": None, "extra_headers": None, } # filter out those parameters that were passed with non-default values non_default_params = { k: v for k, v in passed_params.items() if ( k != "model" and k != "custom_llm_provider" and k in default_params and v != default_params[k] ) } ## raise exception if function calling passed in for a provider that doesn't support it if ( "functions" in non_default_params or "function_call" in non_default_params or "tools" in non_default_params ): if ( custom_llm_provider != "openai" and custom_llm_provider != "text-completion-openai" and custom_llm_provider != "azure" and custom_llm_provider != "vertex_ai" and custom_llm_provider != "anyscale" and custom_llm_provider != "together_ai" and custom_llm_provider != "groq" and custom_llm_provider != "deepseek" and custom_llm_provider != "mistral" and custom_llm_provider != "anthropic" and custom_llm_provider != "cohere_chat" and custom_llm_provider != "cohere" and custom_llm_provider != "bedrock" and custom_llm_provider != "ollama_chat" ): if custom_llm_provider == "ollama": # 
ollama actually supports json output optional_params["format"] = "json" litellm.add_function_to_prompt = ( True # so that main.py adds the function call to the prompt ) if "tools" in non_default_params: optional_params["functions_unsupported_model"] = ( non_default_params.pop("tools") ) non_default_params.pop( "tool_choice", None ) # causes ollama requests to hang elif "functions" in non_default_params: optional_params["functions_unsupported_model"] = ( non_default_params.pop("functions") ) elif ( litellm.add_function_to_prompt ): # if user opts to add it to prompt instead optional_params["functions_unsupported_model"] = non_default_params.pop( "tools", non_default_params.pop("functions", None) ) else: raise UnsupportedParamsError( status_code=500, message=f"Function calling is not supported by {custom_llm_provider}.", ) def _check_valid_arg(supported_params): verbose_logger.debug( f"\nLiteLLM completion() model= {model}; provider = {custom_llm_provider}" ) verbose_logger.debug( f"\nLiteLLM: Params passed to completion() {passed_params}" ) verbose_logger.debug( f"\nLiteLLM: Non-Default params passed to completion() {non_default_params}" ) unsupported_params = {} for k in non_default_params.keys(): if k not in supported_params: if k == "user": continue if k == "n" and n == 1: # langchain sends n=1 as a default value continue # skip this param if ( k == "max_retries" ): # TODO: This is a patch. We support max retries for OpenAI, Azure. For non OpenAI LLMs we need to add support for max retries continue # skip this param # Always keeps this in elif code blocks else: unsupported_params[k] = non_default_params[k] if unsupported_params and not litellm.drop_params: raise UnsupportedParamsError( status_code=500, message=f"{custom_llm_provider} does not support parameters: {unsupported_params}. To drop these, set `litellm.drop_params=True` or for proxy:\n\n`litellm_settings:\n drop_params: true`\n", ) def _map_and_modify_arg(supported_params: dict, provider: str, model: str): """ filter params to fit the required provider format, drop those that don't fit if user sets `litellm.drop_params = True`. 
""" filtered_stop = None if "stop" in supported_params and litellm.drop_params: if provider == "bedrock" and "amazon" in model: filtered_stop = [] if isinstance(stop, list): for s in stop: if re.match(r"^(\|+|User:)$", s): filtered_stop.append(s) if filtered_stop is not None: supported_params["stop"] = filtered_stop return supported_params ## raise exception if provider doesn't support passed in param if custom_llm_provider == "anthropic": ## check if unsupported param passed in supported_params = get_supported_openai_params( model=model, custom_llm_provider=custom_llm_provider ) _check_valid_arg(supported_params=supported_params) optional_params = litellm.AnthropicConfig().map_openai_params( non_default_params=non_default_params, optional_params=optional_params ) elif custom_llm_provider == "cohere": ## check if unsupported param passed in supported_params = get_supported_openai_params( model=model, custom_llm_provider=custom_llm_provider ) _check_valid_arg(supported_params=supported_params) # handle cohere params if stream: optional_params["stream"] = stream if temperature is not None: optional_params["temperature"] = temperature if max_tokens is not None: optional_params["max_tokens"] = max_tokens if n is not None: optional_params["num_generations"] = n if logit_bias is not None: optional_params["logit_bias"] = logit_bias if top_p is not None: optional_params["p"] = top_p if frequency_penalty is not None: optional_params["frequency_penalty"] = frequency_penalty if presence_penalty is not None: optional_params["presence_penalty"] = presence_penalty if stop is not None: optional_params["stop_sequences"] = stop elif custom_llm_provider == "cohere_chat": ## check if unsupported param passed in supported_params = get_supported_openai_params( model=model, custom_llm_provider=custom_llm_provider ) _check_valid_arg(supported_params=supported_params) # handle cohere params if stream: optional_params["stream"] = stream if temperature is not None: optional_params["temperature"] = temperature if max_tokens is not None: optional_params["max_tokens"] = max_tokens if n is not None: optional_params["num_generations"] = n if top_p is not None: optional_params["p"] = top_p if frequency_penalty is not None: optional_params["frequency_penalty"] = frequency_penalty if presence_penalty is not None: optional_params["presence_penalty"] = presence_penalty if stop is not None: optional_params["stop_sequences"] = stop if tools is not None: optional_params["tools"] = tools if seed is not None: optional_params["seed"] = seed elif custom_llm_provider == "maritalk": ## check if unsupported param passed in supported_params = get_supported_openai_params( model=model, custom_llm_provider=custom_llm_provider ) _check_valid_arg(supported_params=supported_params) # handle cohere params if stream: optional_params["stream"] = stream if temperature is not None: optional_params["temperature"] = temperature if max_tokens is not None: optional_params["max_tokens"] = max_tokens if logit_bias is not None: optional_params["logit_bias"] = logit_bias if top_p is not None: optional_params["p"] = top_p if presence_penalty is not None: optional_params["repetition_penalty"] = presence_penalty if stop is not None: optional_params["stopping_tokens"] = stop elif custom_llm_provider == "replicate": ## check if unsupported param passed in supported_params = get_supported_openai_params( model=model, custom_llm_provider=custom_llm_provider ) _check_valid_arg(supported_params=supported_params) if stream: optional_params["stream"] = stream return 
optional_params if max_tokens is not None: if "vicuna" in model or "flan" in model: optional_params["max_length"] = max_tokens elif "meta/codellama-13b" in model: optional_params["max_tokens"] = max_tokens else: optional_params["max_new_tokens"] = max_tokens if temperature is not None: optional_params["temperature"] = temperature if top_p is not None: optional_params["top_p"] = top_p if stop is not None: optional_params["stop_sequences"] = stop elif custom_llm_provider == "huggingface": ## check if unsupported param passed in supported_params = get_supported_openai_params( model=model, custom_llm_provider=custom_llm_provider ) _check_valid_arg(supported_params=supported_params) optional_params = litellm.HuggingfaceConfig().map_openai_params( non_default_params=non_default_params, optional_params=optional_params ) elif custom_llm_provider == "together_ai": ## check if unsupported param passed in supported_params = get_supported_openai_params( model=model, custom_llm_provider=custom_llm_provider ) _check_valid_arg(supported_params=supported_params) if stream: optional_params["stream"] = stream if temperature is not None: optional_params["temperature"] = temperature if top_p is not None: optional_params["top_p"] = top_p if max_tokens is not None: optional_params["max_tokens"] = max_tokens if frequency_penalty is not None: optional_params["frequency_penalty"] = frequency_penalty if stop is not None: optional_params["stop"] = stop if tools is not None: optional_params["tools"] = tools if tool_choice is not None: optional_params["tool_choice"] = tool_choice elif custom_llm_provider == "ai21": ## check if unsupported param passed in supported_params = get_supported_openai_params( model=model, custom_llm_provider=custom_llm_provider ) _check_valid_arg(supported_params=supported_params) if stream: optional_params["stream"] = stream if n is not None: optional_params["numResults"] = n if max_tokens is not None: optional_params["maxTokens"] = max_tokens if temperature is not None: optional_params["temperature"] = temperature if top_p is not None: optional_params["topP"] = top_p if stop is not None: optional_params["stopSequences"] = stop if frequency_penalty is not None: optional_params["frequencyPenalty"] = {"scale": frequency_penalty} if presence_penalty is not None: optional_params["presencePenalty"] = {"scale": presence_penalty} elif ( custom_llm_provider == "palm" or custom_llm_provider == "gemini" ): # https://developers.generativeai.google/tutorials/curl_quickstart ## check if unsupported param passed in supported_params = get_supported_openai_params( model=model, custom_llm_provider=custom_llm_provider ) _check_valid_arg(supported_params=supported_params) if temperature is not None: optional_params["temperature"] = temperature if top_p is not None: optional_params["top_p"] = top_p if stream: optional_params["stream"] = stream if n is not None: optional_params["candidate_count"] = n if stop is not None: if isinstance(stop, str): optional_params["stop_sequences"] = [stop] elif isinstance(stop, list): optional_params["stop_sequences"] = stop if max_tokens is not None: optional_params["max_output_tokens"] = max_tokens elif custom_llm_provider == "vertex_ai" and ( model in litellm.vertex_chat_models or model in litellm.vertex_code_chat_models or model in litellm.vertex_text_models or model in litellm.vertex_code_text_models or model in litellm.vertex_language_models or model in litellm.vertex_embedding_models or model in litellm.vertex_vision_models ): print_verbose(f"(start) INSIDE THE VERTEX AI 
OPTIONAL PARAM BLOCK") ## check if unsupported param passed in supported_params = get_supported_openai_params( model=model, custom_llm_provider=custom_llm_provider ) _check_valid_arg(supported_params=supported_params) optional_params = litellm.VertexAIConfig().map_openai_params( non_default_params=non_default_params, optional_params=optional_params, ) print_verbose( f"(end) INSIDE THE VERTEX AI OPTIONAL PARAM BLOCK - optional_params: {optional_params}" ) elif ( custom_llm_provider == "vertex_ai" and model in litellm.vertex_anthropic_models ): supported_params = get_supported_openai_params( model=model, custom_llm_provider=custom_llm_provider ) _check_valid_arg(supported_params=supported_params) optional_params = litellm.VertexAIAnthropicConfig().map_openai_params( non_default_params=non_default_params, optional_params=optional_params, ) elif custom_llm_provider == "sagemaker": ## check if unsupported param passed in supported_params = get_supported_openai_params( model=model, custom_llm_provider=custom_llm_provider ) _check_valid_arg(supported_params=supported_params) # temperature, top_p, n, stream, stop, max_tokens, n, presence_penalty default to None if temperature is not None: if temperature == 0.0 or temperature == 0: # hugging face exception raised when temp==0 # Failed: Error occurred: HuggingfaceException - Input validation error: `temperature` must be strictly positive temperature = 0.01 optional_params["temperature"] = temperature if top_p is not None: optional_params["top_p"] = top_p if n is not None: optional_params["best_of"] = n optional_params["do_sample"] = ( True # Need to sample if you want best of for hf inference endpoints ) if stream is not None: optional_params["stream"] = stream if stop is not None: optional_params["stop"] = stop if max_tokens is not None: # HF TGI raises the following exception when max_new_tokens==0 # Failed: Error occurred: HuggingfaceException - Input validation error: `max_new_tokens` must be strictly positive if max_tokens == 0: max_tokens = 1 optional_params["max_new_tokens"] = max_tokens elif custom_llm_provider == "bedrock": supported_params = get_supported_openai_params( model=model, custom_llm_provider=custom_llm_provider ) if "ai21" in model: _check_valid_arg(supported_params=supported_params) # params "maxTokens":200,"temperature":0,"topP":250,"stop_sequences":[], # https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/providers?model=j2-ultra if max_tokens is not None: optional_params["maxTokens"] = max_tokens if temperature is not None: optional_params["temperature"] = temperature if top_p is not None: optional_params["topP"] = top_p if stream: optional_params["stream"] = stream elif "anthropic" in model: _check_valid_arg(supported_params=supported_params) # anthropic params on bedrock # \"max_tokens_to_sample\":300,\"temperature\":0.5,\"top_p\":1,\"stop_sequences\":[\"\\\\n\\\\nHuman:\"]}" if model.startswith("anthropic.claude-3"): optional_params = ( litellm.AmazonAnthropicClaude3Config().map_openai_params( non_default_params=non_default_params, optional_params=optional_params, ) ) else: optional_params = litellm.AmazonAnthropicConfig().map_openai_params( non_default_params=non_default_params, optional_params=optional_params, ) elif "amazon" in model: # amazon titan llms _check_valid_arg(supported_params=supported_params) # see https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/providers?model=titan-large if max_tokens is not None: optional_params["maxTokenCount"] = max_tokens if temperature is 
not None: optional_params["temperature"] = temperature if stop is not None: filtered_stop = _map_and_modify_arg( {"stop": stop}, provider="bedrock", model=model ) optional_params["stopSequences"] = filtered_stop["stop"] if top_p is not None: optional_params["topP"] = top_p if stream: optional_params["stream"] = stream elif "meta" in model: # amazon / meta llms _check_valid_arg(supported_params=supported_params) # see https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/providers?model=titan-large if max_tokens is not None: optional_params["max_gen_len"] = max_tokens if temperature is not None: optional_params["temperature"] = temperature if top_p is not None: optional_params["top_p"] = top_p if stream: optional_params["stream"] = stream elif "cohere" in model: # cohere models on bedrock _check_valid_arg(supported_params=supported_params) # handle cohere params if stream: optional_params["stream"] = stream if temperature is not None: optional_params["temperature"] = temperature if max_tokens is not None: optional_params["max_tokens"] = max_tokens elif "mistral" in model: _check_valid_arg(supported_params=supported_params) # mistral params on bedrock # \"max_tokens\":400,\"temperature\":0.7,\"top_p\":0.7,\"stop\":[\"\\\\n\\\\nHuman:\"]}" if max_tokens is not None: optional_params["max_tokens"] = max_tokens if temperature is not None: optional_params["temperature"] = temperature if top_p is not None: optional_params["top_p"] = top_p if stop is not None: optional_params["stop"] = stop if stream is not None: optional_params["stream"] = stream elif custom_llm_provider == "aleph_alpha": supported_params = [ "max_tokens", "stream", "top_p", "temperature", "presence_penalty", "frequency_penalty", "n", "stop", ] _check_valid_arg(supported_params=supported_params) if max_tokens is not None: optional_params["maximum_tokens"] = max_tokens if stream: optional_params["stream"] = stream if temperature is not None: optional_params["temperature"] = temperature if top_p is not None: optional_params["top_p"] = top_p if presence_penalty is not None: optional_params["presence_penalty"] = presence_penalty if frequency_penalty is not None: optional_params["frequency_penalty"] = frequency_penalty if n is not None: optional_params["n"] = n if stop is not None: optional_params["stop_sequences"] = stop elif custom_llm_provider == "cloudflare": # https://developers.cloudflare.com/workers-ai/models/text-generation/#input supported_params = get_supported_openai_params( model=model, custom_llm_provider=custom_llm_provider ) _check_valid_arg(supported_params=supported_params) if max_tokens is not None: optional_params["max_tokens"] = max_tokens if stream is not None: optional_params["stream"] = stream elif custom_llm_provider == "ollama": supported_params = get_supported_openai_params( model=model, custom_llm_provider=custom_llm_provider ) _check_valid_arg(supported_params=supported_params) if max_tokens is not None: optional_params["num_predict"] = max_tokens if stream: optional_params["stream"] = stream if temperature is not None: optional_params["temperature"] = temperature if top_p is not None: optional_params["top_p"] = top_p if frequency_penalty is not None: optional_params["repeat_penalty"] = frequency_penalty if stop is not None: optional_params["stop"] = stop if response_format is not None and response_format["type"] == "json_object": optional_params["format"] = "json" elif custom_llm_provider == "ollama_chat": supported_params = litellm.OllamaChatConfig().get_supported_openai_params() 
_check_valid_arg(supported_params=supported_params) optional_params = litellm.OllamaChatConfig().map_openai_params( non_default_params=non_default_params, optional_params=optional_params ) elif custom_llm_provider == "nlp_cloud": supported_params = get_supported_openai_params( model=model, custom_llm_provider=custom_llm_provider ) _check_valid_arg(supported_params=supported_params) if max_tokens is not None: optional_params["max_length"] = max_tokens if stream: optional_params["stream"] = stream if temperature is not None: optional_params["temperature"] = temperature if top_p is not None: optional_params["top_p"] = top_p if presence_penalty is not None: optional_params["presence_penalty"] = presence_penalty if frequency_penalty is not None: optional_params["frequency_penalty"] = frequency_penalty if n is not None: optional_params["num_return_sequences"] = n if stop is not None: optional_params["stop_sequences"] = stop elif custom_llm_provider == "petals": supported_params = get_supported_openai_params( model=model, custom_llm_provider=custom_llm_provider ) _check_valid_arg(supported_params=supported_params) # max_new_tokens=1,temperature=0.9, top_p=0.6 if max_tokens is not None: optional_params["max_new_tokens"] = max_tokens if temperature is not None: optional_params["temperature"] = temperature if top_p is not None: optional_params["top_p"] = top_p if stream: optional_params["stream"] = stream elif custom_llm_provider == "deepinfra": supported_params = get_supported_openai_params( model=model, custom_llm_provider=custom_llm_provider ) _check_valid_arg(supported_params=supported_params) if temperature is not None: if ( temperature == 0 and model == "mistralai/Mistral-7B-Instruct-v0.1" ): # this model does no support temperature == 0 temperature = 0.0001 # close to 0 optional_params["temperature"] = temperature if top_p: optional_params["top_p"] = top_p if n: optional_params["n"] = n if stream: optional_params["stream"] = stream if stop: optional_params["stop"] = stop if max_tokens: optional_params["max_tokens"] = max_tokens if presence_penalty: optional_params["presence_penalty"] = presence_penalty if frequency_penalty: optional_params["frequency_penalty"] = frequency_penalty if logit_bias: optional_params["logit_bias"] = logit_bias if user: optional_params["user"] = user elif custom_llm_provider == "perplexity": supported_params = get_supported_openai_params( model=model, custom_llm_provider=custom_llm_provider ) _check_valid_arg(supported_params=supported_params) if temperature is not None: if ( temperature == 0 and model == "mistral-7b-instruct" ): # this model does no support temperature == 0 temperature = 0.0001 # close to 0 optional_params["temperature"] = temperature if top_p: optional_params["top_p"] = top_p if stream: optional_params["stream"] = stream if max_tokens: optional_params["max_tokens"] = max_tokens if presence_penalty: optional_params["presence_penalty"] = presence_penalty if frequency_penalty: optional_params["frequency_penalty"] = frequency_penalty elif custom_llm_provider == "anyscale": supported_params = get_supported_openai_params( model=model, custom_llm_provider=custom_llm_provider ) if model in [ "mistralai/Mistral-7B-Instruct-v0.1", "mistralai/Mixtral-8x7B-Instruct-v0.1", ]: supported_params += [ "functions", "function_call", "tools", "tool_choice", "response_format", ] _check_valid_arg(supported_params=supported_params) optional_params = non_default_params if temperature is not None: if temperature == 0 and model in [ "mistralai/Mistral-7B-Instruct-v0.1", 
"mistralai/Mixtral-8x7B-Instruct-v0.1", ]: # this model does no support temperature == 0 temperature = 0.0001 # close to 0 optional_params["temperature"] = temperature if top_p: optional_params["top_p"] = top_p if stream: optional_params["stream"] = stream if max_tokens: optional_params["max_tokens"] = max_tokens elif custom_llm_provider == "mistral": supported_params = get_supported_openai_params( model=model, custom_llm_provider=custom_llm_provider ) _check_valid_arg(supported_params=supported_params) if temperature is not None: optional_params["temperature"] = temperature if top_p is not None: optional_params["top_p"] = top_p if stream is not None: optional_params["stream"] = stream if max_tokens is not None: optional_params["max_tokens"] = max_tokens if tools is not None: optional_params["tools"] = tools if tool_choice is not None: optional_params["tool_choice"] = tool_choice if response_format is not None: optional_params["response_format"] = response_format # check safe_mode, random_seed: https://docs.mistral.ai/api/#operation/createChatCompletion safe_mode = passed_params.pop("safe_mode", None) random_seed = passed_params.pop("random_seed", None) extra_body = {} if safe_mode is not None: extra_body["safe_mode"] = safe_mode if random_seed is not None: extra_body["random_seed"] = random_seed optional_params["extra_body"] = ( extra_body # openai client supports `extra_body` param ) elif custom_llm_provider == "groq": supported_params = get_supported_openai_params( model=model, custom_llm_provider=custom_llm_provider ) _check_valid_arg(supported_params=supported_params) if temperature is not None: optional_params["temperature"] = temperature if max_tokens is not None: optional_params["max_tokens"] = max_tokens if top_p is not None: optional_params["top_p"] = top_p if stream is not None: optional_params["stream"] = stream if stop is not None: optional_params["stop"] = stop if tools is not None: optional_params["tools"] = tools if tool_choice is not None: optional_params["tool_choice"] = tool_choice if response_format is not None: optional_params["response_format"] = response_format if seed is not None: optional_params["seed"] = seed elif custom_llm_provider == "deepseek": supported_params = get_supported_openai_params( model=model, custom_llm_provider=custom_llm_provider ) _check_valid_arg(supported_params=supported_params) if frequency_penalty is not None: optional_params["frequency_penalty"] = frequency_penalty if max_tokens is not None: optional_params["max_tokens"] = max_tokens if presence_penalty is not None: optional_params["presence_penalty"] = presence_penalty if stop is not None: optional_params["stop"] = stop if stream is not None: optional_params["stream"] = stream if temperature is not None: optional_params["temperature"] = temperature if logprobs is not None: optional_params["logprobs"] = logprobs if top_logprobs is not None: optional_params["top_logprobs"] = top_logprobs elif custom_llm_provider == "openrouter": supported_params = get_supported_openai_params( model=model, custom_llm_provider=custom_llm_provider ) _check_valid_arg(supported_params=supported_params) if functions is not None: optional_params["functions"] = functions if function_call is not None: optional_params["function_call"] = function_call if temperature is not None: optional_params["temperature"] = temperature if top_p is not None: optional_params["top_p"] = top_p if n is not None: optional_params["n"] = n if stream is not None: optional_params["stream"] = stream if stop is not None: 
optional_params["stop"] = stop if max_tokens is not None: optional_params["max_tokens"] = max_tokens if presence_penalty is not None: optional_params["presence_penalty"] = presence_penalty if frequency_penalty is not None: optional_params["frequency_penalty"] = frequency_penalty if logit_bias is not None: optional_params["logit_bias"] = logit_bias if user is not None: optional_params["user"] = user if response_format is not None: optional_params["response_format"] = response_format if seed is not None: optional_params["seed"] = seed if tools is not None: optional_params["tools"] = tools if tool_choice is not None: optional_params["tool_choice"] = tool_choice if max_retries is not None: optional_params["max_retries"] = max_retries # OpenRouter-only parameters extra_body = {} transforms = passed_params.pop("transforms", None) models = passed_params.pop("models", None) route = passed_params.pop("route", None) if transforms is not None: extra_body["transforms"] = transforms if models is not None: extra_body["models"] = models if route is not None: extra_body["route"] = route optional_params["extra_body"] = ( extra_body # openai client supports `extra_body` param ) elif custom_llm_provider == "watsonx": supported_params = get_supported_openai_params( model=model, custom_llm_provider=custom_llm_provider ) _check_valid_arg(supported_params=supported_params) if max_tokens is not None: optional_params["max_new_tokens"] = max_tokens if stream: optional_params["stream"] = stream if temperature is not None: optional_params["temperature"] = temperature if top_p is not None: optional_params["top_p"] = top_p if frequency_penalty is not None: optional_params["repetition_penalty"] = frequency_penalty if seed is not None: optional_params["random_seed"] = seed if stop is not None: optional_params["stop_sequences"] = stop # WatsonX-only parameters extra_body = {} if "decoding_method" in passed_params: extra_body["decoding_method"] = passed_params.pop("decoding_method") if "min_tokens" in passed_params or "min_new_tokens" in passed_params: extra_body["min_new_tokens"] = passed_params.pop( "min_tokens", passed_params.pop("min_new_tokens") ) if "top_k" in passed_params: extra_body["top_k"] = passed_params.pop("top_k") if "truncate_input_tokens" in passed_params: extra_body["truncate_input_tokens"] = passed_params.pop( "truncate_input_tokens" ) if "length_penalty" in passed_params: extra_body["length_penalty"] = passed_params.pop("length_penalty") if "time_limit" in passed_params: extra_body["time_limit"] = passed_params.pop("time_limit") if "return_options" in passed_params: extra_body["return_options"] = passed_params.pop("return_options") optional_params["extra_body"] = ( extra_body # openai client supports `extra_body` param ) else: # assume passing in params for openai/azure openai supported_params = get_supported_openai_params( model=model, custom_llm_provider="openai" ) _check_valid_arg(supported_params=supported_params) if functions is not None: optional_params["functions"] = functions if function_call is not None: optional_params["function_call"] = function_call if temperature is not None: optional_params["temperature"] = temperature if top_p is not None: optional_params["top_p"] = top_p if n is not None: optional_params["n"] = n if stream is not None: optional_params["stream"] = stream if stream_options is not None: optional_params["stream_options"] = stream_options if stop is not None: optional_params["stop"] = stop if max_tokens is not None: optional_params["max_tokens"] = max_tokens if 
presence_penalty is not None: optional_params["presence_penalty"] = presence_penalty if frequency_penalty is not None: optional_params["frequency_penalty"] = frequency_penalty if logit_bias is not None: optional_params["logit_bias"] = logit_bias if user is not None: optional_params["user"] = user if response_format is not None: optional_params["response_format"] = response_format if seed is not None: optional_params["seed"] = seed if tools is not None: optional_params["tools"] = tools if tool_choice is not None: optional_params["tool_choice"] = tool_choice if max_retries is not None: optional_params["max_retries"] = max_retries if logprobs is not None: optional_params["logprobs"] = logprobs if top_logprobs is not None: optional_params["top_logprobs"] = top_logprobs if extra_headers is not None: optional_params["extra_headers"] = extra_headers if custom_llm_provider in ["openai", "azure"] + litellm.openai_compatible_providers: # for openai, azure we should pass the extra/passed params within `extra_body` https://github.com/openai/openai-python/blob/ac33853ba10d13ac149b1fa3ca6dba7d613065c9/src/openai/resources/models.py#L46 extra_body = passed_params.pop("extra_body", {}) for k in passed_params.keys(): if k not in default_params.keys(): extra_body[k] = passed_params[k] optional_params["extra_body"] = extra_body else: # if user passed in non-default kwargs for specific providers/models, pass them along for k in passed_params.keys(): if k not in default_params.keys(): optional_params[k] = passed_params[k] print_verbose(f"Final returned optional params: {optional_params}") return optional_params
(model: str, functions=None, function_call=None, temperature=None, top_p=None, n=None, stream=False, stream_options=None, stop=None, max_tokens=None, presence_penalty=None, frequency_penalty=None, logit_bias=None, user=None, custom_llm_provider='', response_format=None, seed=None, tools=None, tool_choice=None, max_retries=None, logprobs=None, top_logprobs=None, extra_headers=None, **kwargs)
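A minimal usage sketch of the parameter mapping above, assuming the function is imported from `litellm.utils` like the neighbouring entries; the model name and values are illustrative. Provider-only options (e.g. Mistral's `safe_mode` / `random_seed`) are routed into `extra_body` rather than sent as top-level OpenAI params.
```python
# Hedged sketch: translate OpenAI-style kwargs for the "mistral" provider branch above.
# The model name and values are illustrative assumptions, not taken from this entry.
from litellm.utils import get_optional_params

optional_params = get_optional_params(
    model="mistral-medium",            # assumed model name
    custom_llm_provider="mistral",
    temperature=0.2,
    max_tokens=256,
)
# temperature/max_tokens pass through; Mistral-only options such as safe_mode /
# random_seed (if supplied) would land in optional_params["extra_body"].
print(optional_params)
```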
64,314
litellm.utils
get_optional_params_embeddings
null
def get_optional_params_embeddings(
    # 2 optional params
    model=None,
    user=None,
    encoding_format=None,
    dimensions=None,
    custom_llm_provider="",
    **kwargs,
):
    # retrieve all parameters passed to the function
    passed_params = locals()
    custom_llm_provider = passed_params.pop("custom_llm_provider", None)
    special_params = passed_params.pop("kwargs")
    for k, v in special_params.items():
        passed_params[k] = v

    default_params = {"user": None, "encoding_format": None, "dimensions": None}

    non_default_params = {
        k: v
        for k, v in passed_params.items()
        if (k in default_params and v != default_params[k])
    }

    ## raise exception if non-default value passed for non-openai/azure embedding calls
    if custom_llm_provider == "openai":
        # `dimensions` is only supported in `text-embedding-3` and later models
        if (
            model is not None
            and "text-embedding-3" not in model
            and "dimensions" in non_default_params.keys()
        ):
            raise UnsupportedParamsError(
                status_code=500,
                message="Setting `dimensions` is only supported for OpenAI `text-embedding-3` and later models. To drop it from the call, set `litellm.drop_params = True`.",
            )

    if custom_llm_provider == "triton":
        keys = list(non_default_params.keys())
        for k in keys:
            non_default_params.pop(k, None)
        final_params = {**non_default_params, **kwargs}
        return final_params

    if custom_llm_provider == "vertex_ai":
        if len(non_default_params.keys()) > 0:
            if litellm.drop_params is True:  # drop the unsupported non-default values
                keys = list(non_default_params.keys())
                for k in keys:
                    non_default_params.pop(k, None)
                final_params = {**non_default_params, **kwargs}
                return final_params
            raise UnsupportedParamsError(
                status_code=500,
                message=f"Setting user/encoding format is not supported by {custom_llm_provider}. To drop it from the call, set `litellm.drop_params = True`.",
            )

    if custom_llm_provider == "bedrock":
        # if dimensions is in non_default_params -> pass it for model=bedrock/amazon.titan-embed-text-v2
        if (
            "dimensions" in non_default_params.keys()
            and "amazon.titan-embed-text-v2" in model
        ):
            kwargs["dimensions"] = non_default_params["dimensions"]
            non_default_params.pop("dimensions", None)

        if len(non_default_params.keys()) > 0:
            if litellm.drop_params is True:  # drop the unsupported non-default values
                keys = list(non_default_params.keys())
                for k in keys:
                    non_default_params.pop(k, None)
                final_params = {**non_default_params, **kwargs}
                return final_params
            raise UnsupportedParamsError(
                status_code=500,
                message=f"Setting user/encoding format is not supported by {custom_llm_provider}. To drop it from the call, set `litellm.drop_params = True`.",
            )
        return {**non_default_params, **kwargs}

    if (
        custom_llm_provider != "openai"
        and custom_llm_provider != "azure"
        and custom_llm_provider not in litellm.openai_compatible_providers
    ):
        if len(non_default_params.keys()) > 0:
            if litellm.drop_params is True:  # drop the unsupported non-default values
                keys = list(non_default_params.keys())
                for k in keys:
                    non_default_params.pop(k, None)
            else:
                raise UnsupportedParamsError(
                    status_code=500,
                    message=f"Setting user/encoding format is not supported by {custom_llm_provider}. To drop it from the call, set `litellm.drop_params = True`.",
                )

    final_params = {**non_default_params, **kwargs}
    return final_params
(model=None, user=None, encoding_format=None, dimensions=None, custom_llm_provider='', **kwargs)
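A short sketch of the `drop_params` behaviour visible above: for a provider that supports neither `user` nor `encoding_format`, unsupported keys are silently dropped when `litellm.drop_params` is enabled. The model name is an illustrative assumption.
```python
# Hedged sketch: unsupported embedding params are dropped instead of raising.
import litellm
from litellm.utils import get_optional_params_embeddings

litellm.drop_params = True  # otherwise an UnsupportedParamsError is raised
params = get_optional_params_embeddings(
    model="textembedding-gecko",     # assumed Vertex AI model name
    user="user-123",                 # not supported by vertex_ai -> dropped
    custom_llm_provider="vertex_ai",
)
print(params)  # expected: {}
```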
64,315
litellm.utils
get_optional_params_image_gen
null
def get_optional_params_image_gen(
    n: Optional[int] = None,
    quality: Optional[str] = None,
    response_format: Optional[str] = None,
    size: Optional[str] = None,
    style: Optional[str] = None,
    user: Optional[str] = None,
    custom_llm_provider: Optional[str] = None,
    **kwargs,
):
    # retrieve all parameters passed to the function
    passed_params = locals()
    custom_llm_provider = passed_params.pop("custom_llm_provider")
    special_params = passed_params.pop("kwargs")
    for k, v in special_params.items():
        passed_params[k] = v

    default_params = {
        "n": None, "quality": None, "response_format": None,
        "size": None, "style": None, "user": None,
    }

    non_default_params = {
        k: v
        for k, v in passed_params.items()
        if (k in default_params and v != default_params[k])
    }
    optional_params = {}

    ## raise exception if non-default value passed for non-openai/azure embedding calls
    def _check_valid_arg(supported_params):
        if len(non_default_params.keys()) > 0:
            keys = list(non_default_params.keys())
            for k in keys:
                if (
                    litellm.drop_params is True and k not in supported_params
                ):  # drop the unsupported non-default values
                    non_default_params.pop(k, None)
                elif k not in supported_params:
                    raise UnsupportedParamsError(
                        status_code=500,
                        message=f"Setting user/encoding format is not supported by {custom_llm_provider}. To drop it from the call, set `litellm.drop_params = True`.",
                    )
            return non_default_params

    if (
        custom_llm_provider == "openai"
        or custom_llm_provider == "azure"
        or custom_llm_provider in litellm.openai_compatible_providers
    ):
        optional_params = non_default_params
    elif custom_llm_provider == "bedrock":
        supported_params = ["size"]
        _check_valid_arg(supported_params=supported_params)
        if size is not None:
            width, height = size.split("x")
            optional_params["width"] = int(width)
            optional_params["height"] = int(height)

    for k in passed_params.keys():
        if k not in default_params.keys():
            optional_params[k] = passed_params[k]
    return optional_params
(n: Optional[int] = None, quality: Optional[str] = None, response_format: Optional[str] = None, size: Optional[str] = None, style: Optional[str] = None, user: Optional[str] = None, custom_llm_provider: Optional[str] = None, **kwargs)
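A short sketch of the Bedrock branch above, where only `size` is supported and is split into integer `width`/`height` fields.
```python
# Hedged sketch: Bedrock image generation only honours `size`, split into width/height.
from litellm.utils import get_optional_params_image_gen

params = get_optional_params_image_gen(
    size="1024x1024",
    custom_llm_provider="bedrock",
)
print(params)  # expected: {"width": 1024, "height": 1024}
```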
64,316
litellm.utils
get_secret
null
def get_secret( secret_name: str, default_value: Optional[Union[str, bool]] = None, ): key_management_system = litellm._key_management_system key_management_settings = litellm._key_management_settings if secret_name.startswith("os.environ/"): secret_name = secret_name.replace("os.environ/", "") # Example: oidc/google/https://bedrock-runtime.us-east-1.amazonaws.com/model/stability.stable-diffusion-xl-v1/invoke if secret_name.startswith("oidc/"): secret_name_split = secret_name.replace("oidc/", "") oidc_provider, oidc_aud = secret_name_split.split("/", 1) # TODO: Add caching for HTTP requests if oidc_provider == "google": oidc_token = oidc_cache.get_cache(key=secret_name) if oidc_token is not None: return oidc_token oidc_client = HTTPHandler(timeout=httpx.Timeout(timeout=600.0, connect=5.0)) # https://cloud.google.com/compute/docs/instances/verifying-instance-identity#request_signature response = oidc_client.get( "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/identity", params={"audience": oidc_aud}, headers={"Metadata-Flavor": "Google"}, ) if response.status_code == 200: oidc_token = response.text oidc_cache.set_cache(key=secret_name, value=oidc_token, ttl=3600 - 60) return oidc_token else: raise ValueError("Google OIDC provider failed") elif oidc_provider == "circleci": # https://circleci.com/docs/openid-connect-tokens/ env_secret = os.getenv("CIRCLE_OIDC_TOKEN") if env_secret is None: raise ValueError("CIRCLE_OIDC_TOKEN not found in environment") return env_secret elif oidc_provider == "circleci_v2": # https://circleci.com/docs/openid-connect-tokens/ env_secret = os.getenv("CIRCLE_OIDC_TOKEN_V2") if env_secret is None: raise ValueError("CIRCLE_OIDC_TOKEN_V2 not found in environment") return env_secret elif oidc_provider == "github": # https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-cloud-providers#using-custom-actions actions_id_token_request_url = os.getenv("ACTIONS_ID_TOKEN_REQUEST_URL") actions_id_token_request_token = os.getenv("ACTIONS_ID_TOKEN_REQUEST_TOKEN") if ( actions_id_token_request_url is None or actions_id_token_request_token is None ): raise ValueError( "ACTIONS_ID_TOKEN_REQUEST_URL or ACTIONS_ID_TOKEN_REQUEST_TOKEN not found in environment" ) oidc_token = oidc_cache.get_cache(key=secret_name) if oidc_token is not None: return oidc_token oidc_client = HTTPHandler(timeout=httpx.Timeout(timeout=600.0, connect=5.0)) response = oidc_client.get( actions_id_token_request_url, params={"audience": oidc_aud}, headers={ "Authorization": f"Bearer {actions_id_token_request_token}", "Accept": "application/json; api-version=2.0", }, ) if response.status_code == 200: oidc_token = response.text["value"] oidc_cache.set_cache(key=secret_name, value=oidc_token, ttl=300 - 5) return oidc_token else: raise ValueError("Github OIDC provider failed") else: raise ValueError("Unsupported OIDC provider") try: if litellm.secret_manager_client is not None: try: client = litellm.secret_manager_client key_manager = "local" if key_management_system is not None: key_manager = key_management_system.value if key_management_settings is not None: if ( secret_name not in key_management_settings.hosted_keys ): # allow user to specify which keys to check in hosted key manager key_manager = "local" if ( key_manager == KeyManagementSystem.AZURE_KEY_VAULT or type(client).__module__ + "." 
+ type(client).__name__ == "azure.keyvault.secrets._client.SecretClient" ): # support Azure Secret Client - from azure.keyvault.secrets import SecretClient secret = client.get_secret(secret_name).value elif ( key_manager == KeyManagementSystem.GOOGLE_KMS or client.__class__.__name__ == "KeyManagementServiceClient" ): encrypted_secret: Any = os.getenv(secret_name) if encrypted_secret is None: raise ValueError( f"Google KMS requires the encrypted secret to be in the environment!" ) b64_flag = _is_base64(encrypted_secret) if b64_flag == True: # if passed in as encoded b64 string encrypted_secret = base64.b64decode(encrypted_secret) if not isinstance(encrypted_secret, bytes): # If it's not, assume it's a string and encode it to bytes ciphertext = eval( encrypted_secret.encode() ) # assuming encrypted_secret is something like - b'\n$\x00D\xac\xb4/t)07\xe5\xf6..' else: ciphertext = encrypted_secret response = client.decrypt( request={ "name": litellm._google_kms_resource_name, "ciphertext": ciphertext, } ) secret = response.plaintext.decode( "utf-8" ) # assumes the original value was encoded with utf-8 elif key_manager == KeyManagementSystem.AWS_SECRET_MANAGER.value: try: get_secret_value_response = client.get_secret_value( SecretId=secret_name ) print_verbose( f"get_secret_value_response: {get_secret_value_response}" ) except Exception as e: print_verbose(f"An error occurred - {str(e)}") # For a list of exceptions thrown, see # https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_GetSecretValue.html raise e # assume there is 1 secret per secret_name secret_dict = json.loads(get_secret_value_response["SecretString"]) print_verbose(f"secret_dict: {secret_dict}") for k, v in secret_dict.items(): secret = v print_verbose(f"secret: {secret}") else: # assume the default is infisicial client secret = client.get_secret(secret_name).secret_value except Exception as e: # check if it's in os.environ print_verbose(f"An exception occurred - {str(e)}") secret = os.getenv(secret_name) try: secret_value_as_bool = ast.literal_eval(secret) if isinstance(secret_value_as_bool, bool): return secret_value_as_bool else: return secret except: return secret else: secret = os.environ.get(secret_name) try: secret_value_as_bool = ( ast.literal_eval(secret) if secret is not None else None ) if isinstance(secret_value_as_bool, bool): return secret_value_as_bool else: return secret except: return secret except Exception as e: if default_value is not None: return default_value else: raise e
(secret_name: str, default_value: Union[bool, str, NoneType] = None)
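A minimal sketch of the plain-environment path above (no secret manager configured); the variable names and values are illustrative. Values that parse as Python literals are coerced, so `"True"` comes back as a bool.
```python
# Hedged sketch: resolving env-var backed secrets; names/values are illustrative.
import os
from litellm.utils import get_secret

os.environ["EXAMPLE_API_KEY"] = "sk-example"
os.environ["EXAMPLE_FLAG"] = "True"

print(get_secret("os.environ/EXAMPLE_API_KEY"))  # -> "sk-example"
print(get_secret("EXAMPLE_FLAG"))                # -> True (coerced via ast.literal_eval)
```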
64,317
litellm.utils
get_supported_openai_params
Returns the supported openai params for a given model + provider Example: ``` get_supported_openai_params(model="anthropic.claude-3", custom_llm_provider="bedrock") ```
def get_supported_openai_params(model: str, custom_llm_provider: str):
    """
    Returns the supported openai params for a given model + provider

    Example:
    ```
    get_supported_openai_params(model="anthropic.claude-3", custom_llm_provider="bedrock")
    ```
    """
    if custom_llm_provider == "bedrock":
        if model.startswith("anthropic.claude-3"):
            return litellm.AmazonAnthropicClaude3Config().get_supported_openai_params()
        elif model.startswith("anthropic"):
            return litellm.AmazonAnthropicConfig().get_supported_openai_params()
        elif model.startswith("ai21"):
            return ["max_tokens", "temperature", "top_p", "stream"]
        elif model.startswith("amazon"):
            return ["max_tokens", "temperature", "stop", "top_p", "stream"]
        elif model.startswith("meta"):
            return ["max_tokens", "temperature", "top_p", "stream"]
        elif model.startswith("cohere"):
            return ["stream", "temperature", "max_tokens"]
        elif model.startswith("mistral"):
            return ["max_tokens", "temperature", "stop", "top_p", "stream"]
    elif custom_llm_provider == "ollama_chat":
        return litellm.OllamaChatConfig().get_supported_openai_params()
    elif custom_llm_provider == "anthropic":
        return litellm.AnthropicConfig().get_supported_openai_params()
    elif custom_llm_provider == "groq":
        return [
            "temperature", "max_tokens", "top_p", "stream", "stop",
            "tools", "tool_choice", "response_format", "seed",
        ]
    elif custom_llm_provider == "deepseek":
        return [
            # https://platform.deepseek.com/api-docs/api/create-chat-completion
            "frequency_penalty", "max_tokens", "presence_penalty", "stop", "stream",
            "temperature", "top_p", "logprobs", "top_logprobs",
        ]
    elif custom_llm_provider == "cohere":
        return [
            "stream", "temperature", "max_tokens", "logit_bias", "top_p",
            "frequency_penalty", "presence_penalty", "stop", "n",
        ]
    elif custom_llm_provider == "cohere_chat":
        return [
            "stream", "temperature", "max_tokens", "top_p", "frequency_penalty",
            "presence_penalty", "stop", "n", "tools", "tool_choice", "seed",
        ]
    elif custom_llm_provider == "maritalk":
        return [
            "stream", "temperature", "max_tokens", "top_p", "presence_penalty", "stop",
        ]
    elif custom_llm_provider == "openai" or custom_llm_provider == "azure":
        return [
            "functions", "function_call", "temperature", "top_p", "n", "stream",
            "stream_options", "stop", "max_tokens", "presence_penalty",
            "frequency_penalty", "logit_bias", "user", "response_format", "seed",
            "tools", "tool_choice", "max_retries", "logprobs", "top_logprobs",
            "extra_headers",
        ]
    elif custom_llm_provider == "openrouter":
        return [
            "functions", "function_call", "temperature", "top_p", "n", "stream",
            "stop", "max_tokens", "presence_penalty", "frequency_penalty",
            "logit_bias", "user", "response_format", "seed", "tools", "tool_choice",
            "max_retries",
        ]
    elif custom_llm_provider == "mistral":
        return [
            "temperature", "top_p", "stream", "max_tokens", "tools", "tool_choice",
            "response_format",
        ]
    elif custom_llm_provider == "replicate":
        return ["stream", "temperature", "max_tokens", "top_p", "stop", "seed"]
    elif custom_llm_provider == "huggingface":
        return litellm.HuggingfaceConfig().get_supported_openai_params()
    elif custom_llm_provider == "together_ai":
        return [
            "stream", "temperature", "max_tokens", "top_p", "stop",
            "frequency_penalty", "tools", "tool_choice",
        ]
    elif custom_llm_provider == "ai21":
        return [
            "stream", "n", "temperature", "max_tokens", "top_p", "stop",
            "frequency_penalty", "presence_penalty",
        ]
    elif custom_llm_provider == "palm" or custom_llm_provider == "gemini":
        return ["temperature", "top_p", "stream", "n", "stop", "max_tokens"]
    elif custom_llm_provider == "vertex_ai":
        return litellm.VertexAIConfig().get_supported_openai_params()
    elif custom_llm_provider == "sagemaker":
        return ["stream", "temperature", "max_tokens", "top_p", "stop", "n"]
    elif custom_llm_provider == "aleph_alpha":
        return [
            "max_tokens", "stream", "top_p", "temperature", "presence_penalty",
            "frequency_penalty", "n", "stop",
        ]
    elif custom_llm_provider == "cloudflare":
        return ["max_tokens", "stream"]
    elif custom_llm_provider == "ollama":
        return [
            "max_tokens", "stream", "top_p", "temperature", "frequency_penalty",
            "stop", "response_format",
        ]
    elif custom_llm_provider == "nlp_cloud":
        return [
            "max_tokens", "stream", "temperature", "top_p", "presence_penalty",
            "frequency_penalty", "n", "stop",
        ]
    elif custom_llm_provider == "petals":
        return ["max_tokens", "temperature", "top_p", "stream"]
    elif custom_llm_provider == "deepinfra":
        return [
            "temperature", "top_p", "n", "stream", "stop", "max_tokens",
            "presence_penalty", "frequency_penalty", "logit_bias", "user",
            "response_format",
        ]
    elif custom_llm_provider == "perplexity":
        return [
            "temperature", "top_p", "stream", "max_tokens", "presence_penalty",
            "frequency_penalty",
        ]
    elif custom_llm_provider == "anyscale":
        return [
            "temperature", "top_p", "stream", "max_tokens", "stop",
            "frequency_penalty", "presence_penalty",
        ]
    elif custom_llm_provider == "watsonx":
        return litellm.IBMWatsonXAIConfig().get_supported_openai_params()
(model: str, custom_llm_provider: str)
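A short sketch showing how the supported-params list can be used to filter request kwargs before a call; the Bedrock model id is an illustrative assumption.
```python
# Hedged sketch: drop request kwargs a provider does not understand.
from litellm.utils import get_supported_openai_params

supported = get_supported_openai_params(
    model="anthropic.claude-3-sonnet",   # assumed Bedrock model id
    custom_llm_provider="bedrock",
)
requested = {"temperature": 0.1, "max_tokens": 512, "logit_bias": {42: 5}}
safe_kwargs = {k: v for k, v in requested.items() if k in supported}
```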
64,318
litellm.assistants.main
get_thread
Get the thread object, given a thread_id
def get_thread(
    custom_llm_provider: Literal["openai"],
    thread_id: str,
    client: Optional[OpenAI] = None,
    **kwargs,
) -> Thread:
    """Get the thread object, given a thread_id"""
    optional_params = GenericLiteLLMParams(**kwargs)

    ### TIMEOUT LOGIC ###
    timeout = optional_params.timeout or kwargs.get("request_timeout", 600) or 600  # set timeout for 10 minutes by default
    if (
        timeout is not None
        and isinstance(timeout, httpx.Timeout)
        and supports_httpx_timeout(custom_llm_provider) == False
    ):
        read_timeout = timeout.read or 600
        timeout = read_timeout  # default 10 min timeout
    elif timeout is not None and not isinstance(timeout, httpx.Timeout):
        timeout = float(timeout)  # type: ignore
    elif timeout is None:
        timeout = 600.0

    response: Optional[Thread] = None
    if custom_llm_provider == "openai":
        api_base = (
            optional_params.api_base  # for deepinfra/perplexity/anyscale/groq we check in get_llm_provider and pass in the api base from there
            or litellm.api_base
            or os.getenv("OPENAI_API_BASE")
            or "https://api.openai.com/v1"
        )
        organization = (
            optional_params.organization
            or litellm.organization
            or os.getenv("OPENAI_ORGANIZATION", None)
            or None  # default - https://github.com/openai/openai-python/blob/284c1799070c723c6a553337134148a7ab088dd8/openai/util.py#L105
        )
        # set API KEY
        api_key = (
            optional_params.api_key
            or litellm.api_key  # for deepinfra/perplexity/anyscale we check in get_llm_provider and pass in the api key from there
            or litellm.openai_key
            or os.getenv("OPENAI_API_KEY")
        )
        response = openai_assistants_api.get_thread(
            thread_id=thread_id,
            api_base=api_base,
            api_key=api_key,
            timeout=timeout,
            max_retries=optional_params.max_retries,
            organization=organization,
            client=client,
        )
    else:
        raise litellm.exceptions.BadRequestError(
            message="LiteLLM doesn't support {} for 'get_thread'. Only 'openai' is supported.".format(
                custom_llm_provider
            ),
            model="n/a",
            llm_provider=custom_llm_provider,
            response=httpx.Response(
                status_code=400,
                content="Unsupported provider",
                request=httpx.Request(
                    method="create_thread", url="https://github.com/BerriAI/litellm"
                ),  # type: ignore
            ),
        )
    return response
(custom_llm_provider: Literal['openai'], thread_id: str, client: Optional[openai.OpenAI] = None, **kwargs) -> litellm.types.llms.openai.Thread
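A minimal sketch of fetching an existing thread, assuming `OPENAI_API_KEY` is set in the environment (the function falls back to it, as shown above); the thread id is illustrative.
```python
# Hedged sketch: retrieve an existing OpenAI assistants thread; the id is illustrative
# and OPENAI_API_KEY is expected in the environment.
from litellm.assistants.main import get_thread

thread = get_thread(
    custom_llm_provider="openai",
    thread_id="thread_abc123",
)
print(thread.id)
```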
64,323
litellm
identify
null
def identify(event_details):
    # Store user in thread local data
    if "user" in event_details:
        _thread_context.user = event_details["user"]
(event_details)
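A one-line usage sketch: the stored user is later read back from litellm's thread-local context by logging and callback code.
```python
# Hedged sketch: tag the current thread with a user id for downstream logging.
from litellm import identify

identify({"user": "user-1234"})
```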
64,324
litellm.main
image_generation
Maps the https://api.openai.com/v1/images/generations endpoint. Currently supports just Azure + OpenAI.
def embedding( model, input=[], # Optional params dimensions: Optional[int] = None, timeout=600, # default to 10 minutes # set api_base, api_version, api_key api_base: Optional[str] = None, api_version: Optional[str] = None, api_key: Optional[str] = None, api_type: Optional[str] = None, caching: bool = False, user: Optional[str] = None, custom_llm_provider=None, litellm_call_id=None, litellm_logging_obj=None, logger_fn=None, **kwargs, ): """ Embedding function that calls an API to generate embeddings for the given input. Parameters: - model: The embedding model to use. - input: The input for which embeddings are to be generated. - dimensions: The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3 and later models. - timeout: The timeout value for the API call, default 10 mins - litellm_call_id: The call ID for litellm logging. - litellm_logging_obj: The litellm logging object. - logger_fn: The logger function. - api_base: Optional. The base URL for the API. - api_version: Optional. The version of the API. - api_key: Optional. The API key to use. - api_type: Optional. The type of the API. - caching: A boolean indicating whether to enable caching. - custom_llm_provider: The custom llm provider. Returns: - response: The response received from the API call. Raises: - exception_type: If an exception occurs during the API call. """ azure = kwargs.get("azure", None) client = kwargs.pop("client", None) rpm = kwargs.pop("rpm", None) tpm = kwargs.pop("tpm", None) max_parallel_requests = kwargs.pop("max_parallel_requests", None) model_info = kwargs.get("model_info", None) metadata = kwargs.get("metadata", None) encoding_format = kwargs.get("encoding_format", None) proxy_server_request = kwargs.get("proxy_server_request", None) aembedding = kwargs.get("aembedding", None) ### CUSTOM MODEL COST ### input_cost_per_token = kwargs.get("input_cost_per_token", None) output_cost_per_token = kwargs.get("output_cost_per_token", None) input_cost_per_second = kwargs.get("input_cost_per_second", None) output_cost_per_second = kwargs.get("output_cost_per_second", None) openai_params = [ "user", "dimensions", "request_timeout", "api_base", "api_version", "api_key", "deployment_id", "organization", "base_url", "default_headers", "timeout", "max_retries", "encoding_format", ] litellm_params = [ "metadata", "aembedding", "caching", "mock_response", "api_key", "api_version", "api_base", "force_timeout", "logger_fn", "verbose", "custom_llm_provider", "litellm_logging_obj", "litellm_call_id", "use_client", "id", "fallbacks", "azure", "headers", "model_list", "num_retries", "context_window_fallback_dict", "retry_policy", "roles", "final_prompt_value", "bos_token", "eos_token", "request_timeout", "complete_response", "self", "client", "rpm", "tpm", "max_parallel_requests", "input_cost_per_token", "output_cost_per_token", "input_cost_per_second", "output_cost_per_second", "hf_model_name", "proxy_server_request", "model_info", "preset_cache_key", "caching_groups", "ttl", "cache", "no-log", "region_name", "allowed_model_region", ] default_params = openai_params + litellm_params non_default_params = { k: v for k, v in kwargs.items() if k not in default_params } # model-specific params - pass them straight to the model/provider model, custom_llm_provider, dynamic_api_key, api_base = get_llm_provider( model=model, custom_llm_provider=custom_llm_provider, api_base=api_base, api_key=api_key, ) optional_params = get_optional_params_embeddings( model=model, user=user, 
dimensions=dimensions, encoding_format=encoding_format, custom_llm_provider=custom_llm_provider, **non_default_params, ) ### REGISTER CUSTOM MODEL PRICING -- IF GIVEN ### if input_cost_per_token is not None and output_cost_per_token is not None: litellm.register_model( { model: { "input_cost_per_token": input_cost_per_token, "output_cost_per_token": output_cost_per_token, "litellm_provider": custom_llm_provider, } } ) if input_cost_per_second is not None: # time based pricing just needs cost in place output_cost_per_second = output_cost_per_second or 0.0 litellm.register_model( { model: { "input_cost_per_second": input_cost_per_second, "output_cost_per_second": output_cost_per_second, "litellm_provider": custom_llm_provider, } } ) try: response = None logging = litellm_logging_obj logging.update_environment_variables( model=model, user=user, optional_params=optional_params, litellm_params={ "timeout": timeout, "azure": azure, "litellm_call_id": litellm_call_id, "logger_fn": logger_fn, "proxy_server_request": proxy_server_request, "model_info": model_info, "metadata": metadata, "aembedding": aembedding, "preset_cache_key": None, "stream_response": {}, }, ) if azure == True or custom_llm_provider == "azure": # azure configs api_type = get_secret("AZURE_API_TYPE") or "azure" api_base = api_base or litellm.api_base or get_secret("AZURE_API_BASE") api_version = ( api_version or litellm.api_version or get_secret("AZURE_API_VERSION") ) azure_ad_token = optional_params.pop("azure_ad_token", None) or get_secret( "AZURE_AD_TOKEN" ) api_key = ( api_key or litellm.api_key or litellm.azure_key or get_secret("AZURE_API_KEY") ) ## EMBEDDING CALL response = azure_chat_completions.embedding( model=model, input=input, api_base=api_base, api_key=api_key, api_version=api_version, azure_ad_token=azure_ad_token, logging_obj=logging, timeout=timeout, model_response=EmbeddingResponse(), optional_params=optional_params, client=client, aembedding=aembedding, ) elif ( model in litellm.open_ai_embedding_models or custom_llm_provider == "openai" ): api_base = ( api_base or litellm.api_base or get_secret("OPENAI_API_BASE") or "https://api.openai.com/v1" ) openai.organization = ( litellm.organization or get_secret("OPENAI_ORGANIZATION") or None # default - https://github.com/openai/openai-python/blob/284c1799070c723c6a553337134148a7ab088dd8/openai/util.py#L105 ) # set API KEY api_key = ( api_key or litellm.api_key or litellm.openai_key or get_secret("OPENAI_API_KEY") ) api_type = "openai" api_version = None ## EMBEDDING CALL response = openai_chat_completions.embedding( model=model, input=input, api_base=api_base, api_key=api_key, logging_obj=logging, timeout=timeout, model_response=EmbeddingResponse(), optional_params=optional_params, client=client, aembedding=aembedding, ) elif custom_llm_provider == "cohere": cohere_key = ( api_key or litellm.cohere_key or get_secret("COHERE_API_KEY") or get_secret("CO_API_KEY") or litellm.api_key ) response = cohere.embedding( model=model, input=input, optional_params=optional_params, encoding=encoding, api_key=cohere_key, logging_obj=logging, model_response=EmbeddingResponse(), ) elif custom_llm_provider == "huggingface": api_key = ( api_key or litellm.huggingface_key or get_secret("HUGGINGFACE_API_KEY") or litellm.api_key ) response = huggingface.embedding( model=model, input=input, encoding=encoding, api_key=api_key, api_base=api_base, logging_obj=logging, model_response=EmbeddingResponse(), ) elif custom_llm_provider == "bedrock": response = bedrock.embedding( model=model, 
input=input, encoding=encoding, logging_obj=logging, optional_params=optional_params, model_response=EmbeddingResponse(), ) elif custom_llm_provider == "triton": if api_base is None: raise ValueError( "api_base is required for triton. Please pass `api_base`" ) response = triton_chat_completions.embedding( model=model, input=input, api_base=api_base, api_key=api_key, logging_obj=logging, timeout=timeout, model_response=EmbeddingResponse(), optional_params=optional_params, client=client, aembedding=aembedding, ) elif custom_llm_provider == "vertex_ai": vertex_ai_project = ( optional_params.pop("vertex_project", None) or optional_params.pop("vertex_ai_project", None) or litellm.vertex_project or get_secret("VERTEXAI_PROJECT") or get_secret("VERTEX_PROJECT") ) vertex_ai_location = ( optional_params.pop("vertex_location", None) or optional_params.pop("vertex_ai_location", None) or litellm.vertex_location or get_secret("VERTEXAI_LOCATION") or get_secret("VERTEX_LOCATION") ) vertex_credentials = ( optional_params.pop("vertex_credentials", None) or optional_params.pop("vertex_ai_credentials", None) or get_secret("VERTEXAI_CREDENTIALS") or get_secret("VERTEX_CREDENTIALS") ) response = vertex_ai.embedding( model=model, input=input, encoding=encoding, logging_obj=logging, optional_params=optional_params, model_response=EmbeddingResponse(), vertex_project=vertex_ai_project, vertex_location=vertex_ai_location, vertex_credentials=vertex_credentials, aembedding=aembedding, print_verbose=print_verbose, ) elif custom_llm_provider == "oobabooga": response = oobabooga.embedding( model=model, input=input, encoding=encoding, api_base=api_base, logging_obj=logging, optional_params=optional_params, model_response=EmbeddingResponse(), ) elif custom_llm_provider == "ollama": api_base = ( litellm.api_base or api_base or get_secret("OLLAMA_API_BASE") or "http://localhost:11434" ) if isinstance(input, str): input = [input] if not all(isinstance(item, str) for item in input): raise litellm.BadRequestError( message=f"Invalid input for ollama embeddings. 
input={input}", model=model, # type: ignore llm_provider="ollama", # type: ignore ) ollama_embeddings_fn = ( ollama.ollama_aembeddings if aembedding else ollama.ollama_embeddings ) response = ollama_embeddings_fn( api_base=api_base, model=model, prompts=input, encoding=encoding, logging_obj=logging, optional_params=optional_params, model_response=EmbeddingResponse(), ) elif custom_llm_provider == "sagemaker": response = sagemaker.embedding( model=model, input=input, encoding=encoding, logging_obj=logging, optional_params=optional_params, model_response=EmbeddingResponse(), print_verbose=print_verbose, ) elif custom_llm_provider == "mistral": api_key = api_key or litellm.api_key or get_secret("MISTRAL_API_KEY") response = openai_chat_completions.embedding( model=model, input=input, api_base=api_base, api_key=api_key, logging_obj=logging, timeout=timeout, model_response=EmbeddingResponse(), optional_params=optional_params, client=client, aembedding=aembedding, ) elif custom_llm_provider == "voyage": api_key = api_key or litellm.api_key or get_secret("VOYAGE_API_KEY") response = openai_chat_completions.embedding( model=model, input=input, api_base=api_base, api_key=api_key, logging_obj=logging, timeout=timeout, model_response=EmbeddingResponse(), optional_params=optional_params, client=client, aembedding=aembedding, ) elif custom_llm_provider == "xinference": api_key = ( api_key or litellm.api_key or get_secret("XINFERENCE_API_KEY") or "stub-xinference-key" ) # xinference does not need an api key, pass a stub key if user did not set one api_base = ( api_base or litellm.api_base or get_secret("XINFERENCE_API_BASE") or "http://127.0.0.1:9997/v1" ) response = openai_chat_completions.embedding( model=model, input=input, api_base=api_base, api_key=api_key, logging_obj=logging, timeout=timeout, model_response=EmbeddingResponse(), optional_params=optional_params, client=client, aembedding=aembedding, ) elif custom_llm_provider == "watsonx": response = watsonx.IBMWatsonXAI().embedding( model=model, input=input, encoding=encoding, logging_obj=logging, optional_params=optional_params, model_response=EmbeddingResponse(), ) else: args = locals() raise ValueError(f"No valid embedding model args passed in - {args}") if response is not None and hasattr(response, "_hidden_params"): response._hidden_params["custom_llm_provider"] = custom_llm_provider return response except Exception as e: ## LOGGING logging.post_call( input=input, api_key=api_key, original_response=str(e), ) ## Map to OpenAI Exception raise exception_type( model=model, original_exception=e, custom_llm_provider=custom_llm_provider, extra_kwargs=kwargs, )
(prompt: str, model: Optional[str] = None, n: Optional[int] = None, quality: Optional[str] = None, response_format: Optional[str] = None, size: Optional[str] = None, style: Optional[str] = None, user: Optional[str] = None, timeout=600, api_key: Optional[str] = None, api_base: Optional[str] = None, api_version: Optional[str] = None, litellm_logging_obj=None, custom_llm_provider=None, **kwargs)
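A minimal call sketch based only on the signature shown above; the prompt and model name are illustrative assumptions, and an `OPENAI_API_KEY` is assumed to be configured in the environment.
```python
# Hedged sketch based on the image_generation signature above; values are illustrative.
from litellm.main import image_generation

result = image_generation(
    prompt="a watercolor painting of a lighthouse at dusk",
    model="dall-e-3",        # assumed OpenAI image model
    size="1024x1024",
    n=1,
)
```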