index
int64
0
731k
package
stringlengths
2
98
name
stringlengths
1
76
docstring
stringlengths
0
281k
code
stringlengths
4
1.07M
signature
stringlengths
2
42.8k
62,833
litellm.llms.bedrock
AmazonAnthropicClaude3Config
Reference: https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/providers?model=claude https://docs.anthropic.com/claude/docs/models-overview#model-comparison Supported Params for the Amazon / Anthropic Claude 3 models: - `max_tokens` Required (integer) max tokens. Default is 4096 - `anthropic_version` Required (string) version of anthropic for bedrock - e.g. "bedrock-2023-05-31" - `system` Optional (string) the system prompt, conversion from openai format to this is handled in factory.py - `temperature` Optional (float) The amount of randomness injected into the response - `top_p` Optional (float) Use nucleus sampling. - `top_k` Optional (int) Only sample from the top K options for each subsequent token - `stop_sequences` Optional (List[str]) Custom text sequences that cause the model to stop generating
class AmazonAnthropicClaude3Config:
    """
    Reference:
        https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/providers?model=claude
        https://docs.anthropic.com/claude/docs/models-overview#model-comparison

    Supported Params for the Amazon / Anthropic Claude 3 models:

    - `max_tokens` Required (integer) max tokens. Default is 4096
    - `anthropic_version` Required (string) version of anthropic for bedrock - e.g. "bedrock-2023-05-31"
    - `system` Optional (string) the system prompt, conversion from openai format to this is handled in factory.py
    - `temperature` Optional (float) The amount of randomness injected into the response
    - `top_p` Optional (float) Use nucleus sampling.
    - `top_k` Optional (int) Only sample from the top K options for each subsequent token
    - `stop_sequences` Optional (List[str]) Custom text sequences that cause the model to stop generating
    """

    max_tokens: Optional[int] = 4096  # Opus, Sonnet, and Haiku default
    anthropic_version: Optional[str] = "bedrock-2023-05-31"
    system: Optional[str] = None
    temperature: Optional[float] = None
    top_p: Optional[float] = None
    top_k: Optional[int] = None
    stop_sequences: Optional[List[str]] = None

    def __init__(
        self,
        max_tokens: Optional[int] = None,
        anthropic_version: Optional[str] = None,
    ) -> None:
        # Any explicitly provided value overrides the class-level default.
        locals_ = locals()
        for key, value in locals_.items():
            if key != "self" and value is not None:
                setattr(self.__class__, key, value)

    @classmethod
    def get_config(cls):
        """Return the non-None, non-callable config values set on the class."""
        return {
            k: v
            for k, v in cls.__dict__.items()
            if not k.startswith("__")
            and not isinstance(
                v,
                (
                    types.FunctionType,
                    types.BuiltinFunctionType,
                    classmethod,
                    staticmethod,
                ),
            )
            and v is not None
        }

    def get_supported_openai_params(self):
        """OpenAI request params this config can translate for Claude 3 on Bedrock."""
        return [
            "max_tokens",
            "tools",
            "tool_choice",
            "stream",
            "stop",
            "temperature",
            "top_p",
            "extra_headers",
        ]

    def map_openai_params(self, non_default_params: dict, optional_params: dict):
        """Translate OpenAI-style params into the Bedrock/Anthropic format."""
        for param, value in non_default_params.items():
            if param == "max_tokens":
                optional_params["max_tokens"] = value
            if param == "tools":
                optional_params["tools"] = value
            # BUGFIX: "tool_choice" is advertised by get_supported_openai_params()
            # above but was previously dropped silently here.
            if param == "tool_choice":
                optional_params["tool_choice"] = value
            if param == "stream":
                optional_params["stream"] = value
            if param == "stop":
                optional_params["stop_sequences"] = value
            if param == "temperature":
                optional_params["temperature"] = value
            if param == "top_p":
                optional_params["top_p"] = value
        return optional_params
(max_tokens: Optional[int] = None, anthropic_version: Optional[str] = None) -> None
62,834
litellm.llms.bedrock
__init__
null
def __init__(
    self,
    max_tokens: Optional[int] = None,
    anthropic_version: Optional[str] = None,
) -> None:
    """Promote any explicitly supplied values to class-level attributes."""
    # Capture the call arguments before binding any new locals.
    provided = locals()
    for name, val in provided.items():
        if name == "self" or val is None:
            continue
        setattr(self.__class__, name, val)
(self, max_tokens: Optional[int] = None, anthropic_version: Optional[str] = None) -> NoneType
62,835
litellm.llms.bedrock
get_supported_openai_params
null
def get_supported_openai_params(self):
    """Return the OpenAI request params this config can translate."""
    supported = (
        "max_tokens",
        "tools",
        "tool_choice",
        "stream",
        "stop",
        "temperature",
        "top_p",
        "extra_headers",
    )
    return list(supported)
(self)
62,836
litellm.llms.bedrock
map_openai_params
null
def map_openai_params(self, non_default_params: dict, optional_params: dict):
    """Translate OpenAI-style request params to Bedrock/Anthropic Claude 3 names.

    Mutates and returns ``optional_params``; unrecognised params are ignored.
    """
    for param, value in non_default_params.items():
        if param == "max_tokens":
            optional_params["max_tokens"] = value
        if param == "tools":
            optional_params["tools"] = value
        # BUGFIX: "tool_choice" is advertised by get_supported_openai_params()
        # but was previously dropped silently.
        if param == "tool_choice":
            optional_params["tool_choice"] = value
        if param == "stream":
            optional_params["stream"] = value
        if param == "stop":
            optional_params["stop_sequences"] = value
        if param == "temperature":
            optional_params["temperature"] = value
        if param == "top_p":
            optional_params["top_p"] = value
    return optional_params
(self, non_default_params: dict, optional_params: dict)
62,837
litellm.llms.bedrock
AmazonAnthropicConfig
Reference: https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/providers?model=claude Supported Params for the Amazon / Anthropic models: - `max_tokens_to_sample` (integer) max tokens, - `temperature` (float) model temperature, - `top_k` (integer) top k, - `top_p` (integer) top p, - `stop_sequences` (string[]) list of stop sequences - e.g. ["\n\nHuman:"], - `anthropic_version` (string) version of anthropic for bedrock - e.g. "bedrock-2023-05-31"
class AmazonAnthropicConfig:
    """
    Reference: https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/providers?model=claude

    Supported Params for the Amazon / Anthropic models:

    - `max_tokens_to_sample` (integer) max tokens,
    - `temperature` (float) model temperature,
    - `top_k` (integer) top k,
    - `top_p` (float) top p,
    - `stop_sequences` (string[]) list of stop sequences - e.g. ["\\n\\nHuman:"],
    - `anthropic_version` (string) version of anthropic for bedrock - e.g. "bedrock-2023-05-31"
    """

    max_tokens_to_sample: Optional[int] = litellm.max_tokens
    stop_sequences: Optional[list] = None
    temperature: Optional[float] = None
    top_k: Optional[int] = None
    # FIX: was annotated Optional[int]; top_p is a nucleus-sampling float.
    top_p: Optional[float] = None
    anthropic_version: Optional[str] = None

    def __init__(
        self,
        max_tokens_to_sample: Optional[int] = None,
        stop_sequences: Optional[list] = None,
        temperature: Optional[float] = None,
        top_k: Optional[int] = None,
        top_p: Optional[float] = None,
        anthropic_version: Optional[str] = None,
    ) -> None:
        # Promote explicitly supplied values to class-level defaults.
        locals_ = locals()
        for key, value in locals_.items():
            if key != "self" and value is not None:
                setattr(self.__class__, key, value)

    @classmethod
    def get_config(cls):
        """Return the non-None, non-callable config values set on the class."""
        return {
            k: v
            for k, v in cls.__dict__.items()
            if not k.startswith("__")
            and not isinstance(
                v,
                (
                    types.FunctionType,
                    types.BuiltinFunctionType,
                    classmethod,
                    staticmethod,
                ),
            )
            and v is not None
        }

    def get_supported_openai_params(
        self,
    ):
        """OpenAI request params translatable for Claude 1/2 completions on Bedrock."""
        return ["max_tokens", "temperature", "stop", "top_p", "stream"]

    def map_openai_params(self, non_default_params: dict, optional_params: dict):
        """Translate OpenAI params to Bedrock Anthropic text-completion names."""
        for param, value in non_default_params.items():
            if param == "max_tokens":
                optional_params["max_tokens_to_sample"] = value
            if param == "temperature":
                optional_params["temperature"] = value
            if param == "top_p":
                optional_params["top_p"] = value
            if param == "stop":
                optional_params["stop_sequences"] = value
            if param == "stream" and value == True:  # noqa: E712 — preserve exact check
                optional_params["stream"] = value
        return optional_params
(max_tokens_to_sample: Optional[int] = None, stop_sequences: Optional[list] = None, temperature: Optional[float] = None, top_k: Optional[int] = None, top_p: Optional[int] = None, anthropic_version: Optional[str] = None) -> None
62,838
litellm.llms.bedrock
__init__
null
def __init__(
    self,
    max_tokens_to_sample: Optional[int] = None,
    stop_sequences: Optional[list] = None,
    temperature: Optional[float] = None,
    top_k: Optional[int] = None,
    top_p: Optional[int] = None,
    anthropic_version: Optional[str] = None,
) -> None:
    """Promote any explicitly supplied values to class-level attributes."""
    # Capture the call arguments before binding any new locals.
    provided = locals()
    for name, val in provided.items():
        if name == "self" or val is None:
            continue
        setattr(self.__class__, name, val)
(self, max_tokens_to_sample: Optional[int] = None, stop_sequences: Optional[list] = None, temperature: Optional[float] = None, top_k: Optional[int] = None, top_p: Optional[int] = None, anthropic_version: Optional[str] = None) -> NoneType
62,839
litellm.llms.bedrock
get_supported_openai_params
null
def get_supported_openai_params(
    self,
):
    """Return the OpenAI request params this config can translate."""
    params = ["max_tokens", "temperature", "stop", "top_p", "stream"]
    return params
(self)
62,840
litellm.llms.bedrock
map_openai_params
null
def map_openai_params(self, non_default_params: dict, optional_params: dict):
    """Translate OpenAI params to Bedrock Anthropic text-completion names."""
    # Straight renames; "stream" is special-cased below.
    key_map = {
        "max_tokens": "max_tokens_to_sample",
        "temperature": "temperature",
        "top_p": "top_p",
        "stop": "stop_sequences",
    }
    for param, value in non_default_params.items():
        if param in key_map:
            optional_params[key_map[param]] = value
        elif param == "stream" and value == True:  # noqa: E712 — preserve exact check
            optional_params["stream"] = value
    return optional_params
(self, non_default_params: dict, optional_params: dict)
62,841
litellm.llms.bedrock
AmazonBedrockGlobalConfig
null
class AmazonBedrockGlobalConfig:
    """Provider-wide helpers shared by all Bedrock model configs."""

    def __init__(self):
        pass

    def get_mapped_special_auth_params(self) -> dict:
        """
        Mapping of common auth params across bedrock/vertex/azure/watsonx
        """
        return {"region_name": "aws_region_name"}

    def map_special_auth_params(self, non_default_params: dict, optional_params: dict):
        """Copy recognised common auth params into optional_params under Bedrock names."""
        mapping = self.get_mapped_special_auth_params()
        for source_key, value in non_default_params.items():
            target_key = mapping.get(source_key)
            if target_key is not None:
                optional_params[target_key] = value
        return optional_params

    def get_eu_regions(self) -> List[str]:
        """
        Source: https://www.aws-services.info/bedrock.html
        """
        return ["eu-west-1", "eu-west-3", "eu-central-1"]
()
62,843
litellm.llms.bedrock
get_eu_regions
Source: https://www.aws-services.info/bedrock.html
def get_eu_regions(self) -> List[str]:
    """
    Source: https://www.aws-services.info/bedrock.html
    """
    eu_regions = ["eu-west-1", "eu-west-3", "eu-central-1"]
    return eu_regions
(self) -> List[str]
62,844
litellm.llms.bedrock
get_mapped_special_auth_params
Mapping of common auth params across bedrock/vertex/azure/watsonx
def get_mapped_special_auth_params(self) -> dict:
    """
    Mapping of common auth params across bedrock/vertex/azure/watsonx
    """
    common_to_bedrock = dict(region_name="aws_region_name")
    return common_to_bedrock
(self) -> dict
62,845
litellm.llms.bedrock
map_special_auth_params
null
def map_special_auth_params(self, non_default_params: dict, optional_params: dict):
    """Copy recognised common auth params into optional_params under Bedrock names."""
    mapping = self.get_mapped_special_auth_params()
    for source_key, value in non_default_params.items():
        target_key = mapping.get(source_key)
        if target_key is not None:
            optional_params[target_key] = value
    return optional_params
(self, non_default_params: dict, optional_params: dict)
62,846
litellm.llms.bedrock_httpx
AmazonCohereChatConfig
Reference - https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-cohere-command-r-plus.html
class AmazonCohereChatConfig:
    """
    Reference - https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-cohere-command-r-plus.html
    """

    documents: Optional[List[Document]] = None
    search_queries_only: Optional[bool] = None
    preamble: Optional[str] = None
    max_tokens: Optional[int] = None
    temperature: Optional[float] = None
    p: Optional[float] = None
    k: Optional[float] = None
    prompt_truncation: Optional[str] = None
    frequency_penalty: Optional[float] = None
    presence_penalty: Optional[float] = None
    seed: Optional[int] = None
    return_prompt: Optional[bool] = None
    stop_sequences: Optional[List[str]] = None
    raw_prompting: Optional[bool] = None

    def __init__(
        self,
        documents: Optional[List[Document]] = None,
        search_queries_only: Optional[bool] = None,
        preamble: Optional[str] = None,
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
        p: Optional[float] = None,
        k: Optional[float] = None,
        prompt_truncation: Optional[str] = None,
        frequency_penalty: Optional[float] = None,
        presence_penalty: Optional[float] = None,
        seed: Optional[int] = None,
        return_prompt: Optional[bool] = None,
        # FIX: was annotated Optional[str]; class attribute is Optional[List[str]].
        stop_sequences: Optional[List[str]] = None,
        raw_prompting: Optional[bool] = None,
    ) -> None:
        # Promote explicitly supplied values to class-level defaults.
        locals_ = locals()
        for key, value in locals_.items():
            if key != "self" and value is not None:
                setattr(self.__class__, key, value)

    @classmethod
    def get_config(cls):
        """Return the non-None, non-callable config values set on the class."""
        return {
            k: v
            for k, v in cls.__dict__.items()
            if not k.startswith("__")
            and not isinstance(
                v,
                (
                    types.FunctionType,
                    types.BuiltinFunctionType,
                    classmethod,
                    staticmethod,
                ),
            )
            and v is not None
        }

    def get_supported_openai_params(self) -> List[str]:
        """OpenAI request params supported for Cohere Command-R on Bedrock."""
        # BUGFIX: "stop" was listed twice in the original.
        return [
            "max_tokens",
            "stream",
            "stop",
            "temperature",
            "top_p",
            "frequency_penalty",
            "presence_penalty",
            "seed",
        ]

    def map_openai_params(
        self, non_default_params: dict, optional_params: dict
    ) -> dict:
        """Translate OpenAI params into Cohere chat (Bedrock) request params."""
        for param, value in non_default_params.items():
            if param == "max_tokens":
                optional_params["max_tokens"] = value
            if param == "stream":
                optional_params["stream"] = value
            if param == "stop":
                if isinstance(value, str):
                    value = [value]
                optional_params["stop_sequences"] = value
            if param == "temperature":
                optional_params["temperature"] = value
            if param == "top_p":
                optional_params["p"] = value
            if param == "frequency_penalty":
                optional_params["frequency_penalty"] = value
            if param == "presence_penalty":
                optional_params["presence_penalty"] = value
            # BUGFIX: original read `if "seed":` — a constant truthy string —
            # so EVERY param's value was copied into optional_params["seed"].
            if param == "seed":
                optional_params["seed"] = value
        return optional_params
(documents: Optional[List[litellm.types.llms.bedrock.Document]] = None, search_queries_only: Optional[bool] = None, preamble: Optional[str] = None, max_tokens: Optional[int] = None, temperature: Optional[float] = None, p: Optional[float] = None, k: Optional[float] = None, prompt_truncation: Optional[str] = None, frequency_penalty: Optional[float] = None, presence_penalty: Optional[float] = None, seed: Optional[int] = None, return_prompt: Optional[bool] = None, stop_sequences: Optional[List[str]] = None, raw_prompting: Optional[bool] = None) -> None
62,847
litellm.llms.bedrock_httpx
__init__
null
def __init__(
    self,
    documents: Optional[List[Document]] = None,
    search_queries_only: Optional[bool] = None,
    preamble: Optional[str] = None,
    max_tokens: Optional[int] = None,
    temperature: Optional[float] = None,
    p: Optional[float] = None,
    k: Optional[float] = None,
    prompt_truncation: Optional[str] = None,
    frequency_penalty: Optional[float] = None,
    presence_penalty: Optional[float] = None,
    seed: Optional[int] = None,
    return_prompt: Optional[bool] = None,
    # annotation corrected: was Optional[str]; the class attribute is List[str]
    stop_sequences: Optional[List[str]] = None,
    raw_prompting: Optional[bool] = None,
) -> None:
    """Promote any explicitly supplied values to class-level attributes."""
    # locals() is captured before any new names are bound, so it holds
    # exactly the constructor arguments; None values are left untouched.
    locals_ = locals()
    for key, value in locals_.items():
        if key != "self" and value is not None:
            setattr(self.__class__, key, value)
(self, documents: Optional[List[litellm.types.llms.bedrock.Document]] = None, search_queries_only: Optional[bool] = None, preamble: Optional[str] = None, max_tokens: Optional[int] = None, temperature: Optional[float] = None, p: Optional[float] = None, k: Optional[float] = None, prompt_truncation: Optional[str] = None, frequency_penalty: Optional[float] = None, presence_penalty: Optional[float] = None, seed: Optional[int] = None, return_prompt: Optional[bool] = None, stop_sequences: Optional[str] = None, raw_prompting: Optional[bool] = None) -> NoneType
62,848
litellm.llms.bedrock_httpx
get_supported_openai_params
null
def get_supported_openai_params(self) -> List[str]:
    """OpenAI request params supported for Cohere Command-R on Bedrock."""
    # BUGFIX: "stop" appeared twice in the original list.
    return [
        "max_tokens",
        "stream",
        "stop",
        "temperature",
        "top_p",
        "frequency_penalty",
        "presence_penalty",
        "seed",
    ]
(self) -> List[str]
62,849
litellm.llms.bedrock_httpx
map_openai_params
null
def map_openai_params(
    self, non_default_params: dict, optional_params: dict
) -> dict:
    """Translate OpenAI params into Cohere chat (Bedrock) request params.

    Mutates and returns ``optional_params``; a string ``stop`` is wrapped
    into a single-element list for ``stop_sequences``.
    """
    for param, value in non_default_params.items():
        if param == "max_tokens":
            optional_params["max_tokens"] = value
        if param == "stream":
            optional_params["stream"] = value
        if param == "stop":
            if isinstance(value, str):
                value = [value]
            optional_params["stop_sequences"] = value
        if param == "temperature":
            optional_params["temperature"] = value
        if param == "top_p":
            optional_params["p"] = value
        if param == "frequency_penalty":
            optional_params["frequency_penalty"] = value
        if param == "presence_penalty":
            optional_params["presence_penalty"] = value
        # BUGFIX: original read `if "seed":` — a constant truthy string —
        # so EVERY param's value was copied into optional_params["seed"].
        if param == "seed":
            optional_params["seed"] = value
    return optional_params
(self, non_default_params: dict, optional_params: dict) -> dict
62,850
litellm.llms.bedrock
AmazonCohereConfig
Reference: https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/providers?model=command Supported Params for the Amazon / Cohere models: - `max_tokens` (integer) max tokens, - `temperature` (float) model temperature, - `return_likelihood` (string) n/a
class AmazonCohereConfig:
    """
    Reference: https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/providers?model=command

    Supported Params for the Amazon / Cohere models:

    - `max_tokens` (integer) max tokens,
    - `temperature` (float) model temperature,
    - `return_likelihood` (string) n/a
    """

    max_tokens: Optional[int] = None
    temperature: Optional[float] = None
    return_likelihood: Optional[str] = None

    def __init__(
        self,
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
        return_likelihood: Optional[str] = None,
    ) -> None:
        # Promote explicitly supplied values to class-level defaults.
        provided = locals()
        for name, val in provided.items():
            if name == "self" or val is None:
                continue
            setattr(self.__class__, name, val)

    @classmethod
    def get_config(cls):
        """Collect non-dunder, non-callable, non-None class attributes."""
        skip_types = (
            types.FunctionType,
            types.BuiltinFunctionType,
            classmethod,
            staticmethod,
        )
        config = {}
        for attr, val in cls.__dict__.items():
            if attr.startswith("__") or val is None:
                continue
            if isinstance(val, skip_types):
                continue
            config[attr] = val
        return config
(max_tokens: Optional[int] = None, temperature: Optional[float] = None, return_likelihood: Optional[str] = None) -> None
62,851
litellm.llms.bedrock
__init__
null
def __init__(
    self,
    max_tokens: Optional[int] = None,
    temperature: Optional[float] = None,
    return_likelihood: Optional[str] = None,
) -> None:
    """Promote any explicitly supplied values to class-level attributes."""
    # Capture the call arguments before binding any new locals.
    provided = locals()
    for name, val in provided.items():
        if name == "self" or val is None:
            continue
        setattr(self.__class__, name, val)
(self, max_tokens: Optional[int] = None, temperature: Optional[float] = None, return_likelihood: Optional[str] = None) -> NoneType
62,852
litellm.llms.bedrock
AmazonLlamaConfig
Reference: https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/providers?model=meta.llama2-13b-chat-v1 Supported Params for the Amazon / Meta Llama models: - `max_gen_len` (integer) max tokens, - `temperature` (float) temperature for model, - `top_p` (float) top p for model
class AmazonLlamaConfig:
    """
    Reference: https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/providers?model=meta.llama2-13b-chat-v1

    Supported Params for the Amazon / Meta Llama models:

    - `max_gen_len` (integer) max tokens,
    - `temperature` (float) temperature for model,
    - `top_p` (float) top p for model
    """

    max_gen_len: Optional[int] = None
    temperature: Optional[float] = None
    topP: Optional[float] = None

    # NOTE(review): __init__ accepts `maxTokenCount`, but the class attribute is
    # `max_gen_len`, so a supplied value lands under the wrong key in get_config().
    # Looks like a copy/paste from AmazonTitanConfig — confirm the intended param
    # name before changing it (renaming would break existing callers).
    def __init__(
        self,
        maxTokenCount: Optional[int] = None,
        temperature: Optional[float] = None,
        topP: Optional[int] = None,
    ) -> None:
        provided = locals()
        for name, val in provided.items():
            if name == "self" or val is None:
                continue
            setattr(self.__class__, name, val)

    @classmethod
    def get_config(cls):
        """Collect non-dunder, non-callable, non-None class attributes."""
        skip_types = (
            types.FunctionType,
            types.BuiltinFunctionType,
            classmethod,
            staticmethod,
        )
        return {
            attr: val
            for attr, val in cls.__dict__.items()
            if not attr.startswith("__")
            and not isinstance(val, skip_types)
            and val is not None
        }
(maxTokenCount: Optional[int] = None, temperature: Optional[float] = None, topP: Optional[float] = None) -> None
62,853
litellm.llms.bedrock
__init__
null
def __init__(
    self,
    maxTokenCount: Optional[int] = None,
    temperature: Optional[float] = None,
    topP: Optional[int] = None,
) -> None:
    """Promote any explicitly supplied values to class-level attributes."""
    # Capture the call arguments before binding any new locals.
    provided = locals()
    for name, val in provided.items():
        if name == "self" or val is None:
            continue
        setattr(self.__class__, name, val)
(self, maxTokenCount: Optional[int] = None, temperature: Optional[float] = None, topP: Optional[int] = None) -> NoneType
62,854
litellm.llms.bedrock
AmazonMistralConfig
Reference: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-mistral.html Supported Params for the Amazon / Mistral models: - `max_tokens` (integer) max tokens, - `temperature` (float) temperature for model, - `top_p` (float) top p for model - `stop` [string] A list of stop sequences that if generated by the model, stops the model from generating further output. - `top_k` (float) top k for model
class AmazonMistralConfig:
    """
    Reference: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-mistral.html
    Supported Params for the Amazon / Mistral models:

    - `max_tokens` (integer) max tokens,
    - `temperature` (float) temperature for model,
    - `top_p` (float) top p for model
    - `stop` [string] A list of stop sequences that if generated by the model, stops the model from generating further output.
    - `top_k` (float) top k for model
    """

    max_tokens: Optional[int] = None
    temperature: Optional[float] = None
    top_p: Optional[float] = None
    top_k: Optional[float] = None
    stop: Optional[List[str]] = None

    def __init__(
        self,
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
        # FIX: was annotated Optional[int]; top_p is a float (matches class attr).
        top_p: Optional[float] = None,
        top_k: Optional[float] = None,
        stop: Optional[List[str]] = None,
    ) -> None:
        # Promote explicitly supplied values to class-level defaults.
        locals_ = locals()
        for key, value in locals_.items():
            if key != "self" and value is not None:
                setattr(self.__class__, key, value)

    @classmethod
    def get_config(cls):
        """Return the non-None, non-callable config values set on the class."""
        return {
            k: v
            for k, v in cls.__dict__.items()
            if not k.startswith("__")
            and not isinstance(
                v,
                (
                    types.FunctionType,
                    types.BuiltinFunctionType,
                    classmethod,
                    staticmethod,
                ),
            )
            and v is not None
        }
(max_tokens: Optional[int] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, top_k: Optional[float] = None, stop: Optional[List[str]] = None) -> None
62,855
litellm.llms.bedrock
__init__
null
def __init__(
    self,
    max_tokens: Optional[int] = None,
    temperature: Optional[float] = None,
    # annotation corrected: was Optional[int]; top_p is a float
    top_p: Optional[float] = None,
    top_k: Optional[float] = None,
    stop: Optional[List[str]] = None,
) -> None:
    """Promote any explicitly supplied values to class-level attributes."""
    # locals() is captured before any new names are bound, so it holds
    # exactly the constructor arguments; None values are left untouched.
    locals_ = locals()
    for key, value in locals_.items():
        if key != "self" and value is not None:
            setattr(self.__class__, key, value)
(self, max_tokens: Optional[int] = None, temperature: Optional[float] = None, top_p: Optional[int] = None, top_k: Optional[float] = None, stop: Optional[List[str]] = None) -> NoneType
62,856
litellm.llms.bedrock
AmazonStabilityConfig
Reference: https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/providers?model=stability.stable-diffusion-xl-v0 Supported Params for the Amazon / Stable Diffusion models: - `cfg_scale` (integer): Default `7`. Between [ 0 .. 35 ]. How strictly the diffusion process adheres to the prompt text (higher values keep your image closer to your prompt) - `seed` (float): Default: `0`. Between [ 0 .. 4294967295 ]. Random noise seed (omit this option or use 0 for a random seed) - `steps` (array of strings): Default `30`. Between [ 10 .. 50 ]. Number of diffusion steps to run. - `width` (integer): Default: `512`. multiple of 64 >= 128. Width of the image to generate, in pixels, in an increment divible by 64. Engine-specific dimension validation: - SDXL Beta: must be between 128x128 and 512x896 (or 896x512); only one dimension can be greater than 512. - SDXL v0.9: must be one of 1024x1024, 1152x896, 1216x832, 1344x768, 1536x640, 640x1536, 768x1344, 832x1216, or 896x1152 - SDXL v1.0: same as SDXL v0.9 - SD v1.6: must be between 320x320 and 1536x1536 - `height` (integer): Default: `512`. multiple of 64 >= 128. Height of the image to generate, in pixels, in an increment divible by 64. Engine-specific dimension validation: - SDXL Beta: must be between 128x128 and 512x896 (or 896x512); only one dimension can be greater than 512. - SDXL v0.9: must be one of 1024x1024, 1152x896, 1216x832, 1344x768, 1536x640, 640x1536, 768x1344, 832x1216, or 896x1152 - SDXL v1.0: same as SDXL v0.9 - SD v1.6: must be between 320x320 and 1536x1536
class AmazonStabilityConfig:
    """
    Reference: https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/providers?model=stability.stable-diffusion-xl-v0

    Supported Params for the Amazon / Stable Diffusion models:

    - `cfg_scale` (integer): Default `7`. Between [ 0 .. 35 ]. How strictly the diffusion process adheres
      to the prompt text (higher values keep your image closer to your prompt)

    - `seed` (float): Default: `0`. Between [ 0 .. 4294967295 ]. Random noise seed
      (omit this option or use 0 for a random seed)

    - `steps` (array of strings): Default `30`. Between [ 10 .. 50 ]. Number of diffusion steps to run.

    - `width` (integer): Default: `512`. multiple of 64 >= 128. Width of the image to generate, in pixels,
      in an increment divisible by 64. Engine-specific dimension validation:
        - SDXL Beta: must be between 128x128 and 512x896 (or 896x512); only one dimension can be greater than 512.
        - SDXL v0.9: must be one of 1024x1024, 1152x896, 1216x832, 1344x768, 1536x640, 640x1536, 768x1344, 832x1216, or 896x1152
        - SDXL v1.0: same as SDXL v0.9
        - SD v1.6: must be between 320x320 and 1536x1536

    - `height` (integer): Default: `512`. multiple of 64 >= 128. Height of the image to generate, in pixels,
      in an increment divisible by 64. Engine-specific dimension validation:
        - SDXL Beta: must be between 128x128 and 512x896 (or 896x512); only one dimension can be greater than 512.
        - SDXL v0.9: must be one of 1024x1024, 1152x896, 1216x832, 1344x768, 1536x640, 640x1536, 768x1344, 832x1216, or 896x1152
        - SDXL v1.0: same as SDXL v0.9
        - SD v1.6: must be between 320x320 and 1536x1536
    """

    cfg_scale: Optional[int] = None
    seed: Optional[float] = None
    # NOTE(review): annotated List[str], but the docs above describe `steps` as a
    # number of diffusion steps — confirm the intended type before changing.
    steps: Optional[List[str]] = None
    width: Optional[int] = None
    height: Optional[int] = None

    def __init__(
        self,
        cfg_scale: Optional[int] = None,
        seed: Optional[float] = None,
        steps: Optional[List[str]] = None,
        width: Optional[int] = None,
        height: Optional[int] = None,
    ) -> None:
        # Promote explicitly supplied values to class-level defaults.
        provided = locals()
        for name, val in provided.items():
            if name == "self" or val is None:
                continue
            setattr(self.__class__, name, val)

    @classmethod
    def get_config(cls):
        """Collect non-dunder, non-callable, non-None class attributes."""
        skip_types = (
            types.FunctionType,
            types.BuiltinFunctionType,
            classmethod,
            staticmethod,
        )
        return {
            attr: val
            for attr, val in cls.__dict__.items()
            if not attr.startswith("__")
            and not isinstance(val, skip_types)
            and val is not None
        }
(cfg_scale: Optional[int] = None, seed: Optional[float] = None, steps: Optional[List[str]] = None, width: Optional[int] = None, height: Optional[int] = None) -> None
62,857
litellm.llms.bedrock
__init__
null
def __init__(
    self,
    cfg_scale: Optional[int] = None,
    seed: Optional[float] = None,
    steps: Optional[List[str]] = None,
    width: Optional[int] = None,
    height: Optional[int] = None,
) -> None:
    """Promote any explicitly supplied values to class-level attributes."""
    # Capture the call arguments before binding any new locals.
    provided = locals()
    for name, val in provided.items():
        if name == "self" or val is None:
            continue
        setattr(self.__class__, name, val)
(self, cfg_scale: Optional[int] = None, seed: Optional[float] = None, steps: Optional[List[str]] = None, width: Optional[int] = None, height: Optional[int] = None) -> NoneType
62,858
litellm.llms.bedrock
AmazonTitanConfig
Reference: https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/providers?model=titan-text-express-v1 Supported Params for the Amazon Titan models: - `maxTokenCount` (integer) max tokens, - `stopSequences` (string[]) list of stop sequence strings - `temperature` (float) temperature for model, - `topP` (int) top p for model
class AmazonTitanConfig:
    """
    Reference: https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/providers?model=titan-text-express-v1

    Supported Params for the Amazon Titan models:

    - `maxTokenCount` (integer) max tokens,
    - `stopSequences` (string[]) list of stop sequence strings
    - `temperature` (float) temperature for model,
    - `topP` (int) top p for model
    """

    maxTokenCount: Optional[int] = None
    stopSequences: Optional[list] = None
    temperature: Optional[float] = None
    topP: Optional[int] = None

    def __init__(
        self,
        maxTokenCount: Optional[int] = None,
        stopSequences: Optional[list] = None,
        temperature: Optional[float] = None,
        topP: Optional[int] = None,
    ) -> None:
        # Promote explicitly supplied values to class-level defaults.
        provided = locals()
        for name, val in provided.items():
            if name == "self" or val is None:
                continue
            setattr(self.__class__, name, val)

    @classmethod
    def get_config(cls):
        """Collect non-dunder, non-callable, non-None class attributes."""
        skip_types = (
            types.FunctionType,
            types.BuiltinFunctionType,
            classmethod,
            staticmethod,
        )
        return {
            attr: val
            for attr, val in cls.__dict__.items()
            if not attr.startswith("__")
            and not isinstance(val, skip_types)
            and val is not None
        }
(maxTokenCount: Optional[int] = None, stopSequences: Optional[list] = None, temperature: Optional[float] = None, topP: Optional[int] = None) -> None
62,859
litellm.llms.bedrock
__init__
null
def __init__(
    self,
    maxTokenCount: Optional[int] = None,
    stopSequences: Optional[list] = None,
    temperature: Optional[float] = None,
    topP: Optional[int] = None,
) -> None:
    """Promote any explicitly supplied values to class-level attributes."""
    # Capture the call arguments before binding any new locals.
    provided = locals()
    for name, val in provided.items():
        if name == "self" or val is None:
            continue
        setattr(self.__class__, name, val)
(self, maxTokenCount: Optional[int] = None, stopSequences: Optional[list] = None, temperature: Optional[float] = None, topP: Optional[int] = None) -> NoneType
62,860
litellm.llms.anthropic
AnthropicChatCompletion
null
class AnthropicChatCompletion(BaseLLM): def __init__(self) -> None: super().__init__() def process_streaming_response( self, model: str, response: Union[requests.Response, httpx.Response], model_response: ModelResponse, stream: bool, logging_obj: litellm.utils.Logging, optional_params: dict, api_key: str, data: Union[dict, str], messages: List, print_verbose, encoding, ) -> CustomStreamWrapper: """ Return stream object for tool-calling + streaming """ ## LOGGING logging_obj.post_call( input=messages, api_key=api_key, original_response=response.text, additional_args={"complete_input_dict": data}, ) print_verbose(f"raw model_response: {response.text}") ## RESPONSE OBJECT try: completion_response = response.json() except: raise AnthropicError( message=response.text, status_code=response.status_code ) text_content = "" tool_calls = [] for content in completion_response["content"]: if content["type"] == "text": text_content += content["text"] ## TOOL CALLING elif content["type"] == "tool_use": tool_calls.append( { "id": content["id"], "type": "function", "function": { "name": content["name"], "arguments": json.dumps(content["input"]), }, } ) if "error" in completion_response: raise AnthropicError( message=str(completion_response["error"]), status_code=response.status_code, ) _message = litellm.Message( tool_calls=tool_calls, content=text_content or None, ) model_response.choices[0].message = _message # type: ignore model_response._hidden_params["original_response"] = completion_response[ "content" ] # allow user to access raw anthropic tool calling response model_response.choices[0].finish_reason = map_finish_reason( completion_response["stop_reason"] ) print_verbose("INSIDE ANTHROPIC STREAMING TOOL CALLING CONDITION BLOCK") # return an iterator streaming_model_response = ModelResponse(stream=True) streaming_model_response.choices[0].finish_reason = model_response.choices[ # type: ignore 0 ].finish_reason # streaming_model_response.choices = 
[litellm.utils.StreamingChoices()] streaming_choice = litellm.utils.StreamingChoices() streaming_choice.index = model_response.choices[0].index _tool_calls = [] print_verbose( f"type of model_response.choices[0]: {type(model_response.choices[0])}" ) print_verbose(f"type of streaming_choice: {type(streaming_choice)}") if isinstance(model_response.choices[0], litellm.Choices): if getattr( model_response.choices[0].message, "tool_calls", None ) is not None and isinstance( model_response.choices[0].message.tool_calls, list ): for tool_call in model_response.choices[0].message.tool_calls: _tool_call = {**tool_call.dict(), "index": 0} _tool_calls.append(_tool_call) delta_obj = litellm.utils.Delta( content=getattr(model_response.choices[0].message, "content", None), role=model_response.choices[0].message.role, tool_calls=_tool_calls, ) streaming_choice.delta = delta_obj streaming_model_response.choices = [streaming_choice] completion_stream = ModelResponseIterator( model_response=streaming_model_response ) print_verbose( "Returns anthropic CustomStreamWrapper with 'cached_response' streaming object" ) return CustomStreamWrapper( completion_stream=completion_stream, model=model, custom_llm_provider="cached_response", logging_obj=logging_obj, ) else: raise AnthropicError( status_code=422, message="Unprocessable response object - {}".format(response.text), ) def process_response( self, model: str, response: Union[requests.Response, httpx.Response], model_response: ModelResponse, stream: bool, logging_obj: litellm.utils.Logging, optional_params: dict, api_key: str, data: Union[dict, str], messages: List, print_verbose, encoding, ) -> ModelResponse: ## LOGGING logging_obj.post_call( input=messages, api_key=api_key, original_response=response.text, additional_args={"complete_input_dict": data}, ) print_verbose(f"raw model_response: {response.text}") ## RESPONSE OBJECT try: completion_response = response.json() except: raise AnthropicError( message=response.text, 
status_code=response.status_code ) if "error" in completion_response: raise AnthropicError( message=str(completion_response["error"]), status_code=response.status_code, ) else: text_content = "" tool_calls = [] for content in completion_response["content"]: if content["type"] == "text": text_content += content["text"] ## TOOL CALLING elif content["type"] == "tool_use": tool_calls.append( { "id": content["id"], "type": "function", "function": { "name": content["name"], "arguments": json.dumps(content["input"]), }, } ) _message = litellm.Message( tool_calls=tool_calls, content=text_content or None, ) model_response.choices[0].message = _message # type: ignore model_response._hidden_params["original_response"] = completion_response[ "content" ] # allow user to access raw anthropic tool calling response model_response.choices[0].finish_reason = map_finish_reason( completion_response["stop_reason"] ) ## CALCULATING USAGE prompt_tokens = completion_response["usage"]["input_tokens"] completion_tokens = completion_response["usage"]["output_tokens"] total_tokens = prompt_tokens + completion_tokens model_response["created"] = int(time.time()) model_response["model"] = model usage = Usage( prompt_tokens=prompt_tokens, completion_tokens=completion_tokens, total_tokens=total_tokens, ) setattr(model_response, "usage", usage) # type: ignore return model_response async def acompletion_stream_function( self, model: str, messages: list, api_base: str, custom_prompt_dict: dict, model_response: ModelResponse, print_verbose: Callable, encoding, api_key, logging_obj, stream, _is_function_call, data: dict, optional_params=None, litellm_params=None, logger_fn=None, headers={}, ): self.async_handler = AsyncHTTPHandler( timeout=httpx.Timeout(timeout=600.0, connect=5.0) ) data["stream"] = True response = await self.async_handler.post( api_base, headers=headers, data=json.dumps(data), stream=True ) if response.status_code != 200: raise AnthropicError( status_code=response.status_code, 
message=response.text ) completion_stream = response.aiter_lines() streamwrapper = CustomStreamWrapper( completion_stream=completion_stream, model=model, custom_llm_provider="anthropic", logging_obj=logging_obj, ) return streamwrapper async def acompletion_function( self, model: str, messages: list, api_base: str, custom_prompt_dict: dict, model_response: ModelResponse, print_verbose: Callable, encoding, api_key, logging_obj, stream, _is_function_call, data: dict, optional_params: dict, litellm_params=None, logger_fn=None, headers={}, ) -> Union[ModelResponse, CustomStreamWrapper]: self.async_handler = AsyncHTTPHandler( timeout=httpx.Timeout(timeout=600.0, connect=5.0) ) response = await self.async_handler.post( api_base, headers=headers, data=json.dumps(data) ) if stream and _is_function_call: return self.process_streaming_response( model=model, response=response, model_response=model_response, stream=stream, logging_obj=logging_obj, api_key=api_key, data=data, messages=messages, print_verbose=print_verbose, optional_params=optional_params, encoding=encoding, ) return self.process_response( model=model, response=response, model_response=model_response, stream=stream, logging_obj=logging_obj, api_key=api_key, data=data, messages=messages, print_verbose=print_verbose, optional_params=optional_params, encoding=encoding, ) def completion( self, model: str, messages: list, api_base: str, custom_prompt_dict: dict, model_response: ModelResponse, print_verbose: Callable, encoding, api_key, logging_obj, optional_params: dict, acompletion=None, litellm_params=None, logger_fn=None, headers={}, ): headers = validate_environment(api_key, headers) _is_function_call = False messages = copy.deepcopy(messages) optional_params = copy.deepcopy(optional_params) if model in custom_prompt_dict: # check if the model has a registered custom prompt model_prompt_details = custom_prompt_dict[model] prompt = custom_prompt( role_dict=model_prompt_details["roles"], 
initial_prompt_value=model_prompt_details["initial_prompt_value"], final_prompt_value=model_prompt_details["final_prompt_value"], messages=messages, ) else: # Separate system prompt from rest of message system_prompt_indices = [] system_prompt = "" for idx, message in enumerate(messages): if message["role"] == "system": system_prompt += message["content"] system_prompt_indices.append(idx) if len(system_prompt_indices) > 0: for idx in reversed(system_prompt_indices): messages.pop(idx) if len(system_prompt) > 0: optional_params["system"] = system_prompt # Format rest of message according to anthropic guidelines try: messages = prompt_factory( model=model, messages=messages, custom_llm_provider="anthropic" ) except Exception as e: raise AnthropicError(status_code=400, message=str(e)) ## Load Config config = litellm.AnthropicConfig.get_config() for k, v in config.items(): if ( k not in optional_params ): # completion(top_k=3) > anthropic_config(top_k=3) <- allows for dynamic variables to be passed in optional_params[k] = v ## Handle Tool Calling if "tools" in optional_params: _is_function_call = True headers["anthropic-beta"] = "tools-2024-04-04" anthropic_tools = [] for tool in optional_params["tools"]: new_tool = tool["function"] new_tool["input_schema"] = new_tool.pop("parameters") # rename key anthropic_tools.append(new_tool) optional_params["tools"] = anthropic_tools stream = optional_params.pop("stream", None) data = { "model": model, "messages": messages, **optional_params, } ## LOGGING logging_obj.pre_call( input=messages, api_key=api_key, additional_args={ "complete_input_dict": data, "api_base": api_base, "headers": headers, }, ) print_verbose(f"_is_function_call: {_is_function_call}") if acompletion == True: if ( stream and not _is_function_call ): # if function call - fake the streaming (need complete blocks for output parsing in openai format) print_verbose("makes async anthropic streaming POST request") data["stream"] = stream return 
self.acompletion_stream_function( model=model, messages=messages, data=data, api_base=api_base, custom_prompt_dict=custom_prompt_dict, model_response=model_response, print_verbose=print_verbose, encoding=encoding, api_key=api_key, logging_obj=logging_obj, optional_params=optional_params, stream=stream, _is_function_call=_is_function_call, litellm_params=litellm_params, logger_fn=logger_fn, headers=headers, ) else: return self.acompletion_function( model=model, messages=messages, data=data, api_base=api_base, custom_prompt_dict=custom_prompt_dict, model_response=model_response, print_verbose=print_verbose, encoding=encoding, api_key=api_key, logging_obj=logging_obj, optional_params=optional_params, stream=stream, _is_function_call=_is_function_call, litellm_params=litellm_params, logger_fn=logger_fn, headers=headers, ) else: ## COMPLETION CALL if ( stream and not _is_function_call ): # if function call - fake the streaming (need complete blocks for output parsing in openai format) print_verbose("makes anthropic streaming POST request") data["stream"] = stream response = requests.post( api_base, headers=headers, data=json.dumps(data), stream=stream, ) if response.status_code != 200: raise AnthropicError( status_code=response.status_code, message=response.text ) completion_stream = response.iter_lines() streaming_response = CustomStreamWrapper( completion_stream=completion_stream, model=model, custom_llm_provider="anthropic", logging_obj=logging_obj, ) return streaming_response else: response = requests.post( api_base, headers=headers, data=json.dumps(data) ) if response.status_code != 200: raise AnthropicError( status_code=response.status_code, message=response.text ) if stream and _is_function_call: return self.process_streaming_response( model=model, response=response, model_response=model_response, stream=stream, logging_obj=logging_obj, api_key=api_key, data=data, messages=messages, print_verbose=print_verbose, optional_params=optional_params, encoding=encoding, 
) return self.process_response( model=model, response=response, model_response=model_response, stream=stream, logging_obj=logging_obj, api_key=api_key, data=data, messages=messages, print_verbose=print_verbose, optional_params=optional_params, encoding=encoding, ) def embedding(self): # logic for parsing in - calling - parsing out model embedding calls pass
() -> None
62,861
litellm.llms.base
__aexit__
null
def __exit__(self): if hasattr(self, "_client_session"): self._client_session.close()
(self, exc_type, exc_val, exc_tb)
62,863
litellm.llms.anthropic
__init__
null
def __init__(self) -> None: super().__init__()
(self) -> NoneType
62,864
litellm.llms.anthropic
acompletion_function
null
def process_response( self, model: str, response: Union[requests.Response, httpx.Response], model_response: ModelResponse, stream: bool, logging_obj: litellm.utils.Logging, optional_params: dict, api_key: str, data: Union[dict, str], messages: List, print_verbose, encoding, ) -> ModelResponse: ## LOGGING logging_obj.post_call( input=messages, api_key=api_key, original_response=response.text, additional_args={"complete_input_dict": data}, ) print_verbose(f"raw model_response: {response.text}") ## RESPONSE OBJECT try: completion_response = response.json() except: raise AnthropicError( message=response.text, status_code=response.status_code ) if "error" in completion_response: raise AnthropicError( message=str(completion_response["error"]), status_code=response.status_code, ) else: text_content = "" tool_calls = [] for content in completion_response["content"]: if content["type"] == "text": text_content += content["text"] ## TOOL CALLING elif content["type"] == "tool_use": tool_calls.append( { "id": content["id"], "type": "function", "function": { "name": content["name"], "arguments": json.dumps(content["input"]), }, } ) _message = litellm.Message( tool_calls=tool_calls, content=text_content or None, ) model_response.choices[0].message = _message # type: ignore model_response._hidden_params["original_response"] = completion_response[ "content" ] # allow user to access raw anthropic tool calling response model_response.choices[0].finish_reason = map_finish_reason( completion_response["stop_reason"] ) ## CALCULATING USAGE prompt_tokens = completion_response["usage"]["input_tokens"] completion_tokens = completion_response["usage"]["output_tokens"] total_tokens = prompt_tokens + completion_tokens model_response["created"] = int(time.time()) model_response["model"] = model usage = Usage( prompt_tokens=prompt_tokens, completion_tokens=completion_tokens, total_tokens=total_tokens, ) setattr(model_response, "usage", usage) # type: ignore return model_response
(self, model: str, messages: list, api_base: str, custom_prompt_dict: dict, model_response: litellm.utils.ModelResponse, print_verbose: Callable, encoding, api_key, logging_obj, stream, _is_function_call, data: dict, optional_params: dict, litellm_params=None, logger_fn=None, headers={}) -> Union[litellm.utils.ModelResponse, litellm.utils.CustomStreamWrapper]
62,866
litellm.llms.anthropic
completion
null
def completion( self, model: str, messages: list, api_base: str, custom_prompt_dict: dict, model_response: ModelResponse, print_verbose: Callable, encoding, api_key, logging_obj, optional_params: dict, acompletion=None, litellm_params=None, logger_fn=None, headers={}, ): headers = validate_environment(api_key, headers) _is_function_call = False messages = copy.deepcopy(messages) optional_params = copy.deepcopy(optional_params) if model in custom_prompt_dict: # check if the model has a registered custom prompt model_prompt_details = custom_prompt_dict[model] prompt = custom_prompt( role_dict=model_prompt_details["roles"], initial_prompt_value=model_prompt_details["initial_prompt_value"], final_prompt_value=model_prompt_details["final_prompt_value"], messages=messages, ) else: # Separate system prompt from rest of message system_prompt_indices = [] system_prompt = "" for idx, message in enumerate(messages): if message["role"] == "system": system_prompt += message["content"] system_prompt_indices.append(idx) if len(system_prompt_indices) > 0: for idx in reversed(system_prompt_indices): messages.pop(idx) if len(system_prompt) > 0: optional_params["system"] = system_prompt # Format rest of message according to anthropic guidelines try: messages = prompt_factory( model=model, messages=messages, custom_llm_provider="anthropic" ) except Exception as e: raise AnthropicError(status_code=400, message=str(e)) ## Load Config config = litellm.AnthropicConfig.get_config() for k, v in config.items(): if ( k not in optional_params ): # completion(top_k=3) > anthropic_config(top_k=3) <- allows for dynamic variables to be passed in optional_params[k] = v ## Handle Tool Calling if "tools" in optional_params: _is_function_call = True headers["anthropic-beta"] = "tools-2024-04-04" anthropic_tools = [] for tool in optional_params["tools"]: new_tool = tool["function"] new_tool["input_schema"] = new_tool.pop("parameters") # rename key anthropic_tools.append(new_tool) 
optional_params["tools"] = anthropic_tools stream = optional_params.pop("stream", None) data = { "model": model, "messages": messages, **optional_params, } ## LOGGING logging_obj.pre_call( input=messages, api_key=api_key, additional_args={ "complete_input_dict": data, "api_base": api_base, "headers": headers, }, ) print_verbose(f"_is_function_call: {_is_function_call}") if acompletion == True: if ( stream and not _is_function_call ): # if function call - fake the streaming (need complete blocks for output parsing in openai format) print_verbose("makes async anthropic streaming POST request") data["stream"] = stream return self.acompletion_stream_function( model=model, messages=messages, data=data, api_base=api_base, custom_prompt_dict=custom_prompt_dict, model_response=model_response, print_verbose=print_verbose, encoding=encoding, api_key=api_key, logging_obj=logging_obj, optional_params=optional_params, stream=stream, _is_function_call=_is_function_call, litellm_params=litellm_params, logger_fn=logger_fn, headers=headers, ) else: return self.acompletion_function( model=model, messages=messages, data=data, api_base=api_base, custom_prompt_dict=custom_prompt_dict, model_response=model_response, print_verbose=print_verbose, encoding=encoding, api_key=api_key, logging_obj=logging_obj, optional_params=optional_params, stream=stream, _is_function_call=_is_function_call, litellm_params=litellm_params, logger_fn=logger_fn, headers=headers, ) else: ## COMPLETION CALL if ( stream and not _is_function_call ): # if function call - fake the streaming (need complete blocks for output parsing in openai format) print_verbose("makes anthropic streaming POST request") data["stream"] = stream response = requests.post( api_base, headers=headers, data=json.dumps(data), stream=stream, ) if response.status_code != 200: raise AnthropicError( status_code=response.status_code, message=response.text ) completion_stream = response.iter_lines() streaming_response = CustomStreamWrapper( 
completion_stream=completion_stream, model=model, custom_llm_provider="anthropic", logging_obj=logging_obj, ) return streaming_response else: response = requests.post( api_base, headers=headers, data=json.dumps(data) ) if response.status_code != 200: raise AnthropicError( status_code=response.status_code, message=response.text ) if stream and _is_function_call: return self.process_streaming_response( model=model, response=response, model_response=model_response, stream=stream, logging_obj=logging_obj, api_key=api_key, data=data, messages=messages, print_verbose=print_verbose, optional_params=optional_params, encoding=encoding, ) return self.process_response( model=model, response=response, model_response=model_response, stream=stream, logging_obj=logging_obj, api_key=api_key, data=data, messages=messages, print_verbose=print_verbose, optional_params=optional_params, encoding=encoding, )
(self, model: str, messages: list, api_base: str, custom_prompt_dict: dict, model_response: litellm.utils.ModelResponse, print_verbose: Callable, encoding, api_key, logging_obj, optional_params: dict, acompletion=None, litellm_params=None, logger_fn=None, headers={})
62,867
litellm.llms.base
create_aclient_session
null
def create_aclient_session(self): if litellm.aclient_session: _aclient_session = litellm.aclient_session else: _aclient_session = httpx.AsyncClient() return _aclient_session
(self)
62,868
litellm.llms.base
create_client_session
null
def create_client_session(self): if litellm.client_session: _client_session = litellm.client_session else: _client_session = httpx.Client() return _client_session
(self)
62,869
litellm.llms.anthropic
embedding
null
def embedding(self): # logic for parsing in - calling - parsing out model embedding calls pass
(self)
62,871
litellm.llms.anthropic
process_streaming_response
Return stream object for tool-calling + streaming
def process_streaming_response( self, model: str, response: Union[requests.Response, httpx.Response], model_response: ModelResponse, stream: bool, logging_obj: litellm.utils.Logging, optional_params: dict, api_key: str, data: Union[dict, str], messages: List, print_verbose, encoding, ) -> CustomStreamWrapper: """ Return stream object for tool-calling + streaming """ ## LOGGING logging_obj.post_call( input=messages, api_key=api_key, original_response=response.text, additional_args={"complete_input_dict": data}, ) print_verbose(f"raw model_response: {response.text}") ## RESPONSE OBJECT try: completion_response = response.json() except: raise AnthropicError( message=response.text, status_code=response.status_code ) text_content = "" tool_calls = [] for content in completion_response["content"]: if content["type"] == "text": text_content += content["text"] ## TOOL CALLING elif content["type"] == "tool_use": tool_calls.append( { "id": content["id"], "type": "function", "function": { "name": content["name"], "arguments": json.dumps(content["input"]), }, } ) if "error" in completion_response: raise AnthropicError( message=str(completion_response["error"]), status_code=response.status_code, ) _message = litellm.Message( tool_calls=tool_calls, content=text_content or None, ) model_response.choices[0].message = _message # type: ignore model_response._hidden_params["original_response"] = completion_response[ "content" ] # allow user to access raw anthropic tool calling response model_response.choices[0].finish_reason = map_finish_reason( completion_response["stop_reason"] ) print_verbose("INSIDE ANTHROPIC STREAMING TOOL CALLING CONDITION BLOCK") # return an iterator streaming_model_response = ModelResponse(stream=True) streaming_model_response.choices[0].finish_reason = model_response.choices[ # type: ignore 0 ].finish_reason # streaming_model_response.choices = [litellm.utils.StreamingChoices()] streaming_choice = litellm.utils.StreamingChoices() streaming_choice.index = 
model_response.choices[0].index _tool_calls = [] print_verbose( f"type of model_response.choices[0]: {type(model_response.choices[0])}" ) print_verbose(f"type of streaming_choice: {type(streaming_choice)}") if isinstance(model_response.choices[0], litellm.Choices): if getattr( model_response.choices[0].message, "tool_calls", None ) is not None and isinstance( model_response.choices[0].message.tool_calls, list ): for tool_call in model_response.choices[0].message.tool_calls: _tool_call = {**tool_call.dict(), "index": 0} _tool_calls.append(_tool_call) delta_obj = litellm.utils.Delta( content=getattr(model_response.choices[0].message, "content", None), role=model_response.choices[0].message.role, tool_calls=_tool_calls, ) streaming_choice.delta = delta_obj streaming_model_response.choices = [streaming_choice] completion_stream = ModelResponseIterator( model_response=streaming_model_response ) print_verbose( "Returns anthropic CustomStreamWrapper with 'cached_response' streaming object" ) return CustomStreamWrapper( completion_stream=completion_stream, model=model, custom_llm_provider="cached_response", logging_obj=logging_obj, ) else: raise AnthropicError( status_code=422, message="Unprocessable response object - {}".format(response.text), )
(self, model: str, response: Union[requests.models.Response, httpx.Response], model_response: litellm.utils.ModelResponse, stream: bool, logging_obj: litellm.utils.Logging, optional_params: dict, api_key: str, data: Union[dict, str], messages: List, print_verbose, encoding) -> litellm.utils.CustomStreamWrapper
62,872
litellm.llms.base
validate_environment
null
def validate_environment(self): # set up the environment required to run the model pass
(self)
62,873
litellm.llms.anthropic
AnthropicConfig
Reference: https://docs.anthropic.com/claude/reference/messages_post to pass metadata to anthropic, it's {"user_id": "any-relevant-information"}
class AnthropicConfig: """ Reference: https://docs.anthropic.com/claude/reference/messages_post to pass metadata to anthropic, it's {"user_id": "any-relevant-information"} """ max_tokens: Optional[int] = ( 4096 # anthropic requires a default value (Opus, Sonnet, and Haiku have the same default) ) stop_sequences: Optional[list] = None temperature: Optional[int] = None top_p: Optional[int] = None top_k: Optional[int] = None metadata: Optional[dict] = None system: Optional[str] = None def __init__( self, max_tokens: Optional[ int ] = 4096, # You can pass in a value yourself or use the default value 4096 stop_sequences: Optional[list] = None, temperature: Optional[int] = None, top_p: Optional[int] = None, top_k: Optional[int] = None, metadata: Optional[dict] = None, system: Optional[str] = None, ) -> None: locals_ = locals() for key, value in locals_.items(): if key != "self" and value is not None: setattr(self.__class__, key, value) @classmethod def get_config(cls): return { k: v for k, v in cls.__dict__.items() if not k.startswith("__") and not isinstance( v, ( types.FunctionType, types.BuiltinFunctionType, classmethod, staticmethod, ), ) and v is not None } def get_supported_openai_params(self): return [ "stream", "stop", "temperature", "top_p", "max_tokens", "tools", "tool_choice", ] def map_openai_params(self, non_default_params: dict, optional_params: dict): for param, value in non_default_params.items(): if param == "max_tokens": optional_params["max_tokens"] = value if param == "tools": optional_params["tools"] = value if param == "stream" and value == True: optional_params["stream"] = value if param == "stop": if isinstance(value, str): if ( value == "\n" ) and litellm.drop_params == True: # anthropic doesn't allow whitespace characters as stop-sequences continue value = [value] elif isinstance(value, list): new_v = [] for v in value: if ( v == "\n" ) and litellm.drop_params == True: # anthropic doesn't allow whitespace characters as stop-sequences continue 
new_v.append(v) if len(new_v) > 0: value = new_v else: continue optional_params["stop_sequences"] = value if param == "temperature": optional_params["temperature"] = value if param == "top_p": optional_params["top_p"] = value return optional_params
(max_tokens: Optional[int] = 4096, stop_sequences: Optional[list] = None, temperature: Optional[int] = None, top_p: Optional[int] = None, top_k: Optional[int] = None, metadata: Optional[dict] = None, system: Optional[str] = None) -> None
62,874
litellm.llms.anthropic
__init__
null
def __init__( self, max_tokens: Optional[ int ] = 4096, # You can pass in a value yourself or use the default value 4096 stop_sequences: Optional[list] = None, temperature: Optional[int] = None, top_p: Optional[int] = None, top_k: Optional[int] = None, metadata: Optional[dict] = None, system: Optional[str] = None, ) -> None: locals_ = locals() for key, value in locals_.items(): if key != "self" and value is not None: setattr(self.__class__, key, value)
(self, max_tokens: Optional[int] = 4096, stop_sequences: Optional[list] = None, temperature: Optional[int] = None, top_p: Optional[int] = None, top_k: Optional[int] = None, metadata: Optional[dict] = None, system: Optional[str] = None) -> NoneType
62,875
litellm.llms.anthropic
get_supported_openai_params
null
def get_supported_openai_params(self): return [ "stream", "stop", "temperature", "top_p", "max_tokens", "tools", "tool_choice", ]
(self)
62,876
litellm.llms.anthropic
map_openai_params
null
def map_openai_params(self, non_default_params: dict, optional_params: dict): for param, value in non_default_params.items(): if param == "max_tokens": optional_params["max_tokens"] = value if param == "tools": optional_params["tools"] = value if param == "stream" and value == True: optional_params["stream"] = value if param == "stop": if isinstance(value, str): if ( value == "\n" ) and litellm.drop_params == True: # anthropic doesn't allow whitespace characters as stop-sequences continue value = [value] elif isinstance(value, list): new_v = [] for v in value: if ( v == "\n" ) and litellm.drop_params == True: # anthropic doesn't allow whitespace characters as stop-sequences continue new_v.append(v) if len(new_v) > 0: value = new_v else: continue optional_params["stop_sequences"] = value if param == "temperature": optional_params["temperature"] = value if param == "top_p": optional_params["top_p"] = value return optional_params
(self, non_default_params: dict, optional_params: dict)
62,877
litellm.llms.anthropic_text
AnthropicTextCompletion
null
class AnthropicTextCompletion(BaseLLM):
    """Handler for Anthropic's prompt/completion ("text") API.

    Provides sync and async, streaming and non-streaming completion entry
    points; ``embedding`` is an unimplemented stub.
    """

    def __init__(self) -> None:
        super().__init__()

    def _process_response(
        self, model_response: ModelResponse, response, encoding, prompt: str, model: str
    ):
        """Fill ``model_response`` from the raw HTTP response and attach usage.

        Raises AnthropicError when the body is not JSON or contains an
        ``error`` field; otherwise copies ``completion`` / ``stop_reason``
        into the response object and estimates token usage via ``encoding``.
        """
        ## RESPONSE OBJECT
        try:
            completion_response = response.json()
        except:
            # Non-JSON body: surface the raw text as a provider error.
            # NOTE(review): bare `except:` also traps SystemExit/KeyboardInterrupt.
            raise AnthropicError(
                message=response.text, status_code=response.status_code
            )
        if "error" in completion_response:
            raise AnthropicError(
                message=str(completion_response["error"]),
                status_code=response.status_code,
            )
        else:
            if len(completion_response["completion"]) > 0:
                model_response["choices"][0]["message"]["content"] = (
                    completion_response["completion"]
                )
            model_response.choices[0].finish_reason = completion_response["stop_reason"]

        ## CALCULATING USAGE
        # Token counts are approximated with the provided encoding, not
        # Anthropic's own tokenizer.
        prompt_tokens = len(
            encoding.encode(prompt)
        )  ##[TODO] use the anthropic tokenizer here
        completion_tokens = len(
            encoding.encode(model_response["choices"][0]["message"].get("content", ""))
        )  ##[TODO] use the anthropic tokenizer here

        model_response["created"] = int(time.time())
        model_response["model"] = model
        usage = Usage(
            prompt_tokens=prompt_tokens,
            completion_tokens=completion_tokens,
            total_tokens=prompt_tokens + completion_tokens,
        )
        setattr(model_response, "usage", usage)
        return model_response

    async def async_completion(
        self,
        model: str,
        model_response: ModelResponse,
        api_base: str,
        logging_obj,
        encoding,
        headers: dict,
        data: dict,
        client=None,
    ):
        """Async non-streaming call: POST the payload, log, parse the response."""
        if client is None:
            # 600s total / 5s connect timeout mirrors the sync path below.
            client = AsyncHTTPHandler(timeout=httpx.Timeout(timeout=600.0, connect=5.0))

        response = await client.post(api_base, headers=headers, data=json.dumps(data))

        if response.status_code != 200:
            raise AnthropicError(
                status_code=response.status_code, message=response.text
            )

        ## LOGGING
        logging_obj.post_call(
            input=data["prompt"],
            api_key=headers.get("x-api-key"),
            original_response=response.text,
            additional_args={"complete_input_dict": data},
        )

        response = self._process_response(
            model_response=model_response,
            response=response,
            encoding=encoding,
            prompt=data["prompt"],
            model=model,
        )
        return response

    async def async_streaming(
        self,
        model: str,
        api_base: str,
        logging_obj,
        headers: dict,
        data: Optional[dict],
        client=None,
    ):
        """Async streaming call: wrap the response line iterator in CustomStreamWrapper."""
        if client is None:
            client = AsyncHTTPHandler(timeout=httpx.Timeout(timeout=600.0, connect=5.0))

        response = await client.post(api_base, headers=headers, data=json.dumps(data))

        if response.status_code != 200:
            raise AnthropicError(
                status_code=response.status_code, message=response.text
            )

        completion_stream = response.aiter_lines()

        streamwrapper = CustomStreamWrapper(
            completion_stream=completion_stream,
            model=model,
            custom_llm_provider="anthropic_text",
            logging_obj=logging_obj,
        )
        return streamwrapper

    def completion(
        self,
        model: str,
        messages: list,
        api_base: str,
        acompletion: str,
        custom_prompt_dict: dict,
        model_response: ModelResponse,
        print_verbose: Callable,
        encoding,
        api_key,
        logging_obj,
        optional_params=None,
        litellm_params=None,
        logger_fn=None,
        headers={},
        client=None,
    ):
        """Sync entry point: build prompt, merge config defaults, dispatch.

        Routes to async/streaming variants based on ``acompletion`` and
        ``optional_params["stream"]``; otherwise performs a blocking POST
        and parses the result.
        NOTE(review): ``acompletion`` is annotated ``str`` but compared to
        ``True``; ``headers={}`` is a shared mutable default — confirm callers
        never mutate it.
        """
        headers = validate_environment(api_key, headers)

        if model in custom_prompt_dict:
            # check if the model has a registered custom prompt
            model_prompt_details = custom_prompt_dict[model]
            prompt = custom_prompt(
                role_dict=model_prompt_details["roles"],
                initial_prompt_value=model_prompt_details["initial_prompt_value"],
                final_prompt_value=model_prompt_details["final_prompt_value"],
                messages=messages,
            )
        else:
            prompt = prompt_factory(
                model=model, messages=messages, custom_llm_provider="anthropic"
            )

        ## Load Config
        config = litellm.AnthropicTextConfig.get_config()
        for k, v in config.items():
            if (
                k not in optional_params
            ):  # completion(top_k=3) > anthropic_config(top_k=3) <- allows for dynamic variables to be passed in
                optional_params[k] = v

        data = {
            "model": model,
            "prompt": prompt,
            **optional_params,
        }

        ## LOGGING
        logging_obj.pre_call(
            input=prompt,
            api_key=api_key,
            additional_args={
                "complete_input_dict": data,
                "api_base": api_base,
                "headers": headers,
            },
        )

        ## COMPLETION CALL
        if "stream" in optional_params and optional_params["stream"] == True:
            if acompletion == True:
                return self.async_streaming(
                    model=model,
                    api_base=api_base,
                    logging_obj=logging_obj,
                    headers=headers,
                    data=data,
                    client=None,
                )

            if client is None:
                client = HTTPHandler(timeout=httpx.Timeout(timeout=600.0, connect=5.0))

            response = client.post(
                api_base,
                headers=headers,
                data=json.dumps(data),
                # stream=optional_params["stream"],
            )

            if response.status_code != 200:
                raise AnthropicError(
                    status_code=response.status_code, message=response.text
                )
            completion_stream = response.iter_lines()
            stream_response = CustomStreamWrapper(
                completion_stream=completion_stream,
                model=model,
                custom_llm_provider="anthropic_text",
                logging_obj=logging_obj,
            )
            return stream_response

        elif acompletion == True:
            return self.async_completion(
                model=model,
                model_response=model_response,
                api_base=api_base,
                logging_obj=logging_obj,
                encoding=encoding,
                headers=headers,
                data=data,
                client=client,
            )
        else:
            if client is None:
                client = HTTPHandler(timeout=httpx.Timeout(timeout=600.0, connect=5.0))
            response = client.post(api_base, headers=headers, data=json.dumps(data))
            if response.status_code != 200:
                raise AnthropicError(
                    status_code=response.status_code, message=response.text
                )

            ## LOGGING
            logging_obj.post_call(
                input=prompt,
                api_key=api_key,
                original_response=response.text,
                additional_args={"complete_input_dict": data},
            )
            print_verbose(f"raw model_response: {response.text}")

            response = self._process_response(
                model_response=model_response,
                response=response,
                encoding=encoding,
                prompt=data["prompt"],
                model=model,
            )
            return response

    def embedding(self):
        # logic for parsing in - calling - parsing out model embedding calls
        pass
() -> None
62,881
litellm.llms.anthropic_text
_process_response
null
def _process_response(
    self, model_response: ModelResponse, response, encoding, prompt: str, model: str
):
    """Parse a raw Anthropic text-completion HTTP response into ``model_response``.

    Copies ``completion`` and ``stop_reason`` from the JSON body into the
    response object, stamps ``created``/``model``, and attaches an approximate
    Usage computed with ``encoding`` (not Anthropic's own tokenizer).

    Raises:
        AnthropicError: when the body is not valid JSON or contains an
            ``error`` field.
    """
    ## RESPONSE OBJECT
    try:
        completion_response = response.json()
    except Exception:
        # Fix: a bare `except:` here also swallowed SystemExit and
        # KeyboardInterrupt; `except Exception` still covers any JSON
        # decode failure while letting those propagate.
        raise AnthropicError(
            message=response.text, status_code=response.status_code
        )
    if "error" in completion_response:
        raise AnthropicError(
            message=str(completion_response["error"]),
            status_code=response.status_code,
        )
    else:
        if len(completion_response["completion"]) > 0:
            model_response["choices"][0]["message"]["content"] = (
                completion_response["completion"]
            )
        model_response.choices[0].finish_reason = completion_response["stop_reason"]

    ## CALCULATING USAGE
    prompt_tokens = len(
        encoding.encode(prompt)
    )  ##[TODO] use the anthropic tokenizer here
    completion_tokens = len(
        encoding.encode(model_response["choices"][0]["message"].get("content", ""))
    )  ##[TODO] use the anthropic tokenizer here

    model_response["created"] = int(time.time())
    model_response["model"] = model
    usage = Usage(
        prompt_tokens=prompt_tokens,
        completion_tokens=completion_tokens,
        total_tokens=prompt_tokens + completion_tokens,
    )
    setattr(model_response, "usage", usage)
    return model_response
(self, model_response: litellm.utils.ModelResponse, response, encoding, prompt: str, model: str)
62,884
litellm.llms.anthropic_text
completion
null
def completion(
    self,
    model: str,
    messages: list,
    api_base: str,
    acompletion: str,
    custom_prompt_dict: dict,
    model_response: ModelResponse,
    print_verbose: Callable,
    encoding,
    api_key,
    logging_obj,
    optional_params=None,
    litellm_params=None,
    logger_fn=None,
    headers={},
    client=None,
):
    """Sync entry point for Anthropic text completion.

    Builds the prompt (registered custom template or ``prompt_factory``),
    merges ``AnthropicTextConfig`` defaults into ``optional_params``, then
    dispatches: streaming + ``acompletion`` -> ``async_streaming``; streaming
    sync -> a CustomStreamWrapper over the line iterator; ``acompletion``
    alone -> ``async_completion``; otherwise a blocking POST parsed by
    ``_process_response``.
    NOTE(review): ``acompletion`` is annotated ``str`` but compared with
    ``== True``; ``headers={}`` is a shared mutable default — confirm callers
    never mutate it.
    """
    headers = validate_environment(api_key, headers)

    if model in custom_prompt_dict:
        # check if the model has a registered custom prompt
        model_prompt_details = custom_prompt_dict[model]
        prompt = custom_prompt(
            role_dict=model_prompt_details["roles"],
            initial_prompt_value=model_prompt_details["initial_prompt_value"],
            final_prompt_value=model_prompt_details["final_prompt_value"],
            messages=messages,
        )
    else:
        prompt = prompt_factory(
            model=model, messages=messages, custom_llm_provider="anthropic"
        )

    ## Load Config
    config = litellm.AnthropicTextConfig.get_config()
    for k, v in config.items():
        if (
            k not in optional_params
        ):  # completion(top_k=3) > anthropic_config(top_k=3) <- allows for dynamic variables to be passed in
            optional_params[k] = v

    data = {
        "model": model,
        "prompt": prompt,
        **optional_params,
    }

    ## LOGGING
    logging_obj.pre_call(
        input=prompt,
        api_key=api_key,
        additional_args={
            "complete_input_dict": data,
            "api_base": api_base,
            "headers": headers,
        },
    )

    ## COMPLETION CALL
    if "stream" in optional_params and optional_params["stream"] == True:
        if acompletion == True:
            return self.async_streaming(
                model=model,
                api_base=api_base,
                logging_obj=logging_obj,
                headers=headers,
                data=data,
                client=None,
            )

        if client is None:
            # 600s total / 5s connect timeout, shared with the non-streaming path.
            client = HTTPHandler(timeout=httpx.Timeout(timeout=600.0, connect=5.0))

        response = client.post(
            api_base,
            headers=headers,
            data=json.dumps(data),
            # stream=optional_params["stream"],
        )

        if response.status_code != 200:
            raise AnthropicError(
                status_code=response.status_code, message=response.text
            )
        completion_stream = response.iter_lines()
        stream_response = CustomStreamWrapper(
            completion_stream=completion_stream,
            model=model,
            custom_llm_provider="anthropic_text",
            logging_obj=logging_obj,
        )
        return stream_response

    elif acompletion == True:
        return self.async_completion(
            model=model,
            model_response=model_response,
            api_base=api_base,
            logging_obj=logging_obj,
            encoding=encoding,
            headers=headers,
            data=data,
            client=client,
        )
    else:
        if client is None:
            client = HTTPHandler(timeout=httpx.Timeout(timeout=600.0, connect=5.0))
        response = client.post(api_base, headers=headers, data=json.dumps(data))
        if response.status_code != 200:
            raise AnthropicError(
                status_code=response.status_code, message=response.text
            )

        ## LOGGING
        logging_obj.post_call(
            input=prompt,
            api_key=api_key,
            original_response=response.text,
            additional_args={"complete_input_dict": data},
        )
        print_verbose(f"raw model_response: {response.text}")

        response = self._process_response(
            model_response=model_response,
            response=response,
            encoding=encoding,
            prompt=data["prompt"],
            model=model,
        )
        return response
(self, model: str, messages: list, api_base: str, acompletion: str, custom_prompt_dict: dict, model_response: litellm.utils.ModelResponse, print_verbose: Callable, encoding, api_key, logging_obj, optional_params=None, litellm_params=None, logger_fn=None, headers={}, client=None)
62,888
litellm.llms.base
process_response
Helper function to process the response across sync + async completion calls
def process_response(
    self,
    model: str,
    response: Union[requests.Response, httpx.Response],
    model_response: litellm.utils.ModelResponse,
    stream: bool,
    logging_obj: Logging,
    optional_params: dict,
    api_key: str,
    data: Union[dict, str],
    messages: list,
    print_verbose,
    encoding,
) -> litellm.utils.ModelResponse:
    """Shared hook for turning a raw provider response into a ModelResponse.

    This base implementation returns ``model_response`` unchanged; it exists
    so sync and async completion calls can funnel through one signature.
    """
    return model_response
(self, model: str, response: Union[requests.models.Response, httpx.Response], model_response: litellm.utils.ModelResponse, stream: bool, logging_obj: litellm.utils.Logging, optional_params: dict, api_key: str, data: Union[dict, str], messages: list, print_verbose, encoding) -> litellm.utils.ModelResponse
62,890
litellm.llms.anthropic_text
AnthropicTextConfig
Reference: https://docs.anthropic.com/claude/reference/complete_post to pass metadata to anthropic, it's {"user_id": "any-relevant-information"}
class AnthropicTextConfig:
    """
    Reference: https://docs.anthropic.com/claude/reference/complete_post

    to pass metadata to anthropic, it's {"user_id": "any-relevant-information"}
    """

    # NOTE: these are *class* attributes — __init__ below writes user-supplied
    # overrides onto the class, so configured values are shared by all
    # instances and picked up by later get_config() calls.
    max_tokens_to_sample: Optional[int] = (
        litellm.max_tokens
    )  # anthropic requires a default
    stop_sequences: Optional[list] = None
    temperature: Optional[int] = None
    top_p: Optional[int] = None
    top_k: Optional[int] = None
    metadata: Optional[dict] = None

    def __init__(
        self,
        max_tokens_to_sample: Optional[int] = 256,  # anthropic requires a default
        stop_sequences: Optional[list] = None,
        temperature: Optional[int] = None,
        top_p: Optional[int] = None,
        top_k: Optional[int] = None,
        metadata: Optional[dict] = None,
    ) -> None:
        # Copy every non-None argument onto the class itself so it becomes the
        # new default (see NOTE on the class attributes above).
        locals_ = locals()
        for key, value in locals_.items():
            if key != "self" and value is not None:
                setattr(self.__class__, key, value)

    @classmethod
    def get_config(cls):
        # Expose the currently-configured, non-None class attributes as a
        # dict, skipping dunders and any functions/descriptors on the class.
        return {
            k: v
            for k, v in cls.__dict__.items()
            if not k.startswith("__")
            and not isinstance(
                v,
                (
                    types.FunctionType,
                    types.BuiltinFunctionType,
                    classmethod,
                    staticmethod,
                ),
            )
            and v is not None
        }
(max_tokens_to_sample: Optional[int] = 256, stop_sequences: Optional[list] = None, temperature: Optional[int] = None, top_p: Optional[int] = None, top_k: Optional[int] = None, metadata: Optional[dict] = None) -> None
62,891
litellm.llms.anthropic_text
__init__
null
def __init__(
    self,
    max_tokens_to_sample: Optional[int] = 256,  # anthropic requires a default
    stop_sequences: Optional[list] = None,
    temperature: Optional[int] = None,
    top_p: Optional[int] = None,
    top_k: Optional[int] = None,
    metadata: Optional[dict] = None,
) -> None:
    """Promote every non-None argument to a class-level default.

    Uses ``setattr(self.__class__, ...)``, so configured values are shared
    by all instances — the config pattern used throughout this package.
    """
    for key, value in locals().items():
        if key != "self" and value is not None:
            setattr(self.__class__, key, value)
(self, max_tokens_to_sample: Optional[int] = 256, stop_sequences: Optional[list] = None, temperature: Optional[int] = None, top_p: Optional[int] = None, top_k: Optional[int] = None, metadata: Optional[dict] = None) -> NoneType
62,892
openai.types.beta.assistant
Assistant
null
class Assistant(BaseModel):
    """Pydantic model for an OpenAI Assistants API `assistant` object."""

    id: str
    """The identifier, which can be referenced in API endpoints."""

    created_at: int
    """The Unix timestamp (in seconds) for when the assistant was created."""

    description: Optional[str] = None
    """The description of the assistant. The maximum length is 512 characters."""

    instructions: Optional[str] = None
    """The system instructions that the assistant uses.

    The maximum length is 256,000 characters.
    """

    metadata: Optional[object] = None
    """Set of 16 key-value pairs that can be attached to an object.

    This can be useful for storing additional information about the object in a
    structured format. Keys can be a maximum of 64 characters long and values can
    be a maximum of 512 characters long.
    """

    model: str
    """ID of the model to use.

    You can use the
    [List models](https://platform.openai.com/docs/api-reference/models/list) API to
    see all of your available models, or see our
    [Model overview](https://platform.openai.com/docs/models/overview) for
    descriptions of them.
    """

    name: Optional[str] = None
    """The name of the assistant. The maximum length is 256 characters."""

    object: Literal["assistant"]
    """The object type, which is always `assistant`."""

    tools: List[AssistantTool]
    """A list of tools enabled on the assistant.

    There can be a maximum of 128 tools per assistant. Tools can be of types
    `code_interpreter`, `file_search`, or `function`.
    """

    response_format: Optional[AssistantResponseFormatOption] = None
    """Specifies the format that the model must output.

    Compatible with
    [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo)
    and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.

    Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
    message the model generates is valid JSON.

    **Important:** when using JSON mode, you **must** also instruct the model to
    produce JSON yourself via a system or user message. Without this, the model may
    generate an unending stream of whitespace until the generation reaches the
    token limit, resulting in a long-running and seemingly "stuck" request. Also
    note that the message content may be partially cut off if
    `finish_reason="length"`, which indicates the generation exceeded `max_tokens`
    or the conversation exceeded the max context length.
    """

    temperature: Optional[float] = None
    """What sampling temperature to use, between 0 and 2.

    Higher values like 0.8 will make the output more random, while lower values
    like 0.2 will make it more focused and deterministic.
    """

    tool_resources: Optional[ToolResources] = None
    """A set of resources that are used by the assistant's tools.

    The resources are specific to the type of tool. For example, the
    `code_interpreter` tool requires a list of file IDs, while the `file_search`
    tool requires a list of vector store IDs.
    """

    top_p: Optional[float] = None
    """
    An alternative to sampling with temperature, called nucleus sampling, where the
    model considers the results of the tokens with top_p probability mass. So 0.1
    means only the tokens comprising the top 10% probability mass are considered.

    We generally recommend altering this or temperature but not both.
    """
(**data: 'Any') -> 'None'
62,909
openai._models
__str__
null
@override
def __str__(self) -> str:
    # mypy complains about an invalid self arg
    repr_name = self.__repr_name__()  # type: ignore[misc]
    repr_fields = self.__repr_str__(", ")  # type: ignore[misc]
    return f"{repr_name}({repr_fields})"
(self) -> str
62,921
openai._models
to_dict
Recursively generate a dictionary representation of the model, optionally specifying which fields to include or exclude. By default, fields that were not set by the API will not be included, and keys will match the API response, *not* the property names from the model. For example, if the API responds with `"fooBar": true` but we've defined a `foo_bar: bool` property, the output will use the `"fooBar"` key (unless `use_api_names=False` is passed). Args: mode: If mode is 'json', the dictionary will only contain JSON serializable types. e.g. `datetime` will be turned into a string, `"2024-3-22T18:11:19.117000Z"`. If mode is 'python', the dictionary may contain any Python objects. e.g. `datetime(2024, 3, 22)` use_api_names: Whether to use the key that the API responded with or the property name. Defaults to `True`. exclude_unset: Whether to exclude fields that have not been explicitly set. exclude_defaults: Whether to exclude fields that are set to their default value from the output. exclude_none: Whether to exclude fields that have a value of `None` from the output. warnings: Whether to log warnings when invalid fields are encountered. This is only supported in Pydantic v2.
def to_dict(
    self,
    *,
    mode: Literal["json", "python"] = "python",
    use_api_names: bool = True,
    exclude_unset: bool = True,
    exclude_defaults: bool = False,
    exclude_none: bool = False,
    warnings: bool = True,
) -> dict[str, object]:
    """Recursively dump this model to a dictionary.

    By default only fields the API actually set are included, and keys use the
    API's wire names rather than the Python property names. For example, if the
    API responds with `"fooBar": true` but we've defined a `foo_bar: bool`
    property, the output will use the `"fooBar"` key (unless
    `use_api_names=False` is passed).

    Args:
        mode: If mode is 'json', the dictionary will only contain JSON
            serializable types, e.g. `datetime` becomes a string like
            `"2024-3-22T18:11:19.117000Z"`. If mode is 'python', the dictionary
            may contain any Python objects, e.g. `datetime(2024, 3, 22)`.
        use_api_names: Whether to use the key that the API responded with or
            the property name. Defaults to `True`.
        exclude_unset: Whether to exclude fields that have not been explicitly set.
        exclude_defaults: Whether to exclude fields that are set to their
            default value from the output.
        exclude_none: Whether to exclude fields that have a value of `None`
            from the output.
        warnings: Whether to log warnings when invalid fields are encountered.
            This is only supported in Pydantic v2.
    """
    dump_kwargs = {
        "mode": mode,
        "by_alias": use_api_names,
        "exclude_unset": exclude_unset,
        "exclude_defaults": exclude_defaults,
        "exclude_none": exclude_none,
        "warnings": warnings,
    }
    return self.model_dump(**dump_kwargs)
(self, *, mode: Literal['json', 'python'] = 'python', use_api_names: bool = True, exclude_unset: bool = True, exclude_defaults: bool = False, exclude_none: bool = False, warnings: bool = True) -> dict[str, object]
62,922
openai._models
to_json
Generates a JSON string representing this model as it would be received from or sent to the API (but with indentation). By default, fields that were not set by the API will not be included, and keys will match the API response, *not* the property names from the model. For example, if the API responds with `"fooBar": true` but we've defined a `foo_bar: bool` property, the output will use the `"fooBar"` key (unless `use_api_names=False` is passed). Args: indent: Indentation to use in the JSON output. If `None` is passed, the output will be compact. Defaults to `2` use_api_names: Whether to use the key that the API responded with or the property name. Defaults to `True`. exclude_unset: Whether to exclude fields that have not been explicitly set. exclude_defaults: Whether to exclude fields that have the default value. exclude_none: Whether to exclude fields that have a value of `None`. warnings: Whether to show any warnings that occurred during serialization. This is only supported in Pydantic v2.
def to_json( self, *, indent: int | None = 2, use_api_names: bool = True, exclude_unset: bool = True, exclude_defaults: bool = False, exclude_none: bool = False, warnings: bool = True, ) -> str: """Generates a JSON string representing this model as it would be received from or sent to the API (but with indentation). By default, fields that were not set by the API will not be included, and keys will match the API response, *not* the property names from the model. For example, if the API responds with `"fooBar": true` but we've defined a `foo_bar: bool` property, the output will use the `"fooBar"` key (unless `use_api_names=False` is passed). Args: indent: Indentation to use in the JSON output. If `None` is passed, the output will be compact. Defaults to `2` use_api_names: Whether to use the key that the API responded with or the property name. Defaults to `True`. exclude_unset: Whether to exclude fields that have not been explicitly set. exclude_defaults: Whether to exclude fields that have the default value. exclude_none: Whether to exclude fields that have a value of `None`. warnings: Whether to show any warnings that occurred during serialization. This is only supported in Pydantic v2. """ return self.model_dump_json( indent=indent, by_alias=use_api_names, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, exclude_none=exclude_none, warnings=warnings, )
(self, *, indent: int | None = 2, use_api_names: bool = True, exclude_unset: bool = True, exclude_defaults: bool = False, exclude_none: bool = False, warnings: bool = True) -> str
62,923
litellm.main
AsyncCompletions
null
class AsyncCompletions:
    """Async completions facade.

    Merges stored default params with per-call kwargs, then dispatches to the
    router's ``acompletion`` when a router is configured, or to the
    module-level ``acompletion`` otherwise.
    """

    def __init__(self, params, router_obj: Optional[Any]):
        self.params = params
        self.router_obj = router_obj

    async def create(self, messages, model=None, **kwargs):
        # Per-call kwargs are folded into (and persisted on) self.params.
        for key, val in kwargs.items():
            self.params[key] = val
        chosen_model = model or self.params.get("model")
        if self.router_obj is None:
            return await acompletion(
                model=chosen_model, messages=messages, **self.params
            )
        return await self.router_obj.acompletion(
            model=chosen_model, messages=messages, **self.params
        )
(params, router_obj: Optional[Any])
62,924
litellm.main
__init__
null
def __init__(self, params, router_obj: Optional[Any]):
    """Keep a reference to the default call params and an optional router."""
    self.router_obj = router_obj
    self.params = params
(self, params, router_obj: Optional[Any])
62,926
litellm.types.llms.openai
Attachment
null
class Attachment(TypedDict, total=False):
    # total=False: callers may omit either key when constructing the dict.
    file_id: str
    """The ID of the file to attach to the message."""

    tools: Iterable[AttachmentTool]
    """The tools to add this file to."""
null
62,927
litellm.exceptions
AuthenticationError
null
class AuthenticationError(openai.AuthenticationError):  # type: ignore
    """401 error raised when a provider rejects the supplied credentials."""

    def __init__(self, message, llm_provider, model, response: httpx.Response):
        self.message = message
        self.llm_provider = llm_provider
        self.model = model
        self.status_code = 401
        # Call the base class constructor with the parameters it needs
        super().__init__(self.message, response=response, body=None)
(message, llm_provider, model, response: httpx.Response)
62,928
litellm.exceptions
__init__
null
def __init__(self, message, llm_provider, model, response: httpx.Response):
    """Record the 401 error details, then delegate to the base constructor."""
    self.message = message
    self.llm_provider = llm_provider
    self.model = model
    self.status_code = 401
    # Call the base class constructor with the parameters it needs
    super().__init__(self.message, response=response, body=None)
(self, message, llm_provider, model, response: httpx.Response)
62,929
litellm.llms.azure
AzureChatCompletion
null
class AzureChatCompletion(BaseLLM): def __init__(self) -> None: super().__init__() def validate_environment(self, api_key, azure_ad_token): headers = { "content-type": "application/json", } if api_key is not None: headers["api-key"] = api_key elif azure_ad_token is not None: if azure_ad_token.startswith("oidc/"): azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) headers["Authorization"] = f"Bearer {azure_ad_token}" return headers def completion( self, model: str, messages: list, model_response: ModelResponse, api_key: str, api_base: str, api_version: str, api_type: str, azure_ad_token: str, print_verbose: Callable, timeout: Union[float, httpx.Timeout], logging_obj, optional_params, litellm_params, logger_fn, acompletion: bool = False, headers: Optional[dict] = None, client=None, ): super().completion() exception_mapping_worked = False try: if model is None or messages is None: raise AzureOpenAIError( status_code=422, message=f"Missing model or messages" ) max_retries = optional_params.pop("max_retries", 2) ### CHECK IF CLOUDFLARE AI GATEWAY ### ### if so - set the model as part of the base url if "gateway.ai.cloudflare.com" in api_base: ## build base url - assume api base includes resource name if client is None: if not api_base.endswith("/"): api_base += "/" api_base += f"{model}" azure_client_params = { "api_version": api_version, "base_url": f"{api_base}", "http_client": litellm.client_session, "max_retries": max_retries, "timeout": timeout, } if api_key is not None: azure_client_params["api_key"] = api_key elif azure_ad_token is not None: if azure_ad_token.startswith("oidc/"): azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) azure_client_params["azure_ad_token"] = azure_ad_token if acompletion is True: client = AsyncAzureOpenAI(**azure_client_params) else: client = AzureOpenAI(**azure_client_params) data = {"model": None, "messages": messages, **optional_params} else: data = { "model": model, # type: ignore "messages": messages, 
**optional_params, } if acompletion is True: if optional_params.get("stream", False): return self.async_streaming( logging_obj=logging_obj, api_base=api_base, data=data, model=model, api_key=api_key, api_version=api_version, azure_ad_token=azure_ad_token, timeout=timeout, client=client, ) else: return self.acompletion( api_base=api_base, data=data, model_response=model_response, api_key=api_key, api_version=api_version, model=model, azure_ad_token=azure_ad_token, timeout=timeout, client=client, logging_obj=logging_obj, ) elif "stream" in optional_params and optional_params["stream"] == True: return self.streaming( logging_obj=logging_obj, api_base=api_base, data=data, model=model, api_key=api_key, api_version=api_version, azure_ad_token=azure_ad_token, timeout=timeout, client=client, ) else: ## LOGGING logging_obj.pre_call( input=messages, api_key=api_key, additional_args={ "headers": { "api_key": api_key, "azure_ad_token": azure_ad_token, }, "api_version": api_version, "api_base": api_base, "complete_input_dict": data, }, ) if not isinstance(max_retries, int): raise AzureOpenAIError( status_code=422, message="max retries must be an int" ) # init AzureOpenAI Client azure_client_params = { "api_version": api_version, "azure_endpoint": api_base, "azure_deployment": model, "http_client": litellm.client_session, "max_retries": max_retries, "timeout": timeout, } azure_client_params = select_azure_base_url_or_endpoint( azure_client_params=azure_client_params ) if api_key is not None: azure_client_params["api_key"] = api_key elif azure_ad_token is not None: if azure_ad_token.startswith("oidc/"): azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) azure_client_params["azure_ad_token"] = azure_ad_token if client is None: azure_client = AzureOpenAI(**azure_client_params) else: azure_client = client if api_version is not None and isinstance( azure_client._custom_query, dict ): # set api_version to version passed by user azure_client._custom_query.setdefault( 
"api-version", api_version ) response = azure_client.chat.completions.create(**data, timeout=timeout) # type: ignore stringified_response = response.model_dump() ## LOGGING logging_obj.post_call( input=messages, api_key=api_key, original_response=stringified_response, additional_args={ "headers": headers, "api_version": api_version, "api_base": api_base, }, ) return convert_to_model_response_object( response_object=stringified_response, model_response_object=model_response, ) except AzureOpenAIError as e: exception_mapping_worked = True raise e except Exception as e: if hasattr(e, "status_code"): raise AzureOpenAIError(status_code=e.status_code, message=str(e)) else: raise AzureOpenAIError(status_code=500, message=str(e)) async def acompletion( self, api_key: str, api_version: str, model: str, api_base: str, data: dict, timeout: Any, model_response: ModelResponse, azure_ad_token: Optional[str] = None, client=None, # this is the AsyncAzureOpenAI logging_obj=None, ): response = None try: max_retries = data.pop("max_retries", 2) if not isinstance(max_retries, int): raise AzureOpenAIError( status_code=422, message="max retries must be an int" ) # init AzureOpenAI Client azure_client_params = { "api_version": api_version, "azure_endpoint": api_base, "azure_deployment": model, "http_client": litellm.client_session, "max_retries": max_retries, "timeout": timeout, } azure_client_params = select_azure_base_url_or_endpoint( azure_client_params=azure_client_params ) if api_key is not None: azure_client_params["api_key"] = api_key elif azure_ad_token is not None: if azure_ad_token.startswith("oidc/"): azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) azure_client_params["azure_ad_token"] = azure_ad_token # setting Azure client if client is None: azure_client = AsyncAzureOpenAI(**azure_client_params) else: azure_client = client if api_version is not None and isinstance( azure_client._custom_query, dict ): # set api_version to version passed by user 
azure_client._custom_query.setdefault("api-version", api_version) ## LOGGING logging_obj.pre_call( input=data["messages"], api_key=azure_client.api_key, additional_args={ "headers": {"Authorization": f"Bearer {azure_client.api_key}"}, "api_base": azure_client._base_url._uri_reference, "acompletion": True, "complete_input_dict": data, }, ) response = await azure_client.chat.completions.create( **data, timeout=timeout ) return convert_to_model_response_object( response_object=response.model_dump(), model_response_object=model_response, ) except AzureOpenAIError as e: exception_mapping_worked = True raise e except Exception as e: if hasattr(e, "status_code"): raise e else: raise AzureOpenAIError(status_code=500, message=str(e)) def streaming( self, logging_obj, api_base: str, api_key: str, api_version: str, data: dict, model: str, timeout: Any, azure_ad_token: Optional[str] = None, client=None, ): max_retries = data.pop("max_retries", 2) if not isinstance(max_retries, int): raise AzureOpenAIError( status_code=422, message="max retries must be an int" ) # init AzureOpenAI Client azure_client_params = { "api_version": api_version, "azure_endpoint": api_base, "azure_deployment": model, "http_client": litellm.client_session, "max_retries": max_retries, "timeout": timeout, } azure_client_params = select_azure_base_url_or_endpoint( azure_client_params=azure_client_params ) if api_key is not None: azure_client_params["api_key"] = api_key elif azure_ad_token is not None: if azure_ad_token.startswith("oidc/"): azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) azure_client_params["azure_ad_token"] = azure_ad_token if client is None: azure_client = AzureOpenAI(**azure_client_params) else: azure_client = client if api_version is not None and isinstance(azure_client._custom_query, dict): # set api_version to version passed by user azure_client._custom_query.setdefault("api-version", api_version) ## LOGGING logging_obj.pre_call( input=data["messages"], 
api_key=azure_client.api_key, additional_args={ "headers": {"Authorization": f"Bearer {azure_client.api_key}"}, "api_base": azure_client._base_url._uri_reference, "acompletion": True, "complete_input_dict": data, }, ) response = azure_client.chat.completions.create(**data, timeout=timeout) streamwrapper = CustomStreamWrapper( completion_stream=response, model=model, custom_llm_provider="azure", logging_obj=logging_obj, ) return streamwrapper async def async_streaming( self, logging_obj, api_base: str, api_key: str, api_version: str, data: dict, model: str, timeout: Any, azure_ad_token: Optional[str] = None, client=None, ): try: # init AzureOpenAI Client azure_client_params = { "api_version": api_version, "azure_endpoint": api_base, "azure_deployment": model, "http_client": litellm.client_session, "max_retries": data.pop("max_retries", 2), "timeout": timeout, } azure_client_params = select_azure_base_url_or_endpoint( azure_client_params=azure_client_params ) if api_key is not None: azure_client_params["api_key"] = api_key elif azure_ad_token is not None: if azure_ad_token.startswith("oidc/"): azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) azure_client_params["azure_ad_token"] = azure_ad_token if client is None: azure_client = AsyncAzureOpenAI(**azure_client_params) else: azure_client = client if api_version is not None and isinstance( azure_client._custom_query, dict ): # set api_version to version passed by user azure_client._custom_query.setdefault("api-version", api_version) ## LOGGING logging_obj.pre_call( input=data["messages"], api_key=azure_client.api_key, additional_args={ "headers": {"Authorization": f"Bearer {azure_client.api_key}"}, "api_base": azure_client._base_url._uri_reference, "acompletion": True, "complete_input_dict": data, }, ) response = await azure_client.chat.completions.create( **data, timeout=timeout ) # return response streamwrapper = CustomStreamWrapper( completion_stream=response, model=model, custom_llm_provider="azure", 
logging_obj=logging_obj, ) return streamwrapper ## DO NOT make this into an async for ... loop, it will yield an async generator, which won't raise errors if the response fails except Exception as e: if hasattr(e, "status_code"): raise AzureOpenAIError(status_code=e.status_code, message=str(e)) else: raise AzureOpenAIError(status_code=500, message=str(e)) async def aembedding( self, data: dict, model_response: ModelResponse, azure_client_params: dict, api_key: str, input: list, client=None, logging_obj=None, timeout=None, ): response = None try: if client is None: openai_aclient = AsyncAzureOpenAI(**azure_client_params) else: openai_aclient = client response = await openai_aclient.embeddings.create(**data, timeout=timeout) stringified_response = response.model_dump() ## LOGGING logging_obj.post_call( input=input, api_key=api_key, additional_args={"complete_input_dict": data}, original_response=stringified_response, ) return convert_to_model_response_object( response_object=stringified_response, model_response_object=model_response, response_type="embedding", ) except Exception as e: ## LOGGING logging_obj.post_call( input=input, api_key=api_key, additional_args={"complete_input_dict": data}, original_response=str(e), ) raise e def embedding( self, model: str, input: list, api_key: str, api_base: str, api_version: str, timeout: float, logging_obj=None, model_response=None, optional_params=None, azure_ad_token: Optional[str] = None, client=None, aembedding=None, ): super().embedding() exception_mapping_worked = False if self._client_session is None: self._client_session = self.create_client_session() try: data = {"model": model, "input": input, **optional_params} max_retries = data.pop("max_retries", 2) if not isinstance(max_retries, int): raise AzureOpenAIError( status_code=422, message="max retries must be an int" ) # init AzureOpenAI Client azure_client_params = { "api_version": api_version, "azure_endpoint": api_base, "azure_deployment": model, "http_client": 
litellm.client_session, "max_retries": max_retries, "timeout": timeout, } azure_client_params = select_azure_base_url_or_endpoint( azure_client_params=azure_client_params ) if api_key is not None: azure_client_params["api_key"] = api_key elif azure_ad_token is not None: if azure_ad_token.startswith("oidc/"): azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) azure_client_params["azure_ad_token"] = azure_ad_token ## LOGGING logging_obj.pre_call( input=input, api_key=api_key, additional_args={ "complete_input_dict": data, "headers": {"api_key": api_key, "azure_ad_token": azure_ad_token}, }, ) if aembedding == True: response = self.aembedding( data=data, input=input, logging_obj=logging_obj, api_key=api_key, model_response=model_response, azure_client_params=azure_client_params, timeout=timeout, ) return response if client is None: azure_client = AzureOpenAI(**azure_client_params) # type: ignore else: azure_client = client ## COMPLETION CALL response = azure_client.embeddings.create(**data, timeout=timeout) # type: ignore ## LOGGING logging_obj.post_call( input=input, api_key=api_key, additional_args={"complete_input_dict": data, "api_base": api_base}, original_response=response, ) return convert_to_model_response_object(response_object=response.model_dump(), model_response_object=model_response, response_type="embedding") # type: ignore except AzureOpenAIError as e: exception_mapping_worked = True raise e except Exception as e: if hasattr(e, "status_code"): raise AzureOpenAIError(status_code=e.status_code, message=str(e)) else: raise AzureOpenAIError(status_code=500, message=str(e)) async def aimage_generation( self, data: dict, model_response: ModelResponse, azure_client_params: dict, api_key: str, input: list, client=None, logging_obj=None, timeout=None, ): response = None try: if client is None: client_session = litellm.aclient_session or httpx.AsyncClient( transport=AsyncCustomHTTPTransport(), ) azure_client = AsyncAzureOpenAI( 
http_client=client_session, **azure_client_params ) else: azure_client = client ## LOGGING logging_obj.pre_call( input=data["prompt"], api_key=azure_client.api_key, additional_args={ "headers": {"api_key": azure_client.api_key}, "api_base": azure_client._base_url._uri_reference, "acompletion": True, "complete_input_dict": data, }, ) response = await azure_client.images.generate(**data, timeout=timeout) stringified_response = response.model_dump() ## LOGGING logging_obj.post_call( input=input, api_key=api_key, additional_args={"complete_input_dict": data}, original_response=stringified_response, ) return convert_to_model_response_object( response_object=stringified_response, model_response_object=model_response, response_type="image_generation", ) except Exception as e: ## LOGGING logging_obj.post_call( input=input, api_key=api_key, additional_args={"complete_input_dict": data}, original_response=str(e), ) raise e def image_generation( self, prompt: str, timeout: float, model: Optional[str] = None, api_key: Optional[str] = None, api_base: Optional[str] = None, api_version: Optional[str] = None, model_response: Optional[litellm.utils.ImageResponse] = None, azure_ad_token: Optional[str] = None, logging_obj=None, optional_params=None, client=None, aimg_generation=None, ): exception_mapping_worked = False try: if model and len(model) > 0: model = model else: model = None ## BASE MODEL CHECK if ( model_response is not None and optional_params.get("base_model", None) is not None ): model_response._hidden_params["model"] = optional_params.pop( "base_model" ) data = {"model": model, "prompt": prompt, **optional_params} max_retries = data.pop("max_retries", 2) if not isinstance(max_retries, int): raise AzureOpenAIError( status_code=422, message="max retries must be an int" ) # init AzureOpenAI Client azure_client_params = { "api_version": api_version, "azure_endpoint": api_base, "azure_deployment": model, "max_retries": max_retries, "timeout": timeout, } azure_client_params 
= select_azure_base_url_or_endpoint( azure_client_params=azure_client_params ) if api_key is not None: azure_client_params["api_key"] = api_key elif azure_ad_token is not None: if azure_ad_token.startswith("oidc/"): azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) azure_client_params["azure_ad_token"] = azure_ad_token if aimg_generation == True: response = self.aimage_generation(data=data, input=input, logging_obj=logging_obj, model_response=model_response, api_key=api_key, client=client, azure_client_params=azure_client_params, timeout=timeout) # type: ignore return response if client is None: client_session = litellm.client_session or httpx.Client( transport=CustomHTTPTransport(), ) azure_client = AzureOpenAI(http_client=client_session, **azure_client_params) # type: ignore else: azure_client = client ## LOGGING logging_obj.pre_call( input=prompt, api_key=azure_client.api_key, additional_args={ "headers": {"api_key": azure_client.api_key}, "api_base": azure_client._base_url._uri_reference, "acompletion": False, "complete_input_dict": data, }, ) ## COMPLETION CALL response = azure_client.images.generate(**data, timeout=timeout) # type: ignore ## LOGGING logging_obj.post_call( input=prompt, api_key=api_key, additional_args={"complete_input_dict": data}, original_response=response, ) # return response return convert_to_model_response_object(response_object=response.model_dump(), model_response_object=model_response, response_type="image_generation") # type: ignore except AzureOpenAIError as e: exception_mapping_worked = True raise e except Exception as e: if hasattr(e, "status_code"): raise AzureOpenAIError(status_code=e.status_code, message=str(e)) else: raise AzureOpenAIError(status_code=500, message=str(e)) def audio_transcriptions( self, model: str, audio_file: BinaryIO, optional_params: dict, model_response: TranscriptionResponse, timeout: float, max_retries: int, api_key: Optional[str] = None, api_base: Optional[str] = None, api_version: 
Optional[str] = None, client=None, azure_ad_token: Optional[str] = None, logging_obj=None, atranscription: bool = False, ): data = {"model": model, "file": audio_file, **optional_params} # init AzureOpenAI Client azure_client_params = { "api_version": api_version, "azure_endpoint": api_base, "azure_deployment": model, "timeout": timeout, } azure_client_params = select_azure_base_url_or_endpoint( azure_client_params=azure_client_params ) if api_key is not None: azure_client_params["api_key"] = api_key elif azure_ad_token is not None: if azure_ad_token.startswith("oidc/"): azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) azure_client_params["azure_ad_token"] = azure_ad_token if max_retries is not None: azure_client_params["max_retries"] = max_retries if atranscription == True: return self.async_audio_transcriptions( audio_file=audio_file, data=data, model_response=model_response, timeout=timeout, api_key=api_key, api_base=api_base, client=client, azure_client_params=azure_client_params, max_retries=max_retries, logging_obj=logging_obj, ) if client is None: azure_client = AzureOpenAI(http_client=litellm.client_session, **azure_client_params) # type: ignore else: azure_client = client ## LOGGING logging_obj.pre_call( input=f"audio_file_{uuid.uuid4()}", api_key=azure_client.api_key, additional_args={ "headers": {"Authorization": f"Bearer {azure_client.api_key}"}, "api_base": azure_client._base_url._uri_reference, "atranscription": True, "complete_input_dict": data, }, ) response = azure_client.audio.transcriptions.create( **data, timeout=timeout # type: ignore ) stringified_response = response.model_dump() ## LOGGING logging_obj.post_call( input=audio_file.name, api_key=api_key, additional_args={"complete_input_dict": data}, original_response=stringified_response, ) hidden_params = {"model": "whisper-1", "custom_llm_provider": "azure"} final_response = convert_to_model_response_object(response_object=stringified_response, 
model_response_object=model_response, hidden_params=hidden_params, response_type="audio_transcription") # type: ignore return final_response async def async_audio_transcriptions( self, audio_file: BinaryIO, data: dict, model_response: TranscriptionResponse, timeout: float, api_key: Optional[str] = None, api_base: Optional[str] = None, client=None, azure_client_params=None, max_retries=None, logging_obj=None, ): response = None try: if client is None: async_azure_client = AsyncAzureOpenAI( **azure_client_params, http_client=litellm.aclient_session, ) else: async_azure_client = client ## LOGGING logging_obj.pre_call( input=f"audio_file_{uuid.uuid4()}", api_key=async_azure_client.api_key, additional_args={ "headers": { "Authorization": f"Bearer {async_azure_client.api_key}" }, "api_base": async_azure_client._base_url._uri_reference, "atranscription": True, "complete_input_dict": data, }, ) response = await async_azure_client.audio.transcriptions.create( **data, timeout=timeout ) # type: ignore stringified_response = response.model_dump() ## LOGGING logging_obj.post_call( input=audio_file.name, api_key=api_key, additional_args={ "headers": { "Authorization": f"Bearer {async_azure_client.api_key}" }, "api_base": async_azure_client._base_url._uri_reference, "atranscription": True, "complete_input_dict": data, }, original_response=stringified_response, ) hidden_params = {"model": "whisper-1", "custom_llm_provider": "azure"} response = convert_to_model_response_object(response_object=stringified_response, model_response_object=model_response, hidden_params=hidden_params, response_type="audio_transcription") # type: ignore return response except Exception as e: ## LOGGING logging_obj.post_call( input=input, api_key=api_key, original_response=str(e), ) raise e def get_headers( self, model: Optional[str], api_key: str, api_base: str, api_version: str, timeout: float, mode: str, messages: Optional[list] = None, input: Optional[list] = None, prompt: Optional[str] = None, ) -> 
dict: client_session = litellm.client_session or httpx.Client( transport=CustomHTTPTransport(), # handle dall-e-2 calls ) if "gateway.ai.cloudflare.com" in api_base: ## build base url - assume api base includes resource name if not api_base.endswith("/"): api_base += "/" api_base += f"{model}" client = AzureOpenAI( base_url=api_base, api_version=api_version, api_key=api_key, timeout=timeout, http_client=client_session, ) model = None # cloudflare ai gateway, needs model=None else: client = AzureOpenAI( api_version=api_version, azure_endpoint=api_base, api_key=api_key, timeout=timeout, http_client=client_session, ) # only run this check if it's not cloudflare ai gateway if model is None and mode != "image_generation": raise Exception("model is not set") completion = None if messages is None: messages = [{"role": "user", "content": "Hey"}] try: completion = client.chat.completions.with_raw_response.create( model=model, # type: ignore messages=messages, # type: ignore ) except Exception as e: raise e response = {} if completion is None or not hasattr(completion, "headers"): raise Exception("invalid completion response") if ( completion.headers.get("x-ratelimit-remaining-requests", None) is not None ): # not provided for dall-e requests response["x-ratelimit-remaining-requests"] = completion.headers[ "x-ratelimit-remaining-requests" ] if completion.headers.get("x-ratelimit-remaining-tokens", None) is not None: response["x-ratelimit-remaining-tokens"] = completion.headers[ "x-ratelimit-remaining-tokens" ] if completion.headers.get("x-ms-region", None) is not None: response["x-ms-region"] = completion.headers["x-ms-region"] return response async def ahealth_check( self, model: Optional[str], api_key: str, api_base: str, api_version: str, timeout: float, mode: str, messages: Optional[list] = None, input: Optional[list] = None, prompt: Optional[str] = None, ) -> dict: client_session = litellm.aclient_session or httpx.AsyncClient( transport=AsyncCustomHTTPTransport(), # 
handle dall-e-2 calls ) if "gateway.ai.cloudflare.com" in api_base: ## build base url - assume api base includes resource name if not api_base.endswith("/"): api_base += "/" api_base += f"{model}" client = AsyncAzureOpenAI( base_url=api_base, api_version=api_version, api_key=api_key, timeout=timeout, http_client=client_session, ) model = None # cloudflare ai gateway, needs model=None else: client = AsyncAzureOpenAI( api_version=api_version, azure_endpoint=api_base, api_key=api_key, timeout=timeout, http_client=client_session, ) # only run this check if it's not cloudflare ai gateway if model is None and mode != "image_generation": raise Exception("model is not set") completion = None if mode == "completion": completion = await client.completions.with_raw_response.create( model=model, # type: ignore prompt=prompt, # type: ignore ) elif mode == "chat": if messages is None: raise Exception("messages is not set") completion = await client.chat.completions.with_raw_response.create( model=model, # type: ignore messages=messages, # type: ignore ) elif mode == "embedding": if input is None: raise Exception("input is not set") completion = await client.embeddings.with_raw_response.create( model=model, # type: ignore input=input, # type: ignore ) elif mode == "image_generation": if prompt is None: raise Exception("prompt is not set") completion = await client.images.with_raw_response.generate( model=model, # type: ignore prompt=prompt, # type: ignore ) else: raise Exception("mode not set") response = {} if completion is None or not hasattr(completion, "headers"): raise Exception("invalid completion response") if ( completion.headers.get("x-ratelimit-remaining-requests", None) is not None ): # not provided for dall-e requests response["x-ratelimit-remaining-requests"] = completion.headers[ "x-ratelimit-remaining-requests" ] if completion.headers.get("x-ratelimit-remaining-tokens", None) is not None: response["x-ratelimit-remaining-tokens"] = completion.headers[ 
"x-ratelimit-remaining-tokens" ] if completion.headers.get("x-ms-region", None) is not None: response["x-ms-region"] = completion.headers["x-ms-region"] return response
() -> None
62,933
litellm.llms.azure
acompletion
null
def completion( self, model: str, messages: list, model_response: ModelResponse, api_key: str, api_base: str, api_version: str, api_type: str, azure_ad_token: str, print_verbose: Callable, timeout: Union[float, httpx.Timeout], logging_obj, optional_params, litellm_params, logger_fn, acompletion: bool = False, headers: Optional[dict] = None, client=None, ): super().completion() exception_mapping_worked = False try: if model is None or messages is None: raise AzureOpenAIError( status_code=422, message=f"Missing model or messages" ) max_retries = optional_params.pop("max_retries", 2) ### CHECK IF CLOUDFLARE AI GATEWAY ### ### if so - set the model as part of the base url if "gateway.ai.cloudflare.com" in api_base: ## build base url - assume api base includes resource name if client is None: if not api_base.endswith("/"): api_base += "/" api_base += f"{model}" azure_client_params = { "api_version": api_version, "base_url": f"{api_base}", "http_client": litellm.client_session, "max_retries": max_retries, "timeout": timeout, } if api_key is not None: azure_client_params["api_key"] = api_key elif azure_ad_token is not None: if azure_ad_token.startswith("oidc/"): azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) azure_client_params["azure_ad_token"] = azure_ad_token if acompletion is True: client = AsyncAzureOpenAI(**azure_client_params) else: client = AzureOpenAI(**azure_client_params) data = {"model": None, "messages": messages, **optional_params} else: data = { "model": model, # type: ignore "messages": messages, **optional_params, } if acompletion is True: if optional_params.get("stream", False): return self.async_streaming( logging_obj=logging_obj, api_base=api_base, data=data, model=model, api_key=api_key, api_version=api_version, azure_ad_token=azure_ad_token, timeout=timeout, client=client, ) else: return self.acompletion( api_base=api_base, data=data, model_response=model_response, api_key=api_key, api_version=api_version, model=model, 
azure_ad_token=azure_ad_token, timeout=timeout, client=client, logging_obj=logging_obj, ) elif "stream" in optional_params and optional_params["stream"] == True: return self.streaming( logging_obj=logging_obj, api_base=api_base, data=data, model=model, api_key=api_key, api_version=api_version, azure_ad_token=azure_ad_token, timeout=timeout, client=client, ) else: ## LOGGING logging_obj.pre_call( input=messages, api_key=api_key, additional_args={ "headers": { "api_key": api_key, "azure_ad_token": azure_ad_token, }, "api_version": api_version, "api_base": api_base, "complete_input_dict": data, }, ) if not isinstance(max_retries, int): raise AzureOpenAIError( status_code=422, message="max retries must be an int" ) # init AzureOpenAI Client azure_client_params = { "api_version": api_version, "azure_endpoint": api_base, "azure_deployment": model, "http_client": litellm.client_session, "max_retries": max_retries, "timeout": timeout, } azure_client_params = select_azure_base_url_or_endpoint( azure_client_params=azure_client_params ) if api_key is not None: azure_client_params["api_key"] = api_key elif azure_ad_token is not None: if azure_ad_token.startswith("oidc/"): azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) azure_client_params["azure_ad_token"] = azure_ad_token if client is None: azure_client = AzureOpenAI(**azure_client_params) else: azure_client = client if api_version is not None and isinstance( azure_client._custom_query, dict ): # set api_version to version passed by user azure_client._custom_query.setdefault( "api-version", api_version ) response = azure_client.chat.completions.create(**data, timeout=timeout) # type: ignore stringified_response = response.model_dump() ## LOGGING logging_obj.post_call( input=messages, api_key=api_key, original_response=stringified_response, additional_args={ "headers": headers, "api_version": api_version, "api_base": api_base, }, ) return convert_to_model_response_object( response_object=stringified_response, 
model_response_object=model_response, ) except AzureOpenAIError as e: exception_mapping_worked = True raise e except Exception as e: if hasattr(e, "status_code"): raise AzureOpenAIError(status_code=e.status_code, message=str(e)) else: raise AzureOpenAIError(status_code=500, message=str(e))
(self, api_key: str, api_version: str, model: str, api_base: str, data: dict, timeout: Any, model_response: litellm.utils.ModelResponse, azure_ad_token: Optional[str] = None, client=None, logging_obj=None)
62,934
litellm.llms.azure
aembedding
null
def streaming( self, logging_obj, api_base: str, api_key: str, api_version: str, data: dict, model: str, timeout: Any, azure_ad_token: Optional[str] = None, client=None, ): max_retries = data.pop("max_retries", 2) if not isinstance(max_retries, int): raise AzureOpenAIError( status_code=422, message="max retries must be an int" ) # init AzureOpenAI Client azure_client_params = { "api_version": api_version, "azure_endpoint": api_base, "azure_deployment": model, "http_client": litellm.client_session, "max_retries": max_retries, "timeout": timeout, } azure_client_params = select_azure_base_url_or_endpoint( azure_client_params=azure_client_params ) if api_key is not None: azure_client_params["api_key"] = api_key elif azure_ad_token is not None: if azure_ad_token.startswith("oidc/"): azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) azure_client_params["azure_ad_token"] = azure_ad_token if client is None: azure_client = AzureOpenAI(**azure_client_params) else: azure_client = client if api_version is not None and isinstance(azure_client._custom_query, dict): # set api_version to version passed by user azure_client._custom_query.setdefault("api-version", api_version) ## LOGGING logging_obj.pre_call( input=data["messages"], api_key=azure_client.api_key, additional_args={ "headers": {"Authorization": f"Bearer {azure_client.api_key}"}, "api_base": azure_client._base_url._uri_reference, "acompletion": True, "complete_input_dict": data, }, ) response = azure_client.chat.completions.create(**data, timeout=timeout) streamwrapper = CustomStreamWrapper( completion_stream=response, model=model, custom_llm_provider="azure", logging_obj=logging_obj, ) return streamwrapper
(self, data: dict, model_response: litellm.utils.ModelResponse, azure_client_params: dict, api_key: str, input: list, client=None, logging_obj=None, timeout=None)
62,935
litellm.llms.azure
ahealth_check
null
def get_headers( self, model: Optional[str], api_key: str, api_base: str, api_version: str, timeout: float, mode: str, messages: Optional[list] = None, input: Optional[list] = None, prompt: Optional[str] = None, ) -> dict: client_session = litellm.client_session or httpx.Client( transport=CustomHTTPTransport(), # handle dall-e-2 calls ) if "gateway.ai.cloudflare.com" in api_base: ## build base url - assume api base includes resource name if not api_base.endswith("/"): api_base += "/" api_base += f"{model}" client = AzureOpenAI( base_url=api_base, api_version=api_version, api_key=api_key, timeout=timeout, http_client=client_session, ) model = None # cloudflare ai gateway, needs model=None else: client = AzureOpenAI( api_version=api_version, azure_endpoint=api_base, api_key=api_key, timeout=timeout, http_client=client_session, ) # only run this check if it's not cloudflare ai gateway if model is None and mode != "image_generation": raise Exception("model is not set") completion = None if messages is None: messages = [{"role": "user", "content": "Hey"}] try: completion = client.chat.completions.with_raw_response.create( model=model, # type: ignore messages=messages, # type: ignore ) except Exception as e: raise e response = {} if completion is None or not hasattr(completion, "headers"): raise Exception("invalid completion response") if ( completion.headers.get("x-ratelimit-remaining-requests", None) is not None ): # not provided for dall-e requests response["x-ratelimit-remaining-requests"] = completion.headers[ "x-ratelimit-remaining-requests" ] if completion.headers.get("x-ratelimit-remaining-tokens", None) is not None: response["x-ratelimit-remaining-tokens"] = completion.headers[ "x-ratelimit-remaining-tokens" ] if completion.headers.get("x-ms-region", None) is not None: response["x-ms-region"] = completion.headers["x-ms-region"] return response
(self, model: Optional[str], api_key: str, api_base: str, api_version: str, timeout: float, mode: str, messages: Optional[list] = None, input: Optional[list] = None, prompt: Optional[str] = None) -> dict
62,936
litellm.llms.azure
aimage_generation
null
def embedding( self, model: str, input: list, api_key: str, api_base: str, api_version: str, timeout: float, logging_obj=None, model_response=None, optional_params=None, azure_ad_token: Optional[str] = None, client=None, aembedding=None, ): super().embedding() exception_mapping_worked = False if self._client_session is None: self._client_session = self.create_client_session() try: data = {"model": model, "input": input, **optional_params} max_retries = data.pop("max_retries", 2) if not isinstance(max_retries, int): raise AzureOpenAIError( status_code=422, message="max retries must be an int" ) # init AzureOpenAI Client azure_client_params = { "api_version": api_version, "azure_endpoint": api_base, "azure_deployment": model, "http_client": litellm.client_session, "max_retries": max_retries, "timeout": timeout, } azure_client_params = select_azure_base_url_or_endpoint( azure_client_params=azure_client_params ) if api_key is not None: azure_client_params["api_key"] = api_key elif azure_ad_token is not None: if azure_ad_token.startswith("oidc/"): azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) azure_client_params["azure_ad_token"] = azure_ad_token ## LOGGING logging_obj.pre_call( input=input, api_key=api_key, additional_args={ "complete_input_dict": data, "headers": {"api_key": api_key, "azure_ad_token": azure_ad_token}, }, ) if aembedding == True: response = self.aembedding( data=data, input=input, logging_obj=logging_obj, api_key=api_key, model_response=model_response, azure_client_params=azure_client_params, timeout=timeout, ) return response if client is None: azure_client = AzureOpenAI(**azure_client_params) # type: ignore else: azure_client = client ## COMPLETION CALL response = azure_client.embeddings.create(**data, timeout=timeout) # type: ignore ## LOGGING logging_obj.post_call( input=input, api_key=api_key, additional_args={"complete_input_dict": data, "api_base": api_base}, original_response=response, ) return 
convert_to_model_response_object(response_object=response.model_dump(), model_response_object=model_response, response_type="embedding") # type: ignore except AzureOpenAIError as e: exception_mapping_worked = True raise e except Exception as e: if hasattr(e, "status_code"): raise AzureOpenAIError(status_code=e.status_code, message=str(e)) else: raise AzureOpenAIError(status_code=500, message=str(e))
(self, data: dict, model_response: litellm.utils.ModelResponse, azure_client_params: dict, api_key: str, input: list, client=None, logging_obj=None, timeout=None)
62,937
litellm.llms.azure
async_audio_transcriptions
null
def audio_transcriptions( self, model: str, audio_file: BinaryIO, optional_params: dict, model_response: TranscriptionResponse, timeout: float, max_retries: int, api_key: Optional[str] = None, api_base: Optional[str] = None, api_version: Optional[str] = None, client=None, azure_ad_token: Optional[str] = None, logging_obj=None, atranscription: bool = False, ): data = {"model": model, "file": audio_file, **optional_params} # init AzureOpenAI Client azure_client_params = { "api_version": api_version, "azure_endpoint": api_base, "azure_deployment": model, "timeout": timeout, } azure_client_params = select_azure_base_url_or_endpoint( azure_client_params=azure_client_params ) if api_key is not None: azure_client_params["api_key"] = api_key elif azure_ad_token is not None: if azure_ad_token.startswith("oidc/"): azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) azure_client_params["azure_ad_token"] = azure_ad_token if max_retries is not None: azure_client_params["max_retries"] = max_retries if atranscription == True: return self.async_audio_transcriptions( audio_file=audio_file, data=data, model_response=model_response, timeout=timeout, api_key=api_key, api_base=api_base, client=client, azure_client_params=azure_client_params, max_retries=max_retries, logging_obj=logging_obj, ) if client is None: azure_client = AzureOpenAI(http_client=litellm.client_session, **azure_client_params) # type: ignore else: azure_client = client ## LOGGING logging_obj.pre_call( input=f"audio_file_{uuid.uuid4()}", api_key=azure_client.api_key, additional_args={ "headers": {"Authorization": f"Bearer {azure_client.api_key}"}, "api_base": azure_client._base_url._uri_reference, "atranscription": True, "complete_input_dict": data, }, ) response = azure_client.audio.transcriptions.create( **data, timeout=timeout # type: ignore ) stringified_response = response.model_dump() ## LOGGING logging_obj.post_call( input=audio_file.name, api_key=api_key, additional_args={"complete_input_dict": 
data}, original_response=stringified_response, ) hidden_params = {"model": "whisper-1", "custom_llm_provider": "azure"} final_response = convert_to_model_response_object(response_object=stringified_response, model_response_object=model_response, hidden_params=hidden_params, response_type="audio_transcription") # type: ignore return final_response
(self, audio_file: <class 'BinaryIO'>, data: dict, model_response: litellm.utils.TranscriptionResponse, timeout: float, api_key: Optional[str] = None, api_base: Optional[str] = None, client=None, azure_client_params=None, max_retries=None, logging_obj=None)
62,945
litellm.llms.azure
image_generation
null
def image_generation( self, prompt: str, timeout: float, model: Optional[str] = None, api_key: Optional[str] = None, api_base: Optional[str] = None, api_version: Optional[str] = None, model_response: Optional[litellm.utils.ImageResponse] = None, azure_ad_token: Optional[str] = None, logging_obj=None, optional_params=None, client=None, aimg_generation=None, ): exception_mapping_worked = False try: if model and len(model) > 0: model = model else: model = None ## BASE MODEL CHECK if ( model_response is not None and optional_params.get("base_model", None) is not None ): model_response._hidden_params["model"] = optional_params.pop( "base_model" ) data = {"model": model, "prompt": prompt, **optional_params} max_retries = data.pop("max_retries", 2) if not isinstance(max_retries, int): raise AzureOpenAIError( status_code=422, message="max retries must be an int" ) # init AzureOpenAI Client azure_client_params = { "api_version": api_version, "azure_endpoint": api_base, "azure_deployment": model, "max_retries": max_retries, "timeout": timeout, } azure_client_params = select_azure_base_url_or_endpoint( azure_client_params=azure_client_params ) if api_key is not None: azure_client_params["api_key"] = api_key elif azure_ad_token is not None: if azure_ad_token.startswith("oidc/"): azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) azure_client_params["azure_ad_token"] = azure_ad_token if aimg_generation == True: response = self.aimage_generation(data=data, input=input, logging_obj=logging_obj, model_response=model_response, api_key=api_key, client=client, azure_client_params=azure_client_params, timeout=timeout) # type: ignore return response if client is None: client_session = litellm.client_session or httpx.Client( transport=CustomHTTPTransport(), ) azure_client = AzureOpenAI(http_client=client_session, **azure_client_params) # type: ignore else: azure_client = client ## LOGGING logging_obj.pre_call( input=prompt, api_key=azure_client.api_key, additional_args={ 
"headers": {"api_key": azure_client.api_key}, "api_base": azure_client._base_url._uri_reference, "acompletion": False, "complete_input_dict": data, }, ) ## COMPLETION CALL response = azure_client.images.generate(**data, timeout=timeout) # type: ignore ## LOGGING logging_obj.post_call( input=prompt, api_key=api_key, additional_args={"complete_input_dict": data}, original_response=response, ) # return response return convert_to_model_response_object(response_object=response.model_dump(), model_response_object=model_response, response_type="image_generation") # type: ignore except AzureOpenAIError as e: exception_mapping_worked = True raise e except Exception as e: if hasattr(e, "status_code"): raise AzureOpenAIError(status_code=e.status_code, message=str(e)) else: raise AzureOpenAIError(status_code=500, message=str(e))
(self, prompt: str, timeout: float, model: Optional[str] = None, api_key: Optional[str] = None, api_base: Optional[str] = None, api_version: Optional[str] = None, model_response: Optional[litellm.utils.ImageResponse] = None, azure_ad_token: Optional[str] = None, logging_obj=None, optional_params=None, client=None, aimg_generation=None)
62,948
litellm.llms.azure
validate_environment
null
def validate_environment(self, api_key, azure_ad_token): headers = { "content-type": "application/json", } if api_key is not None: headers["api-key"] = api_key elif azure_ad_token is not None: if azure_ad_token.startswith("oidc/"): azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) headers["Authorization"] = f"Bearer {azure_ad_token}" return headers
(self, api_key, azure_ad_token)
62,949
litellm.llms.azure
AzureOpenAIConfig
Reference: https://platform.openai.com/docs/api-reference/chat/create The class `AzureOpenAIConfig` provides configuration for the OpenAI's Chat API interface, for use with Azure. It inherits from `OpenAIConfig`. Below are the parameters:: - `frequency_penalty` (number or null): Defaults to 0. Allows a value between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, thereby minimizing repetition. - `function_call` (string or object): This optional parameter controls how the model calls functions. - `functions` (array): An optional parameter. It is a list of functions for which the model may generate JSON inputs. - `logit_bias` (map): This optional parameter modifies the likelihood of specified tokens appearing in the completion. - `max_tokens` (integer or null): This optional parameter helps to set the maximum number of tokens to generate in the chat completion. - `n` (integer or null): This optional parameter helps to set how many chat completion choices to generate for each input message. - `presence_penalty` (number or null): Defaults to 0. It penalizes new tokens based on if they appear in the text so far, hence increasing the model's likelihood to talk about new topics. - `stop` (string / array / null): Specifies up to 4 sequences where the API will stop generating further tokens. - `temperature` (number or null): Defines the sampling temperature to use, varying between 0 and 2. - `top_p` (number or null): An alternative to sampling with temperature, used for nucleus sampling.
class AzureOpenAIConfig(OpenAIConfig): """ Reference: https://platform.openai.com/docs/api-reference/chat/create The class `AzureOpenAIConfig` provides configuration for the OpenAI's Chat API interface, for use with Azure. It inherits from `OpenAIConfig`. Below are the parameters:: - `frequency_penalty` (number or null): Defaults to 0. Allows a value between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, thereby minimizing repetition. - `function_call` (string or object): This optional parameter controls how the model calls functions. - `functions` (array): An optional parameter. It is a list of functions for which the model may generate JSON inputs. - `logit_bias` (map): This optional parameter modifies the likelihood of specified tokens appearing in the completion. - `max_tokens` (integer or null): This optional parameter helps to set the maximum number of tokens to generate in the chat completion. - `n` (integer or null): This optional parameter helps to set how many chat completion choices to generate for each input message. - `presence_penalty` (number or null): Defaults to 0. It penalizes new tokens based on if they appear in the text so far, hence increasing the model's likelihood to talk about new topics. - `stop` (string / array / null): Specifies up to 4 sequences where the API will stop generating further tokens. - `temperature` (number or null): Defines the sampling temperature to use, varying between 0 and 2. - `top_p` (number or null): An alternative to sampling with temperature, used for nucleus sampling. 
""" def __init__( self, frequency_penalty: Optional[int] = None, function_call: Optional[Union[str, dict]] = None, functions: Optional[list] = None, logit_bias: Optional[dict] = None, max_tokens: Optional[int] = None, n: Optional[int] = None, presence_penalty: Optional[int] = None, stop: Optional[Union[str, list]] = None, temperature: Optional[int] = None, top_p: Optional[int] = None, ) -> None: super().__init__( frequency_penalty, function_call, functions, logit_bias, max_tokens, n, presence_penalty, stop, temperature, top_p, ) def get_mapped_special_auth_params(self) -> dict: return {"token": "azure_ad_token"} def map_special_auth_params(self, non_default_params: dict, optional_params: dict): for param, value in non_default_params.items(): if param == "token": optional_params["azure_ad_token"] = value return optional_params def get_eu_regions(self) -> List[str]: """ Source: https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models#gpt-4-and-gpt-4-turbo-model-availability """ return ["europe", "sweden", "switzerland", "france", "uk"]
(frequency_penalty: Optional[int] = None, function_call: Union[str, dict, NoneType] = None, functions: Optional[list] = None, logit_bias: Optional[dict] = None, max_tokens: Optional[int] = None, n: Optional[int] = None, presence_penalty: Optional[int] = None, stop: Union[str, list, NoneType] = None, temperature: Optional[int] = None, top_p: Optional[int] = None) -> None
62,950
litellm.llms.azure
__init__
null
def __init__( self, frequency_penalty: Optional[int] = None, function_call: Optional[Union[str, dict]] = None, functions: Optional[list] = None, logit_bias: Optional[dict] = None, max_tokens: Optional[int] = None, n: Optional[int] = None, presence_penalty: Optional[int] = None, stop: Optional[Union[str, list]] = None, temperature: Optional[int] = None, top_p: Optional[int] = None, ) -> None: super().__init__( frequency_penalty, function_call, functions, logit_bias, max_tokens, n, presence_penalty, stop, temperature, top_p, )
(self, frequency_penalty: Optional[int] = None, function_call: Union[str, dict, NoneType] = None, functions: Optional[list] = None, logit_bias: Optional[dict] = None, max_tokens: Optional[int] = None, n: Optional[int] = None, presence_penalty: Optional[int] = None, stop: Union[str, list, NoneType] = None, temperature: Optional[int] = None, top_p: Optional[int] = None) -> NoneType
62,951
litellm.llms.azure
get_eu_regions
Source: https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models#gpt-4-and-gpt-4-turbo-model-availability
def get_eu_regions(self) -> List[str]: """ Source: https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models#gpt-4-and-gpt-4-turbo-model-availability """ return ["europe", "sweden", "switzerland", "france", "uk"]
(self) -> List[str]
62,952
litellm.llms.azure
get_mapped_special_auth_params
null
def get_mapped_special_auth_params(self) -> dict: return {"token": "azure_ad_token"}
(self) -> dict
62,953
litellm.llms.azure
map_special_auth_params
null
def map_special_auth_params(self, non_default_params: dict, optional_params: dict): for param, value in non_default_params.items(): if param == "token": optional_params["azure_ad_token"] = value return optional_params
(self, non_default_params: dict, optional_params: dict)
62,954
litellm.llms.azure
AzureOpenAIError
null
class AzureOpenAIError(Exception): def __init__( self, status_code, message, request: Optional[httpx.Request] = None, response: Optional[httpx.Response] = None, ): self.status_code = status_code self.message = message if request: self.request = request else: self.request = httpx.Request(method="POST", url="https://api.openai.com/v1") if response: self.response = response else: self.response = httpx.Response( status_code=status_code, request=self.request ) super().__init__( self.message ) # Call the base class constructor with the parameters it needs
(status_code, message, request: Optional[httpx.Request] = None, response: Optional[httpx.Response] = None)
62,955
litellm.llms.azure
__init__
null
def __init__( self, status_code, message, request: Optional[httpx.Request] = None, response: Optional[httpx.Response] = None, ): self.status_code = status_code self.message = message if request: self.request = request else: self.request = httpx.Request(method="POST", url="https://api.openai.com/v1") if response: self.response = response else: self.response = httpx.Response( status_code=status_code, request=self.request ) super().__init__( self.message ) # Call the base class constructor with the parameters it needs
(self, status_code, message, request: Optional[httpx.Request] = None, response: Optional[httpx.Response] = None)
62,956
litellm.llms.azure_text
AzureTextCompletion
null
class AzureTextCompletion(BaseLLM): def __init__(self) -> None: super().__init__() def validate_environment(self, api_key, azure_ad_token): headers = { "content-type": "application/json", } if api_key is not None: headers["api-key"] = api_key elif azure_ad_token is not None: headers["Authorization"] = f"Bearer {azure_ad_token}" return headers def completion( self, model: str, messages: list, model_response: ModelResponse, api_key: str, api_base: str, api_version: str, api_type: str, azure_ad_token: str, print_verbose: Callable, timeout, logging_obj, optional_params, litellm_params, logger_fn, acompletion: bool = False, headers: Optional[dict] = None, client=None, ): super().completion() exception_mapping_worked = False try: if model is None or messages is None: raise AzureOpenAIError( status_code=422, message=f"Missing model or messages" ) max_retries = optional_params.pop("max_retries", 2) prompt = prompt_factory( messages=messages, model=model, custom_llm_provider="azure_text" ) ### CHECK IF CLOUDFLARE AI GATEWAY ### ### if so - set the model as part of the base url if "gateway.ai.cloudflare.com" in api_base: ## build base url - assume api base includes resource name if client is None: if not api_base.endswith("/"): api_base += "/" api_base += f"{model}" azure_client_params = { "api_version": api_version, "base_url": f"{api_base}", "http_client": litellm.client_session, "max_retries": max_retries, "timeout": timeout, } if api_key is not None: azure_client_params["api_key"] = api_key elif azure_ad_token is not None: azure_client_params["azure_ad_token"] = azure_ad_token if acompletion is True: client = AsyncAzureOpenAI(**azure_client_params) else: client = AzureOpenAI(**azure_client_params) data = {"model": None, "prompt": prompt, **optional_params} else: data = { "model": model, # type: ignore "prompt": prompt, **optional_params, } if acompletion is True: if optional_params.get("stream", False): return self.async_streaming( logging_obj=logging_obj, 
api_base=api_base, data=data, model=model, api_key=api_key, api_version=api_version, azure_ad_token=azure_ad_token, timeout=timeout, client=client, ) else: return self.acompletion( api_base=api_base, data=data, model_response=model_response, api_key=api_key, api_version=api_version, model=model, azure_ad_token=azure_ad_token, timeout=timeout, client=client, logging_obj=logging_obj, ) elif "stream" in optional_params and optional_params["stream"] == True: return self.streaming( logging_obj=logging_obj, api_base=api_base, data=data, model=model, api_key=api_key, api_version=api_version, azure_ad_token=azure_ad_token, timeout=timeout, client=client, ) else: ## LOGGING logging_obj.pre_call( input=prompt, api_key=api_key, additional_args={ "headers": { "api_key": api_key, "azure_ad_token": azure_ad_token, }, "api_version": api_version, "api_base": api_base, "complete_input_dict": data, }, ) if not isinstance(max_retries, int): raise AzureOpenAIError( status_code=422, message="max retries must be an int" ) # init AzureOpenAI Client azure_client_params = { "api_version": api_version, "azure_endpoint": api_base, "azure_deployment": model, "http_client": litellm.client_session, "max_retries": max_retries, "timeout": timeout, } azure_client_params = select_azure_base_url_or_endpoint( azure_client_params=azure_client_params ) if api_key is not None: azure_client_params["api_key"] = api_key elif azure_ad_token is not None: azure_client_params["azure_ad_token"] = azure_ad_token if client is None: azure_client = AzureOpenAI(**azure_client_params) else: azure_client = client if api_version is not None and isinstance( azure_client._custom_query, dict ): # set api_version to version passed by user azure_client._custom_query.setdefault( "api-version", api_version ) response = azure_client.completions.create(**data, timeout=timeout) # type: ignore stringified_response = response.model_dump() ## LOGGING logging_obj.post_call( input=prompt, api_key=api_key, 
original_response=stringified_response, additional_args={ "headers": headers, "api_version": api_version, "api_base": api_base, }, ) return ( openai_text_completion_config.convert_to_chat_model_response_object( response_object=TextCompletionResponse(**stringified_response), model_response_object=model_response, ) ) except AzureOpenAIError as e: exception_mapping_worked = True raise e except Exception as e: if hasattr(e, "status_code"): raise AzureOpenAIError(status_code=e.status_code, message=str(e)) else: raise AzureOpenAIError(status_code=500, message=str(e)) async def acompletion( self, api_key: str, api_version: str, model: str, api_base: str, data: dict, timeout: Any, model_response: ModelResponse, azure_ad_token: Optional[str] = None, client=None, # this is the AsyncAzureOpenAI logging_obj=None, ): response = None try: max_retries = data.pop("max_retries", 2) if not isinstance(max_retries, int): raise AzureOpenAIError( status_code=422, message="max retries must be an int" ) # init AzureOpenAI Client azure_client_params = { "api_version": api_version, "azure_endpoint": api_base, "azure_deployment": model, "http_client": litellm.client_session, "max_retries": max_retries, "timeout": timeout, } azure_client_params = select_azure_base_url_or_endpoint( azure_client_params=azure_client_params ) if api_key is not None: azure_client_params["api_key"] = api_key elif azure_ad_token is not None: azure_client_params["azure_ad_token"] = azure_ad_token # setting Azure client if client is None: azure_client = AsyncAzureOpenAI(**azure_client_params) else: azure_client = client if api_version is not None and isinstance( azure_client._custom_query, dict ): # set api_version to version passed by user azure_client._custom_query.setdefault("api-version", api_version) ## LOGGING logging_obj.pre_call( input=data["prompt"], api_key=azure_client.api_key, additional_args={ "headers": {"Authorization": f"Bearer {azure_client.api_key}"}, "api_base": 
azure_client._base_url._uri_reference, "acompletion": True, "complete_input_dict": data, }, ) response = await azure_client.completions.create(**data, timeout=timeout) return openai_text_completion_config.convert_to_chat_model_response_object( response_object=response.model_dump(), model_response_object=model_response, ) except AzureOpenAIError as e: exception_mapping_worked = True raise e except Exception as e: if hasattr(e, "status_code"): raise e else: raise AzureOpenAIError(status_code=500, message=str(e)) def streaming( self, logging_obj, api_base: str, api_key: str, api_version: str, data: dict, model: str, timeout: Any, azure_ad_token: Optional[str] = None, client=None, ): max_retries = data.pop("max_retries", 2) if not isinstance(max_retries, int): raise AzureOpenAIError( status_code=422, message="max retries must be an int" ) # init AzureOpenAI Client azure_client_params = { "api_version": api_version, "azure_endpoint": api_base, "azure_deployment": model, "http_client": litellm.client_session, "max_retries": max_retries, "timeout": timeout, } azure_client_params = select_azure_base_url_or_endpoint( azure_client_params=azure_client_params ) if api_key is not None: azure_client_params["api_key"] = api_key elif azure_ad_token is not None: azure_client_params["azure_ad_token"] = azure_ad_token if client is None: azure_client = AzureOpenAI(**azure_client_params) else: azure_client = client if api_version is not None and isinstance(azure_client._custom_query, dict): # set api_version to version passed by user azure_client._custom_query.setdefault("api-version", api_version) ## LOGGING logging_obj.pre_call( input=data["prompt"], api_key=azure_client.api_key, additional_args={ "headers": {"Authorization": f"Bearer {azure_client.api_key}"}, "api_base": azure_client._base_url._uri_reference, "acompletion": True, "complete_input_dict": data, }, ) response = azure_client.completions.create(**data, timeout=timeout) streamwrapper = CustomStreamWrapper( 
completion_stream=response, model=model, custom_llm_provider="azure_text", logging_obj=logging_obj, ) return streamwrapper async def async_streaming( self, logging_obj, api_base: str, api_key: str, api_version: str, data: dict, model: str, timeout: Any, azure_ad_token: Optional[str] = None, client=None, ): try: # init AzureOpenAI Client azure_client_params = { "api_version": api_version, "azure_endpoint": api_base, "azure_deployment": model, "http_client": litellm.client_session, "max_retries": data.pop("max_retries", 2), "timeout": timeout, } azure_client_params = select_azure_base_url_or_endpoint( azure_client_params=azure_client_params ) if api_key is not None: azure_client_params["api_key"] = api_key elif azure_ad_token is not None: azure_client_params["azure_ad_token"] = azure_ad_token if client is None: azure_client = AsyncAzureOpenAI(**azure_client_params) else: azure_client = client if api_version is not None and isinstance( azure_client._custom_query, dict ): # set api_version to version passed by user azure_client._custom_query.setdefault("api-version", api_version) ## LOGGING logging_obj.pre_call( input=data["prompt"], api_key=azure_client.api_key, additional_args={ "headers": {"Authorization": f"Bearer {azure_client.api_key}"}, "api_base": azure_client._base_url._uri_reference, "acompletion": True, "complete_input_dict": data, }, ) response = await azure_client.completions.create(**data, timeout=timeout) # return response streamwrapper = CustomStreamWrapper( completion_stream=response, model=model, custom_llm_provider="azure_text", logging_obj=logging_obj, ) return streamwrapper ## DO NOT make this into an async for ... loop, it will yield an async generator, which won't raise errors if the response fails except Exception as e: if hasattr(e, "status_code"): raise AzureOpenAIError(status_code=e.status_code, message=str(e)) else: raise AzureOpenAIError(status_code=500, message=str(e))
() -> None
62,960
litellm.llms.azure_text
acompletion
null
def completion(
    self,
    model: str,
    messages: list,
    model_response: ModelResponse,
    api_key: str,
    api_base: str,
    api_version: str,
    api_type: str,
    azure_ad_token: str,
    print_verbose: Callable,
    timeout,
    logging_obj,
    optional_params,
    litellm_params,
    logger_fn,
    acompletion: bool = False,
    headers: Optional[dict] = None,
    client=None,
):
    """Route an Azure text-completion request to the right execution path.

    Converts OpenAI-style ``messages`` into a raw prompt via ``prompt_factory``,
    then dispatches to ``self.acompletion`` / ``self.async_streaming`` /
    ``self.streaming``, or performs the synchronous, non-streaming call inline.

    Raises:
        AzureOpenAIError: 422 when model/messages are missing or
            ``max_retries`` is not an int; otherwise wraps any underlying
            failure (preserving its status code when available, else 500).
    """
    super().completion()
    # Set when the error is already an AzureOpenAIError so it is re-raised untouched.
    exception_mapping_worked = False
    try:
        if model is None or messages is None:
            raise AzureOpenAIError(
                status_code=422, message=f"Missing model or messages"
            )

        max_retries = optional_params.pop("max_retries", 2)
        # Azure's text-completion endpoint takes a raw prompt, not chat messages.
        prompt = prompt_factory(
            messages=messages, model=model, custom_llm_provider="azure_text"
        )

        ### CHECK IF CLOUDFLARE AI GATEWAY ###
        ### if so - set the model as part of the base url
        if "gateway.ai.cloudflare.com" in api_base:
            ## build base url - assume api base includes resource name
            if client is None:
                if not api_base.endswith("/"):
                    api_base += "/"
                api_base += f"{model}"

                azure_client_params = {
                    "api_version": api_version,
                    "base_url": f"{api_base}",
                    "http_client": litellm.client_session,
                    "max_retries": max_retries,
                    "timeout": timeout,
                }
                # api key takes precedence over an Azure AD token
                if api_key is not None:
                    azure_client_params["api_key"] = api_key
                elif azure_ad_token is not None:
                    azure_client_params["azure_ad_token"] = azure_ad_token

                if acompletion is True:
                    client = AsyncAzureOpenAI(**azure_client_params)
                else:
                    client = AzureOpenAI(**azure_client_params)

            # model is already baked into the gateway URL, so omit it from the payload
            data = {"model": None, "prompt": prompt, **optional_params}
        else:
            data = {
                "model": model,  # type: ignore
                "prompt": prompt,
                **optional_params,
            }

        if acompletion is True:
            if optional_params.get("stream", False):
                return self.async_streaming(
                    logging_obj=logging_obj,
                    api_base=api_base,
                    data=data,
                    model=model,
                    api_key=api_key,
                    api_version=api_version,
                    azure_ad_token=azure_ad_token,
                    timeout=timeout,
                    client=client,
                )
            else:
                return self.acompletion(
                    api_base=api_base,
                    data=data,
                    model_response=model_response,
                    api_key=api_key,
                    api_version=api_version,
                    model=model,
                    azure_ad_token=azure_ad_token,
                    timeout=timeout,
                    client=client,
                    logging_obj=logging_obj,
                )
        elif "stream" in optional_params and optional_params["stream"] == True:
            return self.streaming(
                logging_obj=logging_obj,
                api_base=api_base,
                data=data,
                model=model,
                api_key=api_key,
                api_version=api_version,
                azure_ad_token=azure_ad_token,
                timeout=timeout,
                client=client,
            )
        else:
            ## LOGGING
            logging_obj.pre_call(
                input=prompt,
                api_key=api_key,
                additional_args={
                    "headers": {
                        "api_key": api_key,
                        "azure_ad_token": azure_ad_token,
                    },
                    "api_version": api_version,
                    "api_base": api_base,
                    "complete_input_dict": data,
                },
            )
            if not isinstance(max_retries, int):
                raise AzureOpenAIError(
                    status_code=422, message="max retries must be an int"
                )
            # init AzureOpenAI Client
            azure_client_params = {
                "api_version": api_version,
                "azure_endpoint": api_base,
                "azure_deployment": model,
                "http_client": litellm.client_session,
                "max_retries": max_retries,
                "timeout": timeout,
            }
            azure_client_params = select_azure_base_url_or_endpoint(
                azure_client_params=azure_client_params
            )
            if api_key is not None:
                azure_client_params["api_key"] = api_key
            elif azure_ad_token is not None:
                azure_client_params["azure_ad_token"] = azure_ad_token
            if client is None:
                azure_client = AzureOpenAI(**azure_client_params)
            else:
                azure_client = client
            if api_version is not None and isinstance(
                azure_client._custom_query, dict
            ):
                # set api_version to version passed by user
                azure_client._custom_query.setdefault(
                    "api-version", api_version
                )

            response = azure_client.completions.create(**data, timeout=timeout)  # type: ignore
            stringified_response = response.model_dump()
            ## LOGGING
            logging_obj.post_call(
                input=prompt,
                api_key=api_key,
                original_response=stringified_response,
                additional_args={
                    "headers": headers,
                    "api_version": api_version,
                    "api_base": api_base,
                },
            )
            return (
                openai_text_completion_config.convert_to_chat_model_response_object(
                    response_object=TextCompletionResponse(**stringified_response),
                    model_response_object=model_response,
                )
            )
    except AzureOpenAIError as e:
        exception_mapping_worked = True
        raise e
    except Exception as e:
        if hasattr(e, "status_code"):
            raise AzureOpenAIError(status_code=e.status_code, message=str(e))
        else:
            raise AzureOpenAIError(status_code=500, message=str(e))
(self, api_key: str, api_version: str, model: str, api_base: str, data: dict, timeout: Any, model_response: litellm.utils.ModelResponse, azure_ad_token: Optional[str] = None, client=None, logging_obj=None)
62,961
litellm.llms.azure_text
async_streaming
null
def streaming( self, logging_obj, api_base: str, api_key: str, api_version: str, data: dict, model: str, timeout: Any, azure_ad_token: Optional[str] = None, client=None, ): max_retries = data.pop("max_retries", 2) if not isinstance(max_retries, int): raise AzureOpenAIError( status_code=422, message="max retries must be an int" ) # init AzureOpenAI Client azure_client_params = { "api_version": api_version, "azure_endpoint": api_base, "azure_deployment": model, "http_client": litellm.client_session, "max_retries": max_retries, "timeout": timeout, } azure_client_params = select_azure_base_url_or_endpoint( azure_client_params=azure_client_params ) if api_key is not None: azure_client_params["api_key"] = api_key elif azure_ad_token is not None: azure_client_params["azure_ad_token"] = azure_ad_token if client is None: azure_client = AzureOpenAI(**azure_client_params) else: azure_client = client if api_version is not None and isinstance(azure_client._custom_query, dict): # set api_version to version passed by user azure_client._custom_query.setdefault("api-version", api_version) ## LOGGING logging_obj.pre_call( input=data["prompt"], api_key=azure_client.api_key, additional_args={ "headers": {"Authorization": f"Bearer {azure_client.api_key}"}, "api_base": azure_client._base_url._uri_reference, "acompletion": True, "complete_input_dict": data, }, ) response = azure_client.completions.create(**data, timeout=timeout) streamwrapper = CustomStreamWrapper( completion_stream=response, model=model, custom_llm_provider="azure_text", logging_obj=logging_obj, ) return streamwrapper
(self, logging_obj, api_base: str, api_key: str, api_version: str, data: dict, model: str, timeout: Any, azure_ad_token: Optional[str] = None, client=None)
62,965
litellm.llms.base
embedding
null
def embedding( self, *args, **kwargs ): # logic for parsing in - calling - parsing out model embedding calls pass
(self, *args, **kwargs)
62,968
litellm.llms.azure_text
validate_environment
null
def validate_environment(self, api_key, azure_ad_token): headers = { "content-type": "application/json", } if api_key is not None: headers["api-key"] = api_key elif azure_ad_token is not None: headers["Authorization"] = f"Bearer {azure_ad_token}" return headers
(self, api_key, azure_ad_token)
62,969
litellm.exceptions
BadRequestError
null
class BadRequestError(openai.BadRequestError): # type: ignore def __init__( self, message, model, llm_provider, response: Optional[httpx.Response] = None ): self.status_code = 400 self.message = message self.model = model self.llm_provider = llm_provider response = response or httpx.Response( status_code=self.status_code, request=httpx.Request( method="GET", url="https://litellm.ai" ), # mock request object ) super().__init__( self.message, response=response, body=None ) # Call the base class constructor with the parameters it needs
(message, model, llm_provider, response: Optional[httpx.Response] = None)
62,970
litellm.exceptions
__init__
null
def __init__( self, message, model, llm_provider, response: Optional[httpx.Response] = None ): self.status_code = 400 self.message = message self.model = model self.llm_provider = llm_provider response = response or httpx.Response( status_code=self.status_code, request=httpx.Request( method="GET", url="https://litellm.ai" ), # mock request object ) super().__init__( self.message, response=response, body=None ) # Call the base class constructor with the parameters it needs
(self, message, model, llm_provider, response: Optional[httpx.Response] = None)
62,971
pydantic.main
BaseModel
Usage docs: https://docs.pydantic.dev/2.7/concepts/models/ A base class for creating Pydantic models. Attributes: __class_vars__: The names of classvars defined on the model. __private_attributes__: Metadata about the private attributes of the model. __signature__: The signature for instantiating the model. __pydantic_complete__: Whether model building is completed, or if there are still undefined fields. __pydantic_core_schema__: The pydantic-core schema used to build the SchemaValidator and SchemaSerializer. __pydantic_custom_init__: Whether the model has a custom `__init__` function. __pydantic_decorators__: Metadata containing the decorators defined on the model. This replaces `Model.__validators__` and `Model.__root_validators__` from Pydantic V1. __pydantic_generic_metadata__: Metadata for generic models; contains data used for a similar purpose to __args__, __origin__, __parameters__ in typing-module generics. May eventually be replaced by these. __pydantic_parent_namespace__: Parent namespace of the model, used for automatic rebuilding of models. __pydantic_post_init__: The name of the post-init method for the model, if defined. __pydantic_root_model__: Whether the model is a `RootModel`. __pydantic_serializer__: The pydantic-core SchemaSerializer used to dump instances of the model. __pydantic_validator__: The pydantic-core SchemaValidator used to validate instances of the model. __pydantic_extra__: An instance attribute with the values of extra fields from validation when `model_config['extra'] == 'allow'`. __pydantic_fields_set__: An instance attribute with the names of fields explicitly set. __pydantic_private__: Instance attribute with the values of private attributes set on the model instance.
class BaseModel(metaclass=_model_construction.ModelMetaclass): """Usage docs: https://docs.pydantic.dev/2.7/concepts/models/ A base class for creating Pydantic models. Attributes: __class_vars__: The names of classvars defined on the model. __private_attributes__: Metadata about the private attributes of the model. __signature__: The signature for instantiating the model. __pydantic_complete__: Whether model building is completed, or if there are still undefined fields. __pydantic_core_schema__: The pydantic-core schema used to build the SchemaValidator and SchemaSerializer. __pydantic_custom_init__: Whether the model has a custom `__init__` function. __pydantic_decorators__: Metadata containing the decorators defined on the model. This replaces `Model.__validators__` and `Model.__root_validators__` from Pydantic V1. __pydantic_generic_metadata__: Metadata for generic models; contains data used for a similar purpose to __args__, __origin__, __parameters__ in typing-module generics. May eventually be replaced by these. __pydantic_parent_namespace__: Parent namespace of the model, used for automatic rebuilding of models. __pydantic_post_init__: The name of the post-init method for the model, if defined. __pydantic_root_model__: Whether the model is a `RootModel`. __pydantic_serializer__: The pydantic-core SchemaSerializer used to dump instances of the model. __pydantic_validator__: The pydantic-core SchemaValidator used to validate instances of the model. __pydantic_extra__: An instance attribute with the values of extra fields from validation when `model_config['extra'] == 'allow'`. __pydantic_fields_set__: An instance attribute with the names of fields explicitly set. __pydantic_private__: Instance attribute with the values of private attributes set on the model instance. """ if typing.TYPE_CHECKING: # Here we provide annotations for the attributes of BaseModel. # Many of these are populated by the metaclass, which is why this section is in a `TYPE_CHECKING` block. 
# However, for the sake of easy review, we have included type annotations of all class and instance attributes # of `BaseModel` here: # Class attributes model_config: ClassVar[ConfigDict] """ Configuration for the model, should be a dictionary conforming to [`ConfigDict`][pydantic.config.ConfigDict]. """ model_fields: ClassVar[dict[str, FieldInfo]] """ Metadata about the fields defined on the model, mapping of field names to [`FieldInfo`][pydantic.fields.FieldInfo]. This replaces `Model.__fields__` from Pydantic V1. """ model_computed_fields: ClassVar[dict[str, ComputedFieldInfo]] """A dictionary of computed field names and their corresponding `ComputedFieldInfo` objects.""" __class_vars__: ClassVar[set[str]] __private_attributes__: ClassVar[dict[str, ModelPrivateAttr]] __signature__: ClassVar[Signature] __pydantic_complete__: ClassVar[bool] __pydantic_core_schema__: ClassVar[CoreSchema] __pydantic_custom_init__: ClassVar[bool] __pydantic_decorators__: ClassVar[_decorators.DecoratorInfos] __pydantic_generic_metadata__: ClassVar[_generics.PydanticGenericMetadata] __pydantic_parent_namespace__: ClassVar[dict[str, Any] | None] __pydantic_post_init__: ClassVar[None | Literal['model_post_init']] __pydantic_root_model__: ClassVar[bool] __pydantic_serializer__: ClassVar[SchemaSerializer] __pydantic_validator__: ClassVar[SchemaValidator] # Instance attributes # Note: we use the non-existent kwarg `init=False` in pydantic.fields.Field below so that @dataclass_transform # doesn't think these are valid as keyword arguments to the class initializer. 
__pydantic_extra__: dict[str, Any] | None = _Field(init=False) # type: ignore __pydantic_fields_set__: set[str] = _Field(init=False) # type: ignore __pydantic_private__: dict[str, Any] | None = _Field(init=False) # type: ignore else: # `model_fields` and `__pydantic_decorators__` must be set for # pydantic._internal._generate_schema.GenerateSchema.model_schema to work for a plain BaseModel annotation model_fields = {} model_computed_fields = {} __pydantic_decorators__ = _decorators.DecoratorInfos() __pydantic_parent_namespace__ = None # Prevent `BaseModel` from being instantiated directly: __pydantic_validator__ = _mock_val_ser.MockValSer( 'Pydantic models should inherit from BaseModel, BaseModel cannot be instantiated directly', val_or_ser='validator', code='base-model-instantiated', ) __pydantic_serializer__ = _mock_val_ser.MockValSer( 'Pydantic models should inherit from BaseModel, BaseModel cannot be instantiated directly', val_or_ser='serializer', code='base-model-instantiated', ) __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__' model_config = ConfigDict() __pydantic_complete__ = False __pydantic_root_model__ = False def __init__(self, /, **data: Any) -> None: # type: ignore """Create a new model by parsing and validating input data from keyword arguments. Raises [`ValidationError`][pydantic_core.ValidationError] if the input data cannot be validated to form a valid model. `self` is explicitly positional-only to allow `self` as a field name. """ # `__tracebackhide__` tells pytest and some other tools to omit this function from tracebacks __tracebackhide__ = True self.__pydantic_validator__.validate_python(data, self_instance=self) # The following line sets a flag that we use to determine when `__init__` gets overridden by the user __init__.__pydantic_base_init__ = True # pyright: ignore[reportFunctionMemberAccess] @property def model_extra(self) -> dict[str, Any] | None: """Get extra fields set during validation. 
Returns: A dictionary of extra fields, or `None` if `config.extra` is not set to `"allow"`. """ return self.__pydantic_extra__ @property def model_fields_set(self) -> set[str]: """Returns the set of fields that have been explicitly set on this model instance. Returns: A set of strings representing the fields that have been set, i.e. that were not filled from defaults. """ return self.__pydantic_fields_set__ @classmethod def model_construct(cls: type[Model], _fields_set: set[str] | None = None, **values: Any) -> Model: # noqa: C901 """Creates a new instance of the `Model` class with validated data. Creates a new model setting `__dict__` and `__pydantic_fields_set__` from trusted or pre-validated data. Default values are respected, but no other validation is performed. !!! note `model_construct()` generally respects the `model_config.extra` setting on the provided model. That is, if `model_config.extra == 'allow'`, then all extra passed values are added to the model instance's `__dict__` and `__pydantic_extra__` fields. If `model_config.extra == 'ignore'` (the default), then all extra passed values are ignored. Because no validation is performed with a call to `model_construct()`, having `model_config.extra == 'forbid'` does not result in an error if extra values are passed, but they will be ignored. Args: _fields_set: The set of field names accepted for the Model instance. values: Trusted or pre-validated data dictionary. Returns: A new instance of the `Model` class with validated data. 
""" m = cls.__new__(cls) fields_values: dict[str, Any] = {} fields_set = set() for name, field in cls.model_fields.items(): if field.alias is not None and field.alias in values: fields_values[name] = values.pop(field.alias) fields_set.add(name) if (name not in fields_set) and (field.validation_alias is not None): validation_aliases: list[str | AliasPath] = ( field.validation_alias.choices if isinstance(field.validation_alias, AliasChoices) else [field.validation_alias] ) for alias in validation_aliases: if isinstance(alias, str) and alias in values: fields_values[name] = values.pop(alias) fields_set.add(name) break elif isinstance(alias, AliasPath): value = alias.search_dict_for_path(values) if value is not PydanticUndefined: fields_values[name] = value fields_set.add(name) break if name not in fields_set: if name in values: fields_values[name] = values.pop(name) fields_set.add(name) elif not field.is_required(): fields_values[name] = field.get_default(call_default_factory=True) if _fields_set is None: _fields_set = fields_set _extra: dict[str, Any] | None = ( {k: v for k, v in values.items()} if cls.model_config.get('extra') == 'allow' else None ) _object_setattr(m, '__dict__', fields_values) _object_setattr(m, '__pydantic_fields_set__', _fields_set) if not cls.__pydantic_root_model__: _object_setattr(m, '__pydantic_extra__', _extra) if cls.__pydantic_post_init__: m.model_post_init(None) # update private attributes with values set if hasattr(m, '__pydantic_private__') and m.__pydantic_private__ is not None: for k, v in values.items(): if k in m.__private_attributes__: m.__pydantic_private__[k] = v elif not cls.__pydantic_root_model__: # Note: if there are any private attributes, cls.__pydantic_post_init__ would exist # Since it doesn't, that means that `__pydantic_private__` should be set to None _object_setattr(m, '__pydantic_private__', None) return m def model_copy(self: Model, *, update: dict[str, Any] | None = None, deep: bool = False) -> Model: """Usage 
docs: https://docs.pydantic.dev/2.7/concepts/serialization/#model_copy Returns a copy of the model. Args: update: Values to change/add in the new model. Note: the data is not validated before creating the new model. You should trust this data. deep: Set to `True` to make a deep copy of the model. Returns: New model instance. """ copied = self.__deepcopy__() if deep else self.__copy__() if update: if self.model_config.get('extra') == 'allow': for k, v in update.items(): if k in self.model_fields: copied.__dict__[k] = v else: if copied.__pydantic_extra__ is None: copied.__pydantic_extra__ = {} copied.__pydantic_extra__[k] = v else: copied.__dict__.update(update) copied.__pydantic_fields_set__.update(update.keys()) return copied def model_dump( self, *, mode: Literal['json', 'python'] | str = 'python', include: IncEx = None, exclude: IncEx = None, context: dict[str, Any] | None = None, by_alias: bool = False, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, round_trip: bool = False, warnings: bool | Literal['none', 'warn', 'error'] = True, serialize_as_any: bool = False, ) -> dict[str, Any]: """Usage docs: https://docs.pydantic.dev/2.7/concepts/serialization/#modelmodel_dump Generate a dictionary representation of the model, optionally specifying which fields to include or exclude. Args: mode: The mode in which `to_python` should run. If mode is 'json', the output will only contain JSON serializable types. If mode is 'python', the output may contain non-JSON-serializable Python objects. include: A set of fields to include in the output. exclude: A set of fields to exclude from the output. context: Additional context to pass to the serializer. by_alias: Whether to use the field's alias in the dictionary key if defined. exclude_unset: Whether to exclude fields that have not been explicitly set. exclude_defaults: Whether to exclude fields that are set to their default value. 
exclude_none: Whether to exclude fields that have a value of `None`. round_trip: If True, dumped values should be valid as input for non-idempotent types such as Json[T]. warnings: How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, "error" raises a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError]. serialize_as_any: Whether to serialize fields with duck-typing serialization behavior. Returns: A dictionary representation of the model. """ return self.__pydantic_serializer__.to_python( self, mode=mode, by_alias=by_alias, include=include, exclude=exclude, context=context, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, exclude_none=exclude_none, round_trip=round_trip, warnings=warnings, serialize_as_any=serialize_as_any, ) def model_dump_json( self, *, indent: int | None = None, include: IncEx = None, exclude: IncEx = None, context: dict[str, Any] | None = None, by_alias: bool = False, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, round_trip: bool = False, warnings: bool | Literal['none', 'warn', 'error'] = True, serialize_as_any: bool = False, ) -> str: """Usage docs: https://docs.pydantic.dev/2.7/concepts/serialization/#modelmodel_dump_json Generates a JSON representation of the model using Pydantic's `to_json` method. Args: indent: Indentation to use in the JSON output. If None is passed, the output will be compact. include: Field(s) to include in the JSON output. exclude: Field(s) to exclude from the JSON output. context: Additional context to pass to the serializer. by_alias: Whether to serialize using field aliases. exclude_unset: Whether to exclude fields that have not been explicitly set. exclude_defaults: Whether to exclude fields that are set to their default value. exclude_none: Whether to exclude fields that have a value of `None`. round_trip: If True, dumped values should be valid as input for non-idempotent types such as Json[T]. 
warnings: How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, "error" raises a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError]. serialize_as_any: Whether to serialize fields with duck-typing serialization behavior. Returns: A JSON string representation of the model. """ return self.__pydantic_serializer__.to_json( self, indent=indent, include=include, exclude=exclude, context=context, by_alias=by_alias, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, exclude_none=exclude_none, round_trip=round_trip, warnings=warnings, serialize_as_any=serialize_as_any, ).decode() @classmethod def model_json_schema( cls, by_alias: bool = True, ref_template: str = DEFAULT_REF_TEMPLATE, schema_generator: type[GenerateJsonSchema] = GenerateJsonSchema, mode: JsonSchemaMode = 'validation', ) -> dict[str, Any]: """Generates a JSON schema for a model class. Args: by_alias: Whether to use attribute aliases or not. ref_template: The reference template. schema_generator: To override the logic used to generate the JSON schema, as a subclass of `GenerateJsonSchema` with your desired modifications mode: The mode in which to generate the schema. Returns: The JSON schema for the given model class. """ return model_json_schema( cls, by_alias=by_alias, ref_template=ref_template, schema_generator=schema_generator, mode=mode ) @classmethod def model_parametrized_name(cls, params: tuple[type[Any], ...]) -> str: """Compute the class name for parametrizations of generic classes. This method can be overridden to achieve a custom naming scheme for generic BaseModels. Args: params: Tuple of types of the class. Given a generic class `Model` with 2 type variables and a concrete model `Model[str, int]`, the value `(str, int)` would be passed to `params`. Returns: String representing the new class where `params` are passed to `cls` as type variables. 
Raises: TypeError: Raised when trying to generate concrete names for non-generic models. """ if not issubclass(cls, typing.Generic): raise TypeError('Concrete names should only be generated for generic models.') # Any strings received should represent forward references, so we handle them specially below. # If we eventually move toward wrapping them in a ForwardRef in __class_getitem__ in the future, # we may be able to remove this special case. param_names = [param if isinstance(param, str) else _repr.display_as_type(param) for param in params] params_component = ', '.join(param_names) return f'{cls.__name__}[{params_component}]' def model_post_init(self, __context: Any) -> None: """Override this method to perform additional initialization after `__init__` and `model_construct`. This is useful if you want to do some validation that requires the entire model to be initialized. """ pass @classmethod def model_rebuild( cls, *, force: bool = False, raise_errors: bool = True, _parent_namespace_depth: int = 2, _types_namespace: dict[str, Any] | None = None, ) -> bool | None: """Try to rebuild the pydantic-core schema for the model. This may be necessary when one of the annotations is a ForwardRef which could not be resolved during the initial attempt to build the schema, and automatic rebuilding fails. Args: force: Whether to force the rebuilding of the model schema, defaults to `False`. raise_errors: Whether to raise errors, defaults to `True`. _parent_namespace_depth: The depth level of the parent namespace, defaults to 2. _types_namespace: The types namespace, defaults to `None`. Returns: Returns `None` if the schema is already "complete" and rebuilding was not required. If rebuilding _was_ required, returns `True` if rebuilding was successful, otherwise `False`. 
""" if not force and cls.__pydantic_complete__: return None else: if '__pydantic_core_schema__' in cls.__dict__: delattr(cls, '__pydantic_core_schema__') # delete cached value to ensure full rebuild happens if _types_namespace is not None: types_namespace: dict[str, Any] | None = _types_namespace.copy() else: if _parent_namespace_depth > 0: frame_parent_ns = _typing_extra.parent_frame_namespace(parent_depth=_parent_namespace_depth) or {} cls_parent_ns = ( _model_construction.unpack_lenient_weakvaluedict(cls.__pydantic_parent_namespace__) or {} ) types_namespace = {**cls_parent_ns, **frame_parent_ns} cls.__pydantic_parent_namespace__ = _model_construction.build_lenient_weakvaluedict(types_namespace) else: types_namespace = _model_construction.unpack_lenient_weakvaluedict( cls.__pydantic_parent_namespace__ ) types_namespace = _typing_extra.get_cls_types_namespace(cls, types_namespace) # manually override defer_build so complete_model_class doesn't skip building the model again config = {**cls.model_config, 'defer_build': False} return _model_construction.complete_model_class( cls, cls.__name__, _config.ConfigWrapper(config, check=False), raise_errors=raise_errors, types_namespace=types_namespace, ) @classmethod def model_validate( cls: type[Model], obj: Any, *, strict: bool | None = None, from_attributes: bool | None = None, context: dict[str, Any] | None = None, ) -> Model: """Validate a pydantic model instance. Args: obj: The object to validate. strict: Whether to enforce types strictly. from_attributes: Whether to extract data from object attributes. context: Additional context to pass to the validator. Raises: ValidationError: If the object could not be validated. Returns: The validated model instance. 
""" # `__tracebackhide__` tells pytest and some other tools to omit this function from tracebacks __tracebackhide__ = True return cls.__pydantic_validator__.validate_python( obj, strict=strict, from_attributes=from_attributes, context=context ) @classmethod def model_validate_json( cls: type[Model], json_data: str | bytes | bytearray, *, strict: bool | None = None, context: dict[str, Any] | None = None, ) -> Model: """Usage docs: https://docs.pydantic.dev/2.7/concepts/json/#json-parsing Validate the given JSON data against the Pydantic model. Args: json_data: The JSON data to validate. strict: Whether to enforce types strictly. context: Extra variables to pass to the validator. Returns: The validated Pydantic model. Raises: ValueError: If `json_data` is not a JSON string. """ # `__tracebackhide__` tells pytest and some other tools to omit this function from tracebacks __tracebackhide__ = True return cls.__pydantic_validator__.validate_json(json_data, strict=strict, context=context) @classmethod def model_validate_strings( cls: type[Model], obj: Any, *, strict: bool | None = None, context: dict[str, Any] | None = None, ) -> Model: """Validate the given object contains string data against the Pydantic model. Args: obj: The object contains string data to validate. strict: Whether to enforce types strictly. context: Extra variables to pass to the validator. Returns: The validated Pydantic model. """ # `__tracebackhide__` tells pytest and some other tools to omit this function from tracebacks __tracebackhide__ = True return cls.__pydantic_validator__.validate_strings(obj, strict=strict, context=context) @classmethod def __get_pydantic_core_schema__(cls, source: type[BaseModel], handler: GetCoreSchemaHandler, /) -> CoreSchema: """Hook into generating the model's CoreSchema. Args: source: The class we are generating a schema for. This will generally be the same as the `cls` argument if this is a classmethod. handler: Call into Pydantic's internal JSON schema generation. 
A callable that calls into Pydantic's internal CoreSchema generation logic. Returns: A `pydantic-core` `CoreSchema`. """ # Only use the cached value from this _exact_ class; we don't want one from a parent class # This is why we check `cls.__dict__` and don't use `cls.__pydantic_core_schema__` or similar. if '__pydantic_core_schema__' in cls.__dict__: # Due to the way generic classes are built, it's possible that an invalid schema may be temporarily # set on generic classes. I think we could resolve this to ensure that we get proper schema caching # for generics, but for simplicity for now, we just always rebuild if the class has a generic origin. if not cls.__pydantic_generic_metadata__['origin']: return cls.__pydantic_core_schema__ return handler(source) @classmethod def __get_pydantic_json_schema__( cls, core_schema: CoreSchema, handler: GetJsonSchemaHandler, /, ) -> JsonSchemaValue: """Hook into generating the model's JSON schema. Args: core_schema: A `pydantic-core` CoreSchema. You can ignore this argument and call the handler with a new CoreSchema, wrap this CoreSchema (`{'type': 'nullable', 'schema': current_schema}`), or just call the handler with the original schema. handler: Call into Pydantic's internal JSON schema generation. This will raise a `pydantic.errors.PydanticInvalidForJsonSchema` if JSON schema generation fails. Since this gets called by `BaseModel.model_json_schema` you can override the `schema_generator` argument to that function to change JSON schema generation globally for a type. Returns: A JSON schema, as a Python object. """ return handler(core_schema) @classmethod def __pydantic_init_subclass__(cls, **kwargs: Any) -> None: """This is intended to behave just like `__init_subclass__`, but is called by `ModelMetaclass` only after the class is actually fully initialized. In particular, attributes like `model_fields` will be present when this is called. 
This is necessary because `__init_subclass__` will always be called by `type.__new__`, and it would require a prohibitively large refactor to the `ModelMetaclass` to ensure that `type.__new__` was called in such a manner that the class would already be sufficiently initialized. This will receive the same `kwargs` that would be passed to the standard `__init_subclass__`, namely, any kwargs passed to the class definition that aren't used internally by pydantic. Args: **kwargs: Any keyword arguments passed to the class definition that aren't used internally by pydantic. """ pass def __class_getitem__( cls, typevar_values: type[Any] | tuple[type[Any], ...] ) -> type[BaseModel] | _forward_ref.PydanticRecursiveRef: cached = _generics.get_cached_generic_type_early(cls, typevar_values) if cached is not None: return cached if cls is BaseModel: raise TypeError('Type parameters should be placed on typing.Generic, not BaseModel') if not hasattr(cls, '__parameters__'): raise TypeError(f'{cls} cannot be parametrized because it does not inherit from typing.Generic') if not cls.__pydantic_generic_metadata__['parameters'] and typing.Generic not in cls.__bases__: raise TypeError(f'{cls} is not a generic class') if not isinstance(typevar_values, tuple): typevar_values = (typevar_values,) _generics.check_parameters_count(cls, typevar_values) # Build map from generic typevars to passed params typevars_map: dict[_typing_extra.TypeVarType, type[Any]] = dict( zip(cls.__pydantic_generic_metadata__['parameters'], typevar_values) ) if _utils.all_identical(typevars_map.keys(), typevars_map.values()) and typevars_map: submodel = cls # if arguments are equal to parameters it's the same object _generics.set_cached_generic_type(cls, typevar_values, submodel) else: parent_args = cls.__pydantic_generic_metadata__['args'] if not parent_args: args = typevar_values else: args = tuple(_generics.replace_types(arg, typevars_map) for arg in parent_args) origin = cls.__pydantic_generic_metadata__['origin'] 
or cls model_name = origin.model_parametrized_name(args) params = tuple( {param: None for param in _generics.iter_contained_typevars(typevars_map.values())} ) # use dict as ordered set with _generics.generic_recursion_self_type(origin, args) as maybe_self_type: if maybe_self_type is not None: return maybe_self_type cached = _generics.get_cached_generic_type_late(cls, typevar_values, origin, args) if cached is not None: return cached # Attempt to rebuild the origin in case new types have been defined try: # depth 3 gets you above this __class_getitem__ call origin.model_rebuild(_parent_namespace_depth=3) except PydanticUndefinedAnnotation: # It's okay if it fails, it just means there are still undefined types # that could be evaluated later. # TODO: Make sure validation fails if there are still undefined types, perhaps using MockValidator pass submodel = _generics.create_generic_submodel(model_name, origin, args, params) # Update cache _generics.set_cached_generic_type(cls, typevar_values, submodel, origin, args) return submodel def __copy__(self: Model) -> Model: """Returns a shallow copy of the model.""" cls = type(self) m = cls.__new__(cls) _object_setattr(m, '__dict__', copy(self.__dict__)) _object_setattr(m, '__pydantic_extra__', copy(self.__pydantic_extra__)) _object_setattr(m, '__pydantic_fields_set__', copy(self.__pydantic_fields_set__)) if not hasattr(self, '__pydantic_private__') or self.__pydantic_private__ is None: _object_setattr(m, '__pydantic_private__', None) else: _object_setattr( m, '__pydantic_private__', {k: v for k, v in self.__pydantic_private__.items() if v is not PydanticUndefined}, ) return m def __deepcopy__(self: Model, memo: dict[int, Any] | None = None) -> Model: """Returns a deep copy of the model.""" cls = type(self) m = cls.__new__(cls) _object_setattr(m, '__dict__', deepcopy(self.__dict__, memo=memo)) _object_setattr(m, '__pydantic_extra__', deepcopy(self.__pydantic_extra__, memo=memo)) # This next line doesn't need a deepcopy 
because __pydantic_fields_set__ is a set[str], # and attempting a deepcopy would be marginally slower. _object_setattr(m, '__pydantic_fields_set__', copy(self.__pydantic_fields_set__)) if not hasattr(self, '__pydantic_private__') or self.__pydantic_private__ is None: _object_setattr(m, '__pydantic_private__', None) else: _object_setattr( m, '__pydantic_private__', deepcopy({k: v for k, v in self.__pydantic_private__.items() if v is not PydanticUndefined}, memo=memo), ) return m if not typing.TYPE_CHECKING: # We put `__getattr__` in a non-TYPE_CHECKING block because otherwise, mypy allows arbitrary attribute access # The same goes for __setattr__ and __delattr__, see: https://github.com/pydantic/pydantic/issues/8643 def __getattr__(self, item: str) -> Any: private_attributes = object.__getattribute__(self, '__private_attributes__') if item in private_attributes: attribute = private_attributes[item] if hasattr(attribute, '__get__'): return attribute.__get__(self, type(self)) # type: ignore try: # Note: self.__pydantic_private__ cannot be None if self.__private_attributes__ has items return self.__pydantic_private__[item] # type: ignore except KeyError as exc: raise AttributeError(f'{type(self).__name__!r} object has no attribute {item!r}') from exc else: # `__pydantic_extra__` can fail to be set if the model is not yet fully initialized. 
# See `BaseModel.__repr_args__` for more details try: pydantic_extra = object.__getattribute__(self, '__pydantic_extra__') except AttributeError: pydantic_extra = None if pydantic_extra: try: return pydantic_extra[item] except KeyError as exc: raise AttributeError(f'{type(self).__name__!r} object has no attribute {item!r}') from exc else: if hasattr(self.__class__, item): return super().__getattribute__(item) # Raises AttributeError if appropriate else: # this is the current error raise AttributeError(f'{type(self).__name__!r} object has no attribute {item!r}') def __setattr__(self, name: str, value: Any) -> None: if name in self.__class_vars__: raise AttributeError( f'{name!r} is a ClassVar of `{self.__class__.__name__}` and cannot be set on an instance. ' f'If you want to set a value on the class, use `{self.__class__.__name__}.{name} = value`.' ) elif not _fields.is_valid_field_name(name): if self.__pydantic_private__ is None or name not in self.__private_attributes__: _object_setattr(self, name, value) else: attribute = self.__private_attributes__[name] if hasattr(attribute, '__set__'): attribute.__set__(self, value) # type: ignore else: self.__pydantic_private__[name] = value return self._check_frozen(name, value) attr = getattr(self.__class__, name, None) if isinstance(attr, property): attr.__set__(self, value) elif self.model_config.get('validate_assignment', None): self.__pydantic_validator__.validate_assignment(self, name, value) elif self.model_config.get('extra') != 'allow' and name not in self.model_fields: # TODO - matching error raise ValueError(f'"{self.__class__.__name__}" object has no field "{name}"') elif self.model_config.get('extra') == 'allow' and name not in self.model_fields: if self.model_extra and name in self.model_extra: self.__pydantic_extra__[name] = value # type: ignore else: try: getattr(self, name) except AttributeError: # attribute does not already exist on instance, so put it in extra self.__pydantic_extra__[name] = value # type: 
ignore else: # attribute _does_ already exist on instance, and was not in extra, so update it _object_setattr(self, name, value) else: self.__dict__[name] = value self.__pydantic_fields_set__.add(name) def __delattr__(self, item: str) -> Any: if item in self.__private_attributes__: attribute = self.__private_attributes__[item] if hasattr(attribute, '__delete__'): attribute.__delete__(self) # type: ignore return try: # Note: self.__pydantic_private__ cannot be None if self.__private_attributes__ has items del self.__pydantic_private__[item] # type: ignore return except KeyError as exc: raise AttributeError(f'{type(self).__name__!r} object has no attribute {item!r}') from exc self._check_frozen(item, None) if item in self.model_fields: object.__delattr__(self, item) elif self.__pydantic_extra__ is not None and item in self.__pydantic_extra__: del self.__pydantic_extra__[item] else: try: object.__delattr__(self, item) except AttributeError: raise AttributeError(f'{type(self).__name__!r} object has no attribute {item!r}') def _check_frozen(self, name: str, value: Any) -> None: if self.model_config.get('frozen', None): typ = 'frozen_instance' elif getattr(self.model_fields.get(name), 'frozen', False): typ = 'frozen_field' else: return error: pydantic_core.InitErrorDetails = { 'type': typ, 'loc': (name,), 'input': value, } raise pydantic_core.ValidationError.from_exception_data(self.__class__.__name__, [error]) def __getstate__(self) -> dict[Any, Any]: private = self.__pydantic_private__ if private: private = {k: v for k, v in private.items() if v is not PydanticUndefined} return { '__dict__': self.__dict__, '__pydantic_extra__': self.__pydantic_extra__, '__pydantic_fields_set__': self.__pydantic_fields_set__, '__pydantic_private__': private, } def __setstate__(self, state: dict[Any, Any]) -> None: _object_setattr(self, '__pydantic_fields_set__', state['__pydantic_fields_set__']) _object_setattr(self, '__pydantic_extra__', state['__pydantic_extra__']) 
_object_setattr(self, '__pydantic_private__', state['__pydantic_private__']) _object_setattr(self, '__dict__', state['__dict__']) if not typing.TYPE_CHECKING: def __eq__(self, other: Any) -> bool: if isinstance(other, BaseModel): # When comparing instances of generic types for equality, as long as all field values are equal, # only require their generic origin types to be equal, rather than exact type equality. # This prevents headaches like MyGeneric(x=1) != MyGeneric[Any](x=1). self_type = self.__pydantic_generic_metadata__['origin'] or self.__class__ other_type = other.__pydantic_generic_metadata__['origin'] or other.__class__ # Perform common checks first if not ( self_type == other_type and getattr(self, '__pydantic_private__', None) == getattr(other, '__pydantic_private__', None) and self.__pydantic_extra__ == other.__pydantic_extra__ ): return False # We only want to compare pydantic fields but ignoring fields is costly. # We'll perform a fast check first, and fallback only when needed # See GH-7444 and GH-7825 for rationale and a performance benchmark # First, do the fast (and sometimes faulty) __dict__ comparison if self.__dict__ == other.__dict__: # If the check above passes, then pydantic fields are equal, we can return early return True # We don't want to trigger unnecessary costly filtering of __dict__ on all unequal objects, so we return # early if there are no keys to ignore (we would just return False later on anyway) model_fields = type(self).model_fields.keys() if self.__dict__.keys() <= model_fields and other.__dict__.keys() <= model_fields: return False # If we reach here, there are non-pydantic-fields keys, mapped to unequal values, that we need to ignore # Resort to costly filtering of the __dict__ objects # We use operator.itemgetter because it is much faster than dict comprehensions # NOTE: Contrary to standard python class and instances, when the Model class has a default value for an # attribute and the model instance doesn't have a 
corresponding attribute, accessing the missing attribute # raises an error in BaseModel.__getattr__ instead of returning the class attribute # So we can use operator.itemgetter() instead of operator.attrgetter() getter = operator.itemgetter(*model_fields) if model_fields else lambda _: _utils._SENTINEL try: return getter(self.__dict__) == getter(other.__dict__) except KeyError: # In rare cases (such as when using the deprecated BaseModel.copy() method), # the __dict__ may not contain all model fields, which is how we can get here. # getter(self.__dict__) is much faster than any 'safe' method that accounts # for missing keys, and wrapping it in a `try` doesn't slow things down much # in the common case. self_fields_proxy = _utils.SafeGetItemProxy(self.__dict__) other_fields_proxy = _utils.SafeGetItemProxy(other.__dict__) return getter(self_fields_proxy) == getter(other_fields_proxy) # other instance is not a BaseModel else: return NotImplemented # delegate to the other item in the comparison if typing.TYPE_CHECKING: # We put `__init_subclass__` in a TYPE_CHECKING block because, even though we want the type-checking benefits # described in the signature of `__init_subclass__` below, we don't want to modify the default behavior of # subclass initialization. def __init_subclass__(cls, **kwargs: Unpack[ConfigDict]): """This signature is included purely to help type-checkers check arguments to class declaration, which provides a way to conveniently set model_config key/value pairs. ```py from pydantic import BaseModel class MyModel(BaseModel, extra='allow'): ... ``` However, this may be deceiving, since the _actual_ calls to `__init_subclass__` will not receive any of the config arguments, and will only receive any keyword arguments passed during class initialization that are _not_ expected keys in ConfigDict. (This is due to the way `ModelMetaclass.__new__` works.) 
Args: **kwargs: Keyword arguments passed to the class definition, which set model_config Note: You may want to override `__pydantic_init_subclass__` instead, which behaves similarly but is called *after* the class is fully initialized. """ def __iter__(self) -> TupleGenerator: """So `dict(model)` works.""" yield from [(k, v) for (k, v) in self.__dict__.items() if not k.startswith('_')] extra = self.__pydantic_extra__ if extra: yield from extra.items() def __repr__(self) -> str: return f'{self.__repr_name__()}({self.__repr_str__(", ")})' def __repr_args__(self) -> _repr.ReprArgs: for k, v in self.__dict__.items(): field = self.model_fields.get(k) if field and field.repr: yield k, v # `__pydantic_extra__` can fail to be set if the model is not yet fully initialized. # This can happen if a `ValidationError` is raised during initialization and the instance's # repr is generated as part of the exception handling. Therefore, we use `getattr` here # with a fallback, even though the type hints indicate the attribute will always be present. 
try: pydantic_extra = object.__getattribute__(self, '__pydantic_extra__') except AttributeError: pydantic_extra = None if pydantic_extra is not None: yield from ((k, v) for k, v in pydantic_extra.items()) yield from ((k, getattr(self, k)) for k, v in self.model_computed_fields.items() if v.repr) # take logic from `_repr.Representation` without the side effects of inheritance, see #5740 __repr_name__ = _repr.Representation.__repr_name__ __repr_str__ = _repr.Representation.__repr_str__ __pretty__ = _repr.Representation.__pretty__ __rich_repr__ = _repr.Representation.__rich_repr__ def __str__(self) -> str: return self.__repr_str__(' ') # ##### Deprecated methods from v1 ##### @property @typing_extensions.deprecated( 'The `__fields__` attribute is deprecated, use `model_fields` instead.', category=None ) def __fields__(self) -> dict[str, FieldInfo]: warnings.warn( 'The `__fields__` attribute is deprecated, use `model_fields` instead.', category=PydanticDeprecatedSince20 ) return self.model_fields @property @typing_extensions.deprecated( 'The `__fields_set__` attribute is deprecated, use `model_fields_set` instead.', category=None, ) def __fields_set__(self) -> set[str]: warnings.warn( 'The `__fields_set__` attribute is deprecated, use `model_fields_set` instead.', category=PydanticDeprecatedSince20, ) return self.__pydantic_fields_set__ @typing_extensions.deprecated('The `dict` method is deprecated; use `model_dump` instead.', category=None) def dict( # noqa: D102 self, *, include: IncEx = None, exclude: IncEx = None, by_alias: bool = False, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, ) -> typing.Dict[str, Any]: # noqa UP006 warnings.warn('The `dict` method is deprecated; use `model_dump` instead.', category=PydanticDeprecatedSince20) return self.model_dump( include=include, exclude=exclude, by_alias=by_alias, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, exclude_none=exclude_none, ) 
@typing_extensions.deprecated('The `json` method is deprecated; use `model_dump_json` instead.', category=None) def json( # noqa: D102 self, *, include: IncEx = None, exclude: IncEx = None, by_alias: bool = False, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: typing.Callable[[Any], Any] | None = PydanticUndefined, # type: ignore[assignment] models_as_dict: bool = PydanticUndefined, # type: ignore[assignment] **dumps_kwargs: Any, ) -> str: warnings.warn( 'The `json` method is deprecated; use `model_dump_json` instead.', category=PydanticDeprecatedSince20 ) if encoder is not PydanticUndefined: raise TypeError('The `encoder` argument is no longer supported; use field serializers instead.') if models_as_dict is not PydanticUndefined: raise TypeError('The `models_as_dict` argument is no longer supported; use a model serializer instead.') if dumps_kwargs: raise TypeError('`dumps_kwargs` keyword arguments are no longer supported.') return self.model_dump_json( include=include, exclude=exclude, by_alias=by_alias, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, exclude_none=exclude_none, ) @classmethod @typing_extensions.deprecated('The `parse_obj` method is deprecated; use `model_validate` instead.', category=None) def parse_obj(cls: type[Model], obj: Any) -> Model: # noqa: D102 warnings.warn( 'The `parse_obj` method is deprecated; use `model_validate` instead.', category=PydanticDeprecatedSince20 ) return cls.model_validate(obj) @classmethod @typing_extensions.deprecated( 'The `parse_raw` method is deprecated; if your data is JSON use `model_validate_json`, ' 'otherwise load the data then use `model_validate` instead.', category=None, ) def parse_raw( # noqa: D102 cls: type[Model], b: str | bytes, *, content_type: str | None = None, encoding: str = 'utf8', proto: DeprecatedParseProtocol | None = None, allow_pickle: bool = False, ) -> Model: # pragma: no cover warnings.warn( 'The `parse_raw` method is 
deprecated; if your data is JSON use `model_validate_json`, ' 'otherwise load the data then use `model_validate` instead.', category=PydanticDeprecatedSince20, ) from .deprecated import parse try: obj = parse.load_str_bytes( b, proto=proto, content_type=content_type, encoding=encoding, allow_pickle=allow_pickle, ) except (ValueError, TypeError) as exc: import json # try to match V1 if isinstance(exc, UnicodeDecodeError): type_str = 'value_error.unicodedecode' elif isinstance(exc, json.JSONDecodeError): type_str = 'value_error.jsondecode' elif isinstance(exc, ValueError): type_str = 'value_error' else: type_str = 'type_error' # ctx is missing here, but since we've added `input` to the error, we're not pretending it's the same error: pydantic_core.InitErrorDetails = { # The type: ignore on the next line is to ignore the requirement of LiteralString 'type': pydantic_core.PydanticCustomError(type_str, str(exc)), # type: ignore 'loc': ('__root__',), 'input': b, } raise pydantic_core.ValidationError.from_exception_data(cls.__name__, [error]) return cls.model_validate(obj) @classmethod @typing_extensions.deprecated( 'The `parse_file` method is deprecated; load the data from file, then if your data is JSON ' 'use `model_validate_json`, otherwise `model_validate` instead.', category=None, ) def parse_file( # noqa: D102 cls: type[Model], path: str | Path, *, content_type: str | None = None, encoding: str = 'utf8', proto: DeprecatedParseProtocol | None = None, allow_pickle: bool = False, ) -> Model: warnings.warn( 'The `parse_file` method is deprecated; load the data from file, then if your data is JSON ' 'use `model_validate_json`, otherwise `model_validate` instead.', category=PydanticDeprecatedSince20, ) from .deprecated import parse obj = parse.load_file( path, proto=proto, content_type=content_type, encoding=encoding, allow_pickle=allow_pickle, ) return cls.parse_obj(obj) @classmethod @typing_extensions.deprecated( 'The `from_orm` method is deprecated; set ' 
"`model_config['from_attributes']=True` and use `model_validate` instead.", category=None, ) def from_orm(cls: type[Model], obj: Any) -> Model: # noqa: D102 warnings.warn( 'The `from_orm` method is deprecated; set ' "`model_config['from_attributes']=True` and use `model_validate` instead.", category=PydanticDeprecatedSince20, ) if not cls.model_config.get('from_attributes', None): raise PydanticUserError( 'You must set the config attribute `from_attributes=True` to use from_orm', code=None ) return cls.model_validate(obj) @classmethod @typing_extensions.deprecated('The `construct` method is deprecated; use `model_construct` instead.', category=None) def construct(cls: type[Model], _fields_set: set[str] | None = None, **values: Any) -> Model: # noqa: D102 warnings.warn( 'The `construct` method is deprecated; use `model_construct` instead.', category=PydanticDeprecatedSince20 ) return cls.model_construct(_fields_set=_fields_set, **values) @typing_extensions.deprecated( 'The `copy` method is deprecated; use `model_copy` instead. ' 'See the docstring of `BaseModel.copy` for details about how to handle `include` and `exclude`.', category=None, ) def copy( self: Model, *, include: AbstractSetIntStr | MappingIntStrAny | None = None, exclude: AbstractSetIntStr | MappingIntStrAny | None = None, update: typing.Dict[str, Any] | None = None, # noqa UP006 deep: bool = False, ) -> Model: # pragma: no cover """Returns a copy of the model. !!! warning "Deprecated" This method is now deprecated; use `model_copy` instead. If you need `include` or `exclude`, use: ```py data = self.model_dump(include=include, exclude=exclude, round_trip=True) data = {**data, **(update or {})} copied = self.model_validate(data) ``` Args: include: Optional set or mapping specifying which fields to include in the copied model. exclude: Optional set or mapping specifying which fields to exclude in the copied model. 
update: Optional dictionary of field-value pairs to override field values in the copied model. deep: If True, the values of fields that are Pydantic models will be deep-copied. Returns: A copy of the model with included, excluded and updated fields as specified. """ warnings.warn( 'The `copy` method is deprecated; use `model_copy` instead. ' 'See the docstring of `BaseModel.copy` for details about how to handle `include` and `exclude`.', category=PydanticDeprecatedSince20, ) from .deprecated import copy_internals values = dict( copy_internals._iter( self, to_dict=False, by_alias=False, include=include, exclude=exclude, exclude_unset=False ), **(update or {}), ) if self.__pydantic_private__ is None: private = None else: private = {k: v for k, v in self.__pydantic_private__.items() if v is not PydanticUndefined} if self.__pydantic_extra__ is None: extra: dict[str, Any] | None = None else: extra = self.__pydantic_extra__.copy() for k in list(self.__pydantic_extra__): if k not in values: # k was in the exclude extra.pop(k) for k in list(values): if k in self.__pydantic_extra__: # k must have come from extra extra[k] = values.pop(k) # new `__pydantic_fields_set__` can have unset optional fields with a set value in `update` kwarg if update: fields_set = self.__pydantic_fields_set__ | update.keys() else: fields_set = set(self.__pydantic_fields_set__) # removing excluded fields from `__pydantic_fields_set__` if exclude: fields_set -= set(exclude) return copy_internals._copy_and_set_values(self, values, fields_set, extra, private, deep=deep) @classmethod @typing_extensions.deprecated('The `schema` method is deprecated; use `model_json_schema` instead.', category=None) def schema( # noqa: D102 cls, by_alias: bool = True, ref_template: str = DEFAULT_REF_TEMPLATE ) -> typing.Dict[str, Any]: # noqa UP006 warnings.warn( 'The `schema` method is deprecated; use `model_json_schema` instead.', category=PydanticDeprecatedSince20 ) return cls.model_json_schema(by_alias=by_alias, 
ref_template=ref_template) @classmethod @typing_extensions.deprecated( 'The `schema_json` method is deprecated; use `model_json_schema` and json.dumps instead.', category=None, ) def schema_json( # noqa: D102 cls, *, by_alias: bool = True, ref_template: str = DEFAULT_REF_TEMPLATE, **dumps_kwargs: Any ) -> str: # pragma: no cover warnings.warn( 'The `schema_json` method is deprecated; use `model_json_schema` and json.dumps instead.', category=PydanticDeprecatedSince20, ) import json from .deprecated.json import pydantic_encoder return json.dumps( cls.model_json_schema(by_alias=by_alias, ref_template=ref_template), default=pydantic_encoder, **dumps_kwargs, ) @classmethod @typing_extensions.deprecated('The `validate` method is deprecated; use `model_validate` instead.', category=None) def validate(cls: type[Model], value: Any) -> Model: # noqa: D102 warnings.warn( 'The `validate` method is deprecated; use `model_validate` instead.', category=PydanticDeprecatedSince20 ) return cls.model_validate(value) @classmethod @typing_extensions.deprecated( 'The `update_forward_refs` method is deprecated; use `model_rebuild` instead.', category=None, ) def update_forward_refs(cls, **localns: Any) -> None: # noqa: D102 warnings.warn( 'The `update_forward_refs` method is deprecated; use `model_rebuild` instead.', category=PydanticDeprecatedSince20, ) if localns: # pragma: no cover raise TypeError('`localns` arguments are not longer accepted.') cls.model_rebuild(force=True) @typing_extensions.deprecated( 'The private method `_iter` will be removed and should no longer be used.', category=None ) def _iter(self, *args: Any, **kwargs: Any) -> Any: warnings.warn( 'The private method `_iter` will be removed and should no longer be used.', category=PydanticDeprecatedSince20, ) from .deprecated import copy_internals return copy_internals._iter(self, *args, **kwargs) @typing_extensions.deprecated( 'The private method `_copy_and_set_values` will be removed and should no longer be used.', 
category=None, ) def _copy_and_set_values(self, *args: Any, **kwargs: Any) -> Any: warnings.warn( 'The private method `_copy_and_set_values` will be removed and should no longer be used.', category=PydanticDeprecatedSince20, ) from .deprecated import copy_internals return copy_internals._copy_and_set_values(self, *args, **kwargs) @classmethod @typing_extensions.deprecated( 'The private method `_get_value` will be removed and should no longer be used.', category=None, ) def _get_value(cls, *args: Any, **kwargs: Any) -> Any: warnings.warn( 'The private method `_get_value` will be removed and should no longer be used.', category=PydanticDeprecatedSince20, ) from .deprecated import copy_internals return copy_internals._get_value(cls, *args, **kwargs) @typing_extensions.deprecated( 'The private method `_calculate_keys` will be removed and should no longer be used.', category=None, ) def _calculate_keys(self, *args: Any, **kwargs: Any) -> Any: warnings.warn( 'The private method `_calculate_keys` will be removed and should no longer be used.', category=PydanticDeprecatedSince20, ) from .deprecated import copy_internals return copy_internals._calculate_keys(self, *args, **kwargs)
(**data: 'Any') -> 'None'
63,000
litellm.llms.bedrock_httpx
BedrockLLM
Example call ``` curl --location --request POST 'https://bedrock-runtime.{aws_region_name}.amazonaws.com/model/{bedrock_model_name}/invoke' --header 'Content-Type: application/json' --header 'Accept: application/json' --user "$AWS_ACCESS_KEY_ID":"$AWS_SECRET_ACCESS_KEY" --aws-sigv4 "aws:amz:us-east-1:bedrock" --data-raw '{ "prompt": "Hi", "temperature": 0, "p": 0.9, "max_tokens": 4096 }' ```
class BedrockLLM(BaseLLM): """ Example call ``` curl --location --request POST 'https://bedrock-runtime.{aws_region_name}.amazonaws.com/model/{bedrock_model_name}/invoke' \ --header 'Content-Type: application/json' \ --header 'Accept: application/json' \ --user "$AWS_ACCESS_KEY_ID":"$AWS_SECRET_ACCESS_KEY" \ --aws-sigv4 "aws:amz:us-east-1:bedrock" \ --data-raw '{ "prompt": "Hi", "temperature": 0, "p": 0.9, "max_tokens": 4096 }' ``` """ def __init__(self) -> None: super().__init__() def convert_messages_to_prompt( self, model, messages, provider, custom_prompt_dict ) -> Tuple[str, Optional[list]]: # handle anthropic prompts and amazon titan prompts prompt = "" chat_history: Optional[list] = None if provider == "anthropic" or provider == "amazon": if model in custom_prompt_dict: # check if the model has a registered custom prompt model_prompt_details = custom_prompt_dict[model] prompt = custom_prompt( role_dict=model_prompt_details["roles"], initial_prompt_value=model_prompt_details["initial_prompt_value"], final_prompt_value=model_prompt_details["final_prompt_value"], messages=messages, ) else: prompt = prompt_factory( model=model, messages=messages, custom_llm_provider="bedrock" ) elif provider == "mistral": prompt = prompt_factory( model=model, messages=messages, custom_llm_provider="bedrock" ) elif provider == "meta": prompt = prompt_factory( model=model, messages=messages, custom_llm_provider="bedrock" ) elif provider == "cohere": prompt, chat_history = cohere_message_pt(messages=messages) else: prompt = "" for message in messages: if "role" in message: if message["role"] == "user": prompt += f"{message['content']}" else: prompt += f"{message['content']}" else: prompt += f"{message['content']}" return prompt, chat_history # type: ignore def get_credentials( self, aws_access_key_id: Optional[str] = None, aws_secret_access_key: Optional[str] = None, aws_region_name: Optional[str] = None, aws_session_name: Optional[str] = None, aws_profile_name: Optional[str] = 
None, aws_role_name: Optional[str] = None, ): """ Return a boto3.Credentials object """ import boto3 ## CHECK IS 'os.environ/' passed in params_to_check: List[Optional[str]] = [ aws_access_key_id, aws_secret_access_key, aws_region_name, aws_session_name, aws_profile_name, aws_role_name, ] # Iterate over parameters and update if needed for i, param in enumerate(params_to_check): if param and param.startswith("os.environ/"): _v = get_secret(param) if _v is not None and isinstance(_v, str): params_to_check[i] = _v # Assign updated values back to parameters ( aws_access_key_id, aws_secret_access_key, aws_region_name, aws_session_name, aws_profile_name, aws_role_name, ) = params_to_check ### CHECK STS ### if aws_role_name is not None and aws_session_name is not None: sts_client = boto3.client( "sts", aws_access_key_id=aws_access_key_id, # [OPTIONAL] aws_secret_access_key=aws_secret_access_key, # [OPTIONAL] ) sts_response = sts_client.assume_role( RoleArn=aws_role_name, RoleSessionName=aws_session_name ) return sts_response["Credentials"] elif aws_profile_name is not None: ### CHECK SESSION ### # uses auth values from AWS profile usually stored in ~/.aws/credentials client = boto3.Session(profile_name=aws_profile_name) return client.get_credentials() else: session = boto3.Session( aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, region_name=aws_region_name, ) return session.get_credentials() def process_response( self, model: str, response: Union[requests.Response, httpx.Response], model_response: ModelResponse, stream: bool, logging_obj: Logging, optional_params: dict, api_key: str, data: Union[dict, str], messages: List, print_verbose, encoding, ) -> ModelResponse: ## LOGGING logging_obj.post_call( input=messages, api_key=api_key, original_response=response.text, additional_args={"complete_input_dict": data}, ) print_verbose(f"raw model_response: {response.text}") ## RESPONSE OBJECT try: completion_response = response.json() except: 
raise BedrockError(message=response.text, status_code=422) try: model_response.choices[0].message.content = completion_response["text"] # type: ignore except Exception as e: raise BedrockError(message=response.text, status_code=422) ## CALCULATING USAGE - bedrock returns usage in the headers prompt_tokens = int( response.headers.get( "x-amzn-bedrock-input-token-count", len(encoding.encode("".join(m.get("content", "") for m in messages))), ) ) completion_tokens = int( response.headers.get( "x-amzn-bedrock-output-token-count", len( encoding.encode( model_response.choices[0].message.content, # type: ignore disallowed_special=(), ) ), ) ) model_response["created"] = int(time.time()) model_response["model"] = model usage = Usage( prompt_tokens=prompt_tokens, completion_tokens=completion_tokens, total_tokens=prompt_tokens + completion_tokens, ) setattr(model_response, "usage", usage) return model_response def completion( self, model: str, messages: list, custom_prompt_dict: dict, model_response: ModelResponse, print_verbose: Callable, encoding, logging_obj, optional_params: dict, acompletion: bool, timeout: Optional[Union[float, httpx.Timeout]], litellm_params=None, logger_fn=None, extra_headers: Optional[dict] = None, client: Optional[Union[AsyncHTTPHandler, HTTPHandler]] = None, ) -> Union[ModelResponse, CustomStreamWrapper]: try: import boto3 from botocore.auth import SigV4Auth from botocore.awsrequest import AWSRequest from botocore.credentials import Credentials except ImportError as e: raise ImportError("Missing boto3 to call bedrock. 
Run 'pip install boto3'.") ## SETUP ## stream = optional_params.pop("stream", None) ## CREDENTIALS ## # pop aws_secret_access_key, aws_access_key_id, aws_region_name from kwargs, since completion calls fail with them aws_secret_access_key = optional_params.pop("aws_secret_access_key", None) aws_access_key_id = optional_params.pop("aws_access_key_id", None) aws_region_name = optional_params.pop("aws_region_name", None) aws_role_name = optional_params.pop("aws_role_name", None) aws_session_name = optional_params.pop("aws_session_name", None) aws_profile_name = optional_params.pop("aws_profile_name", None) aws_bedrock_runtime_endpoint = optional_params.pop( "aws_bedrock_runtime_endpoint", None ) # https://bedrock-runtime.{region_name}.amazonaws.com ### SET REGION NAME ### if aws_region_name is None: # check env # litellm_aws_region_name = get_secret("AWS_REGION_NAME", None) if litellm_aws_region_name is not None and isinstance( litellm_aws_region_name, str ): aws_region_name = litellm_aws_region_name standard_aws_region_name = get_secret("AWS_REGION", None) if standard_aws_region_name is not None and isinstance( standard_aws_region_name, str ): aws_region_name = standard_aws_region_name if aws_region_name is None: aws_region_name = "us-west-2" credentials: Credentials = self.get_credentials( aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, aws_region_name=aws_region_name, aws_session_name=aws_session_name, aws_profile_name=aws_profile_name, aws_role_name=aws_role_name, ) ### SET RUNTIME ENDPOINT ### endpoint_url = "" env_aws_bedrock_runtime_endpoint = get_secret("AWS_BEDROCK_RUNTIME_ENDPOINT") if aws_bedrock_runtime_endpoint is not None and isinstance( aws_bedrock_runtime_endpoint, str ): endpoint_url = aws_bedrock_runtime_endpoint elif env_aws_bedrock_runtime_endpoint and isinstance( env_aws_bedrock_runtime_endpoint, str ): endpoint_url = env_aws_bedrock_runtime_endpoint else: endpoint_url = 
f"https://bedrock-runtime.{aws_region_name}.amazonaws.com" if stream is not None and stream == True: endpoint_url = f"{endpoint_url}/model/{model}/invoke-with-response-stream" else: endpoint_url = f"{endpoint_url}/model/{model}/invoke" sigv4 = SigV4Auth(credentials, "bedrock", aws_region_name) provider = model.split(".")[0] prompt, chat_history = self.convert_messages_to_prompt( model, messages, provider, custom_prompt_dict ) inference_params = copy.deepcopy(optional_params) if provider == "cohere": if model.startswith("cohere.command-r"): ## LOAD CONFIG config = litellm.AmazonCohereChatConfig().get_config() for k, v in config.items(): if ( k not in inference_params ): # completion(top_k=3) > anthropic_config(top_k=3) <- allows for dynamic variables to be passed in inference_params[k] = v _data = {"message": prompt, **inference_params} if chat_history is not None: _data["chat_history"] = chat_history data = json.dumps(_data) else: ## LOAD CONFIG config = litellm.AmazonCohereConfig.get_config() for k, v in config.items(): if ( k not in inference_params ): # completion(top_k=3) > anthropic_config(top_k=3) <- allows for dynamic variables to be passed in inference_params[k] = v if stream == True: inference_params["stream"] = ( True # cohere requires stream = True in inference params ) data = json.dumps({"prompt": prompt, **inference_params}) else: raise Exception("UNSUPPORTED PROVIDER") ## COMPLETION CALL headers = {"Content-Type": "application/json"} if extra_headers is not None: headers = {"Content-Type": "application/json", **extra_headers} request = AWSRequest( method="POST", url=endpoint_url, data=data, headers=headers ) sigv4.add_auth(request) prepped = request.prepare() ## LOGGING logging_obj.pre_call( input=messages, api_key="", additional_args={ "complete_input_dict": data, "api_base": prepped.url, "headers": prepped.headers, }, ) ### ROUTING (ASYNC, STREAMING, SYNC) if acompletion: if isinstance(client, HTTPHandler): client = None if stream: return 
self.async_streaming( model=model, messages=messages, data=data, api_base=prepped.url, model_response=model_response, print_verbose=print_verbose, encoding=encoding, logging_obj=logging_obj, optional_params=optional_params, stream=True, litellm_params=litellm_params, logger_fn=logger_fn, headers=prepped.headers, timeout=timeout, client=client, ) # type: ignore ### ASYNC COMPLETION return self.async_completion( model=model, messages=messages, data=data, api_base=prepped.url, model_response=model_response, print_verbose=print_verbose, encoding=encoding, logging_obj=logging_obj, optional_params=optional_params, stream=False, litellm_params=litellm_params, logger_fn=logger_fn, headers=prepped.headers, timeout=timeout, client=client, ) # type: ignore if client is None or isinstance(client, AsyncHTTPHandler): _params = {} if timeout is not None: if isinstance(timeout, float) or isinstance(timeout, int): timeout = httpx.Timeout(timeout) _params["timeout"] = timeout self.client = HTTPHandler(**_params) # type: ignore else: self.client = client if stream is not None and stream == True: response = self.client.post( url=prepped.url, headers=prepped.headers, # type: ignore data=data, stream=stream, ) if response.status_code != 200: raise BedrockError( status_code=response.status_code, message=response.text ) decoder = AWSEventStreamDecoder() completion_stream = decoder.iter_bytes(response.iter_bytes(chunk_size=1024)) streaming_response = CustomStreamWrapper( completion_stream=completion_stream, model=model, custom_llm_provider="bedrock", logging_obj=logging_obj, ) return streaming_response response = self.client.post(url=prepped.url, headers=prepped.headers, data=data) # type: ignore try: response.raise_for_status() except httpx.HTTPStatusError as err: error_code = err.response.status_code raise BedrockError(status_code=error_code, message=response.text) return self.process_response( model=model, response=response, model_response=model_response, stream=stream, 
logging_obj=logging_obj, optional_params=optional_params, api_key="", data=data, messages=messages, print_verbose=print_verbose, encoding=encoding, ) async def async_completion( self, model: str, messages: list, api_base: str, model_response: ModelResponse, print_verbose: Callable, data: str, timeout: Optional[Union[float, httpx.Timeout]], encoding, logging_obj, stream, optional_params: dict, litellm_params=None, logger_fn=None, headers={}, client: Optional[AsyncHTTPHandler] = None, ) -> ModelResponse: if client is None: _params = {} if timeout is not None: if isinstance(timeout, float) or isinstance(timeout, int): timeout = httpx.Timeout(timeout) _params["timeout"] = timeout self.client = AsyncHTTPHandler(**_params) # type: ignore else: self.client = client # type: ignore response = await self.client.post(api_base, headers=headers, data=data) # type: ignore return self.process_response( model=model, response=response, model_response=model_response, stream=stream, logging_obj=logging_obj, api_key="", data=data, messages=messages, print_verbose=print_verbose, optional_params=optional_params, encoding=encoding, ) async def async_streaming( self, model: str, messages: list, api_base: str, model_response: ModelResponse, print_verbose: Callable, data: str, timeout: Optional[Union[float, httpx.Timeout]], encoding, logging_obj, stream, optional_params: dict, litellm_params=None, logger_fn=None, headers={}, client: Optional[AsyncHTTPHandler] = None, ) -> CustomStreamWrapper: if client is None: _params = {} if timeout is not None: if isinstance(timeout, float) or isinstance(timeout, int): timeout = httpx.Timeout(timeout) _params["timeout"] = timeout self.client = AsyncHTTPHandler(**_params) # type: ignore else: self.client = client # type: ignore response = await self.client.post(api_base, headers=headers, data=data, stream=True) # type: ignore if response.status_code != 200: raise BedrockError(status_code=response.status_code, message=response.text) decoder = 
AWSEventStreamDecoder() completion_stream = decoder.aiter_bytes(response.aiter_bytes(chunk_size=1024)) streaming_response = CustomStreamWrapper( completion_stream=completion_stream, model=model, custom_llm_provider="bedrock", logging_obj=logging_obj, ) return streaming_response def embedding(self, *args, **kwargs): return super().embedding(*args, **kwargs)
() -> None
63,004
litellm.llms.bedrock_httpx
async_completion
null
def completion( self, model: str, messages: list, custom_prompt_dict: dict, model_response: ModelResponse, print_verbose: Callable, encoding, logging_obj, optional_params: dict, acompletion: bool, timeout: Optional[Union[float, httpx.Timeout]], litellm_params=None, logger_fn=None, extra_headers: Optional[dict] = None, client: Optional[Union[AsyncHTTPHandler, HTTPHandler]] = None, ) -> Union[ModelResponse, CustomStreamWrapper]: try: import boto3 from botocore.auth import SigV4Auth from botocore.awsrequest import AWSRequest from botocore.credentials import Credentials except ImportError as e: raise ImportError("Missing boto3 to call bedrock. Run 'pip install boto3'.") ## SETUP ## stream = optional_params.pop("stream", None) ## CREDENTIALS ## # pop aws_secret_access_key, aws_access_key_id, aws_region_name from kwargs, since completion calls fail with them aws_secret_access_key = optional_params.pop("aws_secret_access_key", None) aws_access_key_id = optional_params.pop("aws_access_key_id", None) aws_region_name = optional_params.pop("aws_region_name", None) aws_role_name = optional_params.pop("aws_role_name", None) aws_session_name = optional_params.pop("aws_session_name", None) aws_profile_name = optional_params.pop("aws_profile_name", None) aws_bedrock_runtime_endpoint = optional_params.pop( "aws_bedrock_runtime_endpoint", None ) # https://bedrock-runtime.{region_name}.amazonaws.com ### SET REGION NAME ### if aws_region_name is None: # check env # litellm_aws_region_name = get_secret("AWS_REGION_NAME", None) if litellm_aws_region_name is not None and isinstance( litellm_aws_region_name, str ): aws_region_name = litellm_aws_region_name standard_aws_region_name = get_secret("AWS_REGION", None) if standard_aws_region_name is not None and isinstance( standard_aws_region_name, str ): aws_region_name = standard_aws_region_name if aws_region_name is None: aws_region_name = "us-west-2" credentials: Credentials = self.get_credentials( aws_access_key_id=aws_access_key_id, 
aws_secret_access_key=aws_secret_access_key, aws_region_name=aws_region_name, aws_session_name=aws_session_name, aws_profile_name=aws_profile_name, aws_role_name=aws_role_name, ) ### SET RUNTIME ENDPOINT ### endpoint_url = "" env_aws_bedrock_runtime_endpoint = get_secret("AWS_BEDROCK_RUNTIME_ENDPOINT") if aws_bedrock_runtime_endpoint is not None and isinstance( aws_bedrock_runtime_endpoint, str ): endpoint_url = aws_bedrock_runtime_endpoint elif env_aws_bedrock_runtime_endpoint and isinstance( env_aws_bedrock_runtime_endpoint, str ): endpoint_url = env_aws_bedrock_runtime_endpoint else: endpoint_url = f"https://bedrock-runtime.{aws_region_name}.amazonaws.com" if stream is not None and stream == True: endpoint_url = f"{endpoint_url}/model/{model}/invoke-with-response-stream" else: endpoint_url = f"{endpoint_url}/model/{model}/invoke" sigv4 = SigV4Auth(credentials, "bedrock", aws_region_name) provider = model.split(".")[0] prompt, chat_history = self.convert_messages_to_prompt( model, messages, provider, custom_prompt_dict ) inference_params = copy.deepcopy(optional_params) if provider == "cohere": if model.startswith("cohere.command-r"): ## LOAD CONFIG config = litellm.AmazonCohereChatConfig().get_config() for k, v in config.items(): if ( k not in inference_params ): # completion(top_k=3) > anthropic_config(top_k=3) <- allows for dynamic variables to be passed in inference_params[k] = v _data = {"message": prompt, **inference_params} if chat_history is not None: _data["chat_history"] = chat_history data = json.dumps(_data) else: ## LOAD CONFIG config = litellm.AmazonCohereConfig.get_config() for k, v in config.items(): if ( k not in inference_params ): # completion(top_k=3) > anthropic_config(top_k=3) <- allows for dynamic variables to be passed in inference_params[k] = v if stream == True: inference_params["stream"] = ( True # cohere requires stream = True in inference params ) data = json.dumps({"prompt": prompt, **inference_params}) else: raise 
Exception("UNSUPPORTED PROVIDER") ## COMPLETION CALL headers = {"Content-Type": "application/json"} if extra_headers is not None: headers = {"Content-Type": "application/json", **extra_headers} request = AWSRequest( method="POST", url=endpoint_url, data=data, headers=headers ) sigv4.add_auth(request) prepped = request.prepare() ## LOGGING logging_obj.pre_call( input=messages, api_key="", additional_args={ "complete_input_dict": data, "api_base": prepped.url, "headers": prepped.headers, }, ) ### ROUTING (ASYNC, STREAMING, SYNC) if acompletion: if isinstance(client, HTTPHandler): client = None if stream: return self.async_streaming( model=model, messages=messages, data=data, api_base=prepped.url, model_response=model_response, print_verbose=print_verbose, encoding=encoding, logging_obj=logging_obj, optional_params=optional_params, stream=True, litellm_params=litellm_params, logger_fn=logger_fn, headers=prepped.headers, timeout=timeout, client=client, ) # type: ignore ### ASYNC COMPLETION return self.async_completion( model=model, messages=messages, data=data, api_base=prepped.url, model_response=model_response, print_verbose=print_verbose, encoding=encoding, logging_obj=logging_obj, optional_params=optional_params, stream=False, litellm_params=litellm_params, logger_fn=logger_fn, headers=prepped.headers, timeout=timeout, client=client, ) # type: ignore if client is None or isinstance(client, AsyncHTTPHandler): _params = {} if timeout is not None: if isinstance(timeout, float) or isinstance(timeout, int): timeout = httpx.Timeout(timeout) _params["timeout"] = timeout self.client = HTTPHandler(**_params) # type: ignore else: self.client = client if stream is not None and stream == True: response = self.client.post( url=prepped.url, headers=prepped.headers, # type: ignore data=data, stream=stream, ) if response.status_code != 200: raise BedrockError( status_code=response.status_code, message=response.text ) decoder = AWSEventStreamDecoder() completion_stream = 
decoder.iter_bytes(response.iter_bytes(chunk_size=1024)) streaming_response = CustomStreamWrapper( completion_stream=completion_stream, model=model, custom_llm_provider="bedrock", logging_obj=logging_obj, ) return streaming_response response = self.client.post(url=prepped.url, headers=prepped.headers, data=data) # type: ignore try: response.raise_for_status() except httpx.HTTPStatusError as err: error_code = err.response.status_code raise BedrockError(status_code=error_code, message=response.text) return self.process_response( model=model, response=response, model_response=model_response, stream=stream, logging_obj=logging_obj, optional_params=optional_params, api_key="", data=data, messages=messages, print_verbose=print_verbose, encoding=encoding, )
(self, model: str, messages: list, api_base: str, model_response: litellm.utils.ModelResponse, print_verbose: Callable, data: str, timeout: Union[float, openai.Timeout, NoneType], encoding, logging_obj, stream, optional_params: dict, litellm_params=None, logger_fn=None, headers={}, client: Optional[litellm.llms.custom_httpx.http_handler.AsyncHTTPHandler] = None) -> litellm.utils.ModelResponse
63,007
litellm.llms.bedrock_httpx
convert_messages_to_prompt
null
def convert_messages_to_prompt( self, model, messages, provider, custom_prompt_dict ) -> Tuple[str, Optional[list]]: # handle anthropic prompts and amazon titan prompts prompt = "" chat_history: Optional[list] = None if provider == "anthropic" or provider == "amazon": if model in custom_prompt_dict: # check if the model has a registered custom prompt model_prompt_details = custom_prompt_dict[model] prompt = custom_prompt( role_dict=model_prompt_details["roles"], initial_prompt_value=model_prompt_details["initial_prompt_value"], final_prompt_value=model_prompt_details["final_prompt_value"], messages=messages, ) else: prompt = prompt_factory( model=model, messages=messages, custom_llm_provider="bedrock" ) elif provider == "mistral": prompt = prompt_factory( model=model, messages=messages, custom_llm_provider="bedrock" ) elif provider == "meta": prompt = prompt_factory( model=model, messages=messages, custom_llm_provider="bedrock" ) elif provider == "cohere": prompt, chat_history = cohere_message_pt(messages=messages) else: prompt = "" for message in messages: if "role" in message: if message["role"] == "user": prompt += f"{message['content']}" else: prompt += f"{message['content']}" else: prompt += f"{message['content']}" return prompt, chat_history # type: ignore
(self, model, messages, provider, custom_prompt_dict) -> Tuple[str, Optional[list]]
63,010
litellm.llms.bedrock_httpx
embedding
null
def embedding(self, *args, **kwargs): return super().embedding(*args, **kwargs)
(self, *args, **kwargs)
63,011
litellm.llms.bedrock_httpx
get_credentials
Return a boto3.Credentials object
def get_credentials( self, aws_access_key_id: Optional[str] = None, aws_secret_access_key: Optional[str] = None, aws_region_name: Optional[str] = None, aws_session_name: Optional[str] = None, aws_profile_name: Optional[str] = None, aws_role_name: Optional[str] = None, ): """ Return a boto3.Credentials object """ import boto3 ## CHECK IS 'os.environ/' passed in params_to_check: List[Optional[str]] = [ aws_access_key_id, aws_secret_access_key, aws_region_name, aws_session_name, aws_profile_name, aws_role_name, ] # Iterate over parameters and update if needed for i, param in enumerate(params_to_check): if param and param.startswith("os.environ/"): _v = get_secret(param) if _v is not None and isinstance(_v, str): params_to_check[i] = _v # Assign updated values back to parameters ( aws_access_key_id, aws_secret_access_key, aws_region_name, aws_session_name, aws_profile_name, aws_role_name, ) = params_to_check ### CHECK STS ### if aws_role_name is not None and aws_session_name is not None: sts_client = boto3.client( "sts", aws_access_key_id=aws_access_key_id, # [OPTIONAL] aws_secret_access_key=aws_secret_access_key, # [OPTIONAL] ) sts_response = sts_client.assume_role( RoleArn=aws_role_name, RoleSessionName=aws_session_name ) return sts_response["Credentials"] elif aws_profile_name is not None: ### CHECK SESSION ### # uses auth values from AWS profile usually stored in ~/.aws/credentials client = boto3.Session(profile_name=aws_profile_name) return client.get_credentials() else: session = boto3.Session( aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, region_name=aws_region_name, ) return session.get_credentials()
(self, aws_access_key_id: Optional[str] = None, aws_secret_access_key: Optional[str] = None, aws_region_name: Optional[str] = None, aws_session_name: Optional[str] = None, aws_profile_name: Optional[str] = None, aws_role_name: Optional[str] = None)
63,012
litellm.llms.bedrock_httpx
process_response
null
def process_response( self, model: str, response: Union[requests.Response, httpx.Response], model_response: ModelResponse, stream: bool, logging_obj: Logging, optional_params: dict, api_key: str, data: Union[dict, str], messages: List, print_verbose, encoding, ) -> ModelResponse: ## LOGGING logging_obj.post_call( input=messages, api_key=api_key, original_response=response.text, additional_args={"complete_input_dict": data}, ) print_verbose(f"raw model_response: {response.text}") ## RESPONSE OBJECT try: completion_response = response.json() except: raise BedrockError(message=response.text, status_code=422) try: model_response.choices[0].message.content = completion_response["text"] # type: ignore except Exception as e: raise BedrockError(message=response.text, status_code=422) ## CALCULATING USAGE - bedrock returns usage in the headers prompt_tokens = int( response.headers.get( "x-amzn-bedrock-input-token-count", len(encoding.encode("".join(m.get("content", "") for m in messages))), ) ) completion_tokens = int( response.headers.get( "x-amzn-bedrock-output-token-count", len( encoding.encode( model_response.choices[0].message.content, # type: ignore disallowed_special=(), ) ), ) ) model_response["created"] = int(time.time()) model_response["model"] = model usage = Usage( prompt_tokens=prompt_tokens, completion_tokens=completion_tokens, total_tokens=prompt_tokens + completion_tokens, ) setattr(model_response, "usage", usage) return model_response
(self, model: str, response: Union[requests.models.Response, httpx.Response], model_response: litellm.utils.ModelResponse, stream: bool, logging_obj: litellm.utils.Logging, optional_params: dict, api_key: str, data: Union[dict, str], messages: List, print_verbose, encoding) -> litellm.utils.ModelResponse
63,014
typing
BinaryIO
Typed version of the return of open() in binary mode.
class BinaryIO(IO[bytes]): """Typed version of the return of open() in binary mode.""" __slots__ = () @abstractmethod def write(self, s: Union[bytes, bytearray]) -> int: pass @abstractmethod def __enter__(self) -> 'BinaryIO': pass
()
63,015
typing
__enter__
null
@abstractmethod def __enter__(self) -> 'BinaryIO': pass
(self) -> <class 'BinaryIO'>
63,030
typing
write
null
@abstractmethod def write(self, s: Union[bytes, bytearray]) -> int: pass
(self, s: Union[bytes, bytearray]) -> int
63,032
litellm.exceptions
BudgetExceededError
null
class BudgetExceededError(Exception): def __init__(self, current_cost, max_budget): self.current_cost = current_cost self.max_budget = max_budget message = f"Budget has been exceeded! Current cost: {current_cost}, Max budget: {max_budget}" super().__init__(message)
(current_cost, max_budget)
63,033
litellm.exceptions
__init__
null
def __init__(self, current_cost, max_budget): self.current_cost = current_cost self.max_budget = max_budget message = f"Budget has been exceeded! Current cost: {current_cost}, Max budget: {max_budget}" super().__init__(message)
(self, current_cost, max_budget)
63,034
litellm.budget_manager
BudgetManager
null
class BudgetManager:
    """Track per-user LLM spend against configured budgets.

    Budget state lives in ``self.user_dict`` (user id -> budget record) and is
    persisted either to a local ``user_cost.json`` file
    (``client_type="local"``) or to a hosted service (``client_type="hosted"``).
    """

    def __init__(
        self,
        project_name: str,
        client_type: str = "local",
        api_base: Optional[str] = None,
        headers: Optional[dict] = None,
    ):
        self.client_type = client_type
        self.project_name = project_name
        self.api_base = api_base or "https://api.litellm.ai"
        self.headers = headers or {"Content-Type": "application/json"}
        ## load the data or init the initial dictionaries
        self.load_data()

    def print_verbose(self, print_statement):
        # Best-effort logging only; never let logging take down budget tracking.
        try:
            if litellm.set_verbose:
                import logging

                logging.info(print_statement)
        except Exception:
            # BUGFIX: was a bare `except:` (also swallowed KeyboardInterrupt).
            pass

    def load_data(self):
        """Populate ``self.user_dict`` from the configured backend."""
        if self.client_type == "local":
            # Check if user dict file exists
            if os.path.isfile("user_cost.json"):
                # Load the user dict
                with open("user_cost.json", "r") as json_file:
                    self.user_dict = json.load(json_file)
            else:
                self.print_verbose("User Dictionary not found!")
                self.user_dict = {}
            self.print_verbose(f"user dict from local: {self.user_dict}")
        elif self.client_type == "hosted":
            # Load the user_dict from hosted db
            # (cleanup: removed an unused local `headers` dict; the request has
            # always been sent with `self.headers`.)
            url = self.api_base + "/get_budget"
            data = {"project_name": self.project_name}
            response = requests.post(url, headers=self.headers, json=data)
            response = response.json()
            if response["status"] == "error":
                # assume this means the user dict hasn't been stored yet
                self.user_dict = {}
            else:
                self.user_dict = response["data"]

    def create_budget(
        self,
        total_budget: float,
        user: str,
        duration: Optional[Literal["daily", "weekly", "monthly", "yearly"]] = None,
        created_at: Optional[float] = None,
    ):
        """Create (or overwrite) *user*'s budget and return the stored record.

        BUGFIX: the old default ``created_at: float = time.time()`` was
        evaluated once at import time, so every budget shared the same stale
        timestamp. ``None`` now means "the time of the call".
        """
        if created_at is None:
            created_at = time.time()
        self.user_dict[user] = {"total_budget": total_budget}
        if duration is None:
            return self.user_dict[user]

        duration_to_days = {"daily": 1, "weekly": 7, "monthly": 28, "yearly": 365}
        if duration not in duration_to_days:
            raise ValueError(
                """duration needs to be one of ["daily", "weekly", "monthly", "yearly"]"""
            )
        self.user_dict[user] = {
            "total_budget": total_budget,
            "duration": duration_to_days[duration],
            "created_at": created_at,
            "last_updated_at": created_at,
        }
        self._save_data_thread()  # [Non-Blocking] Update persistent storage without blocking execution
        return self.user_dict[user]

    def projected_cost(self, model: str, messages: list, user: str):
        """Estimate *user*'s total spend if *messages* were sent as a prompt now."""
        text = "".join(message["content"] for message in messages)
        prompt_tokens = litellm.token_counter(model=model, text=text)
        prompt_cost, _ = litellm.cost_per_token(
            model=model, prompt_tokens=prompt_tokens, completion_tokens=0
        )
        current_cost = self.user_dict[user].get("current_cost", 0)
        projected_cost = prompt_cost + current_cost
        return projected_cost

    def get_total_budget(self, user: str):
        """Return the configured budget for *user* (KeyError if unknown)."""
        return self.user_dict[user]["total_budget"]

    def update_cost(
        self,
        user: str,
        completion_obj: Optional["ModelResponse"] = None,
        model: Optional[str] = None,
        input_text: Optional[str] = None,
        output_text: Optional[str] = None,
    ):
        """Add the cost of one completion to *user*'s running totals.

        Either pass ``completion_obj``, or pass ``model`` + ``input_text`` +
        ``output_text`` so the cost can be recomputed from token counts.
        """
        if model and input_text and output_text:
            prompt_tokens = litellm.token_counter(
                model=model, messages=[{"role": "user", "content": input_text}]
            )
            completion_tokens = litellm.token_counter(
                model=model, messages=[{"role": "user", "content": output_text}]
            )
            (
                prompt_tokens_cost_usd_dollar,
                completion_tokens_cost_usd_dollar,
            ) = litellm.cost_per_token(
                model=model,
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
            )
            cost = prompt_tokens_cost_usd_dollar + completion_tokens_cost_usd_dollar
        elif completion_obj:
            cost = litellm.completion_cost(completion_response=completion_obj)
            model = completion_obj[
                "model"
            ]  # if this throws an error try, model = completion_obj['model']
        else:
            raise ValueError(
                "Either a chat completion object or the text response needs to be passed in. Learn more - https://docs.litellm.ai/docs/budget_manager"
            )

        self.user_dict[user]["current_cost"] = cost + self.user_dict[user].get(
            "current_cost", 0
        )
        if "model_cost" in self.user_dict[user]:
            self.user_dict[user]["model_cost"][model] = cost + self.user_dict[user][
                "model_cost"
            ].get(model, 0)
        else:
            self.user_dict[user]["model_cost"] = {model: cost}

        self._save_data_thread()  # [Non-Blocking] Update persistent storage without blocking execution
        return {"user": self.user_dict[user]}

    def get_current_cost(self, user):
        """Accumulated spend so far for *user* (0 if nothing recorded)."""
        return self.user_dict[user].get("current_cost", 0)

    def get_model_cost(self, user):
        """Per-model spend breakdown for *user* (0 if nothing recorded)."""
        return self.user_dict[user].get("model_cost", 0)

    def is_valid_user(self, user: str) -> bool:
        """True if a budget entry exists for *user*."""
        return user in self.user_dict

    def get_users(self):
        """All user ids that currently have a budget entry."""
        return list(self.user_dict.keys())

    def reset_cost(self, user):
        """Zero out *user*'s accumulated costs (the budget itself is kept)."""
        self.user_dict[user]["current_cost"] = 0
        self.user_dict[user]["model_cost"] = {}
        return {"user": self.user_dict[user]}

    def reset_on_duration(self, user: str):
        """Reset *user*'s costs when their budget period has elapsed."""
        # Get current and creation time
        last_updated_at = self.user_dict[user]["last_updated_at"]
        current_time = time.time()

        # Convert duration from days to seconds
        duration_in_seconds = self.user_dict[user]["duration"] * 24 * 60 * 60

        # Check if duration has elapsed
        if current_time - last_updated_at >= duration_in_seconds:
            # Reset cost if duration has elapsed and update the creation time
            self.reset_cost(user)
            self.user_dict[user]["last_updated_at"] = current_time
            self._save_data_thread()  # Save the data

    def update_budget_all_users(self):
        """Run the periodic reset check for every known user."""
        for user in self.get_users():
            if "duration" in self.user_dict[user]:
                self.reset_on_duration(user)

    def _save_data_thread(self):
        # Fire-and-forget persistence so callers are not blocked on I/O.
        thread = threading.Thread(
            target=self.save_data
        )  # [Non-Blocking]: saves data without blocking execution
        thread.start()

    def save_data(self):
        """Persist ``self.user_dict`` to the configured backend."""
        if self.client_type == "local":
            # save the user dict
            with open("user_cost.json", "w") as json_file:
                json.dump(
                    self.user_dict, json_file, indent=4
                )  # Indent for pretty formatting
            return {"status": "success"}
        elif self.client_type == "hosted":
            # (cleanup: removed an unused local `headers` dict here too.)
            url = self.api_base + "/set_budget"
            data = {"project_name": self.project_name, "user_dict": self.user_dict}
            response = requests.post(url, headers=self.headers, json=data)
            response = response.json()
            return response
(project_name: str, client_type: str = 'local', api_base: Optional[str] = None, headers: Optional[dict] = None)
63,035
litellm.budget_manager
__init__
null
def __init__(
    self,
    project_name: str,
    client_type: str = "local",
    api_base: Optional[str] = None,
    headers: Optional[dict] = None,
):
    """Configure the manager and hydrate budget state via ``load_data``."""
    self.project_name = project_name
    self.client_type = client_type
    self.api_base = api_base if api_base else "https://api.litellm.ai"
    self.headers = headers if headers else {"Content-Type": "application/json"}
    # Pull any previously persisted budgets into memory.
    self.load_data()
(self, project_name: str, client_type: str = 'local', api_base: Optional[str] = None, headers: Optional[dict] = None)
63,036
litellm.budget_manager
_save_data_thread
null
def _save_data_thread(self):
    # Fire-and-forget persistence: the write happens on a background thread so
    # the caller is never blocked on disk or network I/O.
    worker = threading.Thread(target=self.save_data)
    worker.start()
(self)
63,037
litellm.budget_manager
create_budget
null
def create_budget(
    self,
    total_budget: float,
    user: str,
    duration: Optional[Literal["daily", "weekly", "monthly", "yearly"]] = None,
    created_at: Optional[float] = None,
):
    """Create (or overwrite) *user*'s budget and return the stored record.

    BUGFIX: the previous default ``created_at: float = time.time()`` was
    evaluated once at import time, so every call shared one stale timestamp.
    A ``None`` default now means "the time of the call".
    """
    if created_at is None:
        created_at = time.time()
    self.user_dict[user] = {"total_budget": total_budget}
    if duration is None:
        return self.user_dict[user]

    duration_to_days = {"daily": 1, "weekly": 7, "monthly": 28, "yearly": 365}
    if duration not in duration_to_days:
        raise ValueError(
            """duration needs to be one of ["daily", "weekly", "monthly", "yearly"]"""
        )
    self.user_dict[user] = {
        "total_budget": total_budget,
        "duration": duration_to_days[duration],
        "created_at": created_at,
        "last_updated_at": created_at,
    }
    self._save_data_thread()  # [Non-Blocking] Update persistent storage without blocking execution
    return self.user_dict[user]
(self, total_budget: float, user: str, duration: Optional[Literal['daily', 'weekly', 'monthly', 'yearly']] = None, created_at: float = 1715501035.4150069)
63,038
litellm.budget_manager
get_current_cost
null
def get_current_cost(self, user):
    """Return *user*'s accumulated spend so far (0 if nothing recorded)."""
    budget_entry = self.user_dict[user]
    return budget_entry.get("current_cost", 0)
(self, user)
63,039
litellm.budget_manager
get_model_cost
null
def get_model_cost(self, user):
    """Return the per-model spend breakdown for *user*.

    NOTE(review): the fallback is 0 rather than {} — callers appear to rely on
    a falsy default, so that behavior is preserved here.
    """
    record = self.user_dict[user]
    return record.get("model_cost", 0)
(self, user)
63,040
litellm.budget_manager
get_total_budget
null
def get_total_budget(self, user: str):
    """Return the configured budget for *user* (KeyError if unknown)."""
    budget = self.user_dict[user]["total_budget"]
    return budget
(self, user: str)
63,041
litellm.budget_manager
get_users
null
def get_users(self):
    """Return every user id that currently has a budget entry."""
    return [*self.user_dict]
(self)
63,042
litellm.budget_manager
is_valid_user
null
def is_valid_user(self, user: str) -> bool:
    """True when *user* already has a budget entry."""
    known_users = self.user_dict
    return user in known_users
(self, user: str) -> bool
63,043
litellm.budget_manager
load_data
null
def load_data(self):
    """Populate ``self.user_dict`` from the configured backend.

    "local" reads ``user_cost.json`` from the working directory (a missing
    file yields an empty dict); "hosted" fetches the stored dict from the
    budget service.
    """
    if self.client_type == "local":
        # Check if user dict file exists
        if os.path.isfile("user_cost.json"):
            # Load the user dict
            with open("user_cost.json", "r") as json_file:
                self.user_dict = json.load(json_file)
        else:
            self.print_verbose("User Dictionary not found!")
            self.user_dict = {}
        self.print_verbose(f"user dict from local: {self.user_dict}")
    elif self.client_type == "hosted":
        # Load the user_dict from hosted db
        # BUGFIX/cleanup: removed an unused local `headers` dict; the request
        # has always been sent with `self.headers`.
        url = self.api_base + "/get_budget"
        data = {"project_name": self.project_name}
        response = requests.post(url, headers=self.headers, json=data)
        response = response.json()
        if response["status"] == "error":
            # assume this means the user dict hasn't been stored yet
            self.user_dict = {}
        else:
            self.user_dict = response["data"]
(self)
63,044
litellm.budget_manager
print_verbose
null
def print_verbose(self, print_statement):
    """Log *print_statement* when litellm verbose mode is on.

    Deliberately best-effort: any failure (including litellm not yet being
    importable) is swallowed so logging can never break the caller.
    """
    try:
        if litellm.set_verbose:
            import logging

            logging.info(print_statement)
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        pass
(self, print_statement)