index (int64, 0-731k) | package (string, 2-98 chars, nullable) | name (string, 1-76 chars) | docstring (string, 0-281k chars, nullable) | code (string, 4-1.07M chars, nullable) | signature (string, 2-42.8k chars, nullable) |
---|---|---|---|---|---|
63,338 |
litellm.llms.watsonx
|
__init__
| null |
def __init__(
self,
decoding_method: Optional[str] = None,
temperature: Optional[float] = None,
max_new_tokens: Optional[int] = None,
min_new_tokens: Optional[int] = None,
length_penalty: Optional[dict] = None,
stop_sequences: Optional[List[str]] = None,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
repetition_penalty: Optional[float] = None,
truncate_input_tokens: Optional[int] = None,
include_stop_sequences: Optional[bool] = None,
return_options: Optional[dict] = None,
random_seed: Optional[int] = None,
moderations: Optional[dict] = None,
stream: Optional[bool] = None,
**kwargs,
) -> None:
locals_ = locals()
for key, value in locals_.items():
if key != "self" and value is not None:
setattr(self.__class__, key, value)
|
(self, decoding_method: Optional[str] = None, temperature: Optional[float] = None, max_new_tokens: Optional[int] = None, min_new_tokens: Optional[int] = None, length_penalty: Optional[dict] = None, stop_sequences: Optional[List[str]] = None, top_k: Optional[int] = None, top_p: Optional[float] = None, repetition_penalty: Optional[float] = None, truncate_input_tokens: Optional[int] = None, include_stop_sequences: Optional[bool] = None, return_options: Optional[dict] = None, random_seed: Optional[int] = None, moderations: Optional[dict] = None, stream: Optional[bool] = None, **kwargs) -> NoneType
|
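The constructor above relies on a pattern used throughout litellm's provider configs: every non-`None` argument captured via `locals()` is promoted to a class-level attribute, so the setting acts as a shared default for later calls. A minimal self-contained sketch of that pattern (the `DemoConfig` class is illustrative, not litellm code):

```python
from typing import Optional

class DemoConfig:
    # class-level defaults; any non-None constructor argument overrides them
    temperature: Optional[float] = None
    max_new_tokens: Optional[int] = None

    def __init__(
        self,
        temperature: Optional[float] = None,
        max_new_tokens: Optional[int] = None,
    ) -> None:
        for key, value in locals().items():
            if key != "self" and value is not None:
                # setattr on self.__class__ mutates the *class*, so the
                # setting is shared by all instances, not stored per-instance
                setattr(self.__class__, key, value)

DemoConfig(temperature=0.2)
assert DemoConfig.temperature == 0.2  # visible on the class itself
```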
63,339 |
litellm.llms.watsonx
|
get_eu_regions
|
Source: https://www.ibm.com/docs/en/watsonx/saas?topic=integrations-regional-availability
|
def get_eu_regions(self) -> List[str]:
"""
Source: https://www.ibm.com/docs/en/watsonx/saas?topic=integrations-regional-availability
"""
return [
"eu-de",
"eu-gb",
]
|
(self) -> List[str]
|
63,340 |
litellm.llms.watsonx
|
get_mapped_special_auth_params
|
Common auth params across bedrock/vertex_ai/azure/watsonx
|
def get_mapped_special_auth_params(self) -> dict:
"""
Common auth params across bedrock/vertex_ai/azure/watsonx
"""
return {
"project": "watsonx_project",
"region_name": "watsonx_region_name",
"token": "watsonx_token",
}
|
(self) -> dict
|
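The returned dict maps provider-agnostic auth keys to their watsonx-specific names. A hedged sketch of how such a map could be applied; the `translate_auth_params` helper is hypothetical, not a litellm function:

```python
def translate_auth_params(common_params: dict, mapping: dict) -> dict:
    # rename generic auth keys (project, region_name, token) to their
    # provider-specific equivalents; unmapped keys pass through unchanged
    return {mapping.get(k, k): v for k, v in common_params.items()}

watsonx_map = {
    "project": "watsonx_project",
    "region_name": "watsonx_region_name",
    "token": "watsonx_token",
}
print(translate_auth_params({"project": "my-proj", "region_name": "eu-de"}, watsonx_map))
# {'watsonx_project': 'my-proj', 'watsonx_region_name': 'eu-de'}
```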
63,341 |
litellm.llms.watsonx
|
get_supported_openai_params
| null |
def get_supported_openai_params(self):
return [
"temperature", # equivalent to temperature
"max_tokens", # equivalent to max_new_tokens
"top_p", # equivalent to top_p
"frequency_penalty", # equivalent to repetition_penalty
"stop", # equivalent to stop_sequences
"seed", # equivalent to random_seed
"stream", # equivalent to stream
]
|
(self)
|
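Per the inline comments, each supported OpenAI parameter has a watsonx counterpart (`max_tokens` maps to `max_new_tokens`, `frequency_penalty` to `repetition_penalty`, `stop` to `stop_sequences`, `seed` to `random_seed`). A minimal sketch of that translation under those assumed mappings; the helper below is illustrative, not the litellm implementation:

```python
OPENAI_TO_WATSONX = {
    "temperature": "temperature",
    "max_tokens": "max_new_tokens",
    "top_p": "top_p",
    "frequency_penalty": "repetition_penalty",
    "stop": "stop_sequences",
    "seed": "random_seed",
    "stream": "stream",
}

def map_openai_params(openai_params: dict) -> dict:
    # rename supported parameters, silently dropping unsupported ones
    return {
        OPENAI_TO_WATSONX[k]: v
        for k, v in openai_params.items()
        if k in OPENAI_TO_WATSONX
    }

print(map_openai_params({"max_tokens": 128, "stop": ["\n\n"], "logprobs": True}))
# {'max_new_tokens': 128, 'stop_sequences': ['\n\n']}
```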
63,343 |
litellm.utils
|
ImageResponse
| null |
class ImageResponse(OpenAIObject):
created: Optional[int] = None
data: Optional[list] = None
usage: Optional[dict] = None
_hidden_params: dict = {}
    def __init__(self, created=None, data=None, response_ms=None):
        # normalize falsy inputs to None; note _response_ms is computed
        # here but never stored on the object
        _response_ms = response_ms if response_ms else None
        data = data if data else None
        created = created if created else None
        super().__init__(data=data, created=created)
        self.usage = {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}
def __contains__(self, key):
# Define custom behavior for the 'in' operator
return hasattr(self, key)
def get(self, key, default=None):
# Custom .get() method to access attributes with a default value if the attribute doesn't exist
return getattr(self, key, default)
def __getitem__(self, key):
# Allow dictionary-style access to attributes
return getattr(self, key)
def __setitem__(self, key, value):
# Allow dictionary-style assignment of attributes
setattr(self, key, value)
def json(self, **kwargs):
try:
return self.model_dump() # noqa
        except Exception:
# if using pydantic v1
return self.dict()
|
(created=None, data=None, response_ms=None)
|
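The `__contains__`/`get`/`__getitem__`/`__setitem__` overrides above make the response usable both attribute-style and dict-style. A self-contained sketch of that access pattern on a plain class (not the pydantic-backed original):

```python
class DictLikeResponse:
    def __init__(self, created=None, data=None):
        self.created = created
        self.data = data

    def __contains__(self, key):
        return hasattr(self, key)           # supports: "created" in resp

    def get(self, key, default=None):
        return getattr(self, key, default)  # dict-style .get with default

    def __getitem__(self, key):
        return getattr(self, key)           # resp["created"]

    def __setitem__(self, key, value):
        setattr(self, key, value)           # resp["usage"] = ...

resp = DictLikeResponse(created=1700000000, data=[{"url": "https://example.com/img.png"}])
assert "created" in resp
assert resp["created"] == resp.created
resp["usage"] = {"total_tokens": 0}
print(resp.get("missing", "fallback"))  # prints: fallback
```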
63,352 |
litellm.utils
|
__init__
| null |
def __init__(self, created=None, data=None, response_ms=None):
    # normalize falsy inputs to None; note _response_ms is computed
    # here but never stored on the object
    _response_ms = response_ms if response_ms else None
    data = data if data else None
    created = created if created else None
    super().__init__(data=data, created=created)
    self.usage = {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}
|
(self, created=None, data=None, response_ms=None)
|
63,378 |
litellm.exceptions
|
InvalidRequestError
| null |
class InvalidRequestError(openai.BadRequestError): # type: ignore
def __init__(self, message, model, llm_provider):
self.status_code = 400
self.message = message
self.model = model
self.llm_provider = llm_provider
super().__init__(
self.message, f"{self.model}"
) # Call the base class constructor with the parameters it needs
|
(message, model, llm_provider)
|
63,379 |
litellm.exceptions
|
__init__
| null |
def __init__(self, message, model, llm_provider):
self.status_code = 400
self.message = message
self.model = model
self.llm_provider = llm_provider
super().__init__(
self.message, f"{self.model}"
) # Call the base class constructor with the parameters it needs
|
(self, message, model, llm_provider)
|
63,380 |
litellm.proxy._types
|
KeyManagementSettings
| null |
class KeyManagementSettings(LiteLLMBase):
hosted_keys: List
|
(*, hosted_keys: List) -> None
|
63,404 |
litellm.proxy._types
|
fields_set
| null |
def fields_set(self):
try:
return self.model_fields_set # noqa
    except Exception:
# if using pydantic v1
return self.__fields_set__
|
(self)
|
63,405 |
litellm.proxy._types
|
json
| null |
def json(self, **kwargs):
try:
return self.model_dump(**kwargs) # noqa
    except Exception:
# if using pydantic v1
return self.dict(**kwargs)
|
(self, **kwargs)
|
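`fields_set` and `json` above share one compatibility trick: call the pydantic v2 method first (`model_fields_set`, `model_dump`) and fall back to the v1 name on failure. A sketch of the pattern, assuming pydantic is installed:

```python
from pydantic import BaseModel

class CompatModel(BaseModel):
    name: str = "litellm"

    def as_dict(self, **kwargs) -> dict:
        try:
            return self.model_dump(**kwargs)  # pydantic v2 API
        except AttributeError:
            return self.dict(**kwargs)        # pydantic v1 fallback

print(CompatModel().as_dict())  # {'name': 'litellm'} on either major version
```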
63,410 |
litellm.proxy._types
|
KeyManagementSystem
|
An enumeration.
|
class KeyManagementSystem(enum.Enum):
GOOGLE_KMS = "google_kms"
AZURE_KEY_VAULT = "azure_key_vault"
AWS_SECRET_MANAGER = "aws_secret_manager"
LOCAL = "local"
|
(value, names=None, *, module=None, qualname=None, type=None, start=1)
|
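Because each member's value is a plain string, a configured key-management backend can be resolved directly to an enum member. A minimal sketch (re-declaring the enum locally so it runs standalone):

```python
import enum

class KeyManagementSystem(enum.Enum):
    GOOGLE_KMS = "google_kms"
    AZURE_KEY_VAULT = "azure_key_vault"
    AWS_SECRET_MANAGER = "aws_secret_manager"
    LOCAL = "local"

# resolve a string from config (YAML, env var, ...) to a member;
# an unknown string raises ValueError
configured = KeyManagementSystem("aws_secret_manager")
assert configured is KeyManagementSystem.AWS_SECRET_MANAGER
```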
63,411 |
litellm.main
|
LiteLLM
| null |
class LiteLLM:
def __init__(
self,
*,
api_key=None,
organization: Optional[str] = None,
base_url: Optional[str] = None,
timeout: Optional[float] = 600,
max_retries: Optional[int] = litellm.num_retries,
default_headers: Optional[Mapping[str, str]] = None,
):
self.params = locals()
self.chat = Chat(self.params, router_obj=None)
|
(*, api_key=None, organization: Optional[str] = None, base_url: Optional[str] = None, timeout: Optional[float] = 600, max_retries: Optional[int] = None, default_headers: Optional[Mapping[str, str]] = None)
|
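The constructor snapshots its arguments with `locals()` and hands them to a `Chat` helper, mirroring the OpenAI SDK's client shape. A hedged usage sketch, assuming litellm is installed and exports the class as shown in this record:

```python
import litellm

# client-style usage; the keyword arguments mirror the signature above
client = litellm.LiteLLM(
    api_key="sk-placeholder",  # placeholder, not a real key
    base_url=None,
    timeout=600,
)
# client.chat is the Chat helper built from the captured params
print(type(client.chat).__name__)
```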
63,412 |
litellm.main
|
__init__
| null |
def __init__(
self,
*,
api_key=None,
organization: Optional[str] = None,
base_url: Optional[str] = None,
timeout: Optional[float] = 600,
max_retries: Optional[int] = litellm.num_retries,
default_headers: Optional[Mapping[str, str]] = None,
):
self.params = locals()
self.chat = Chat(self.params, router_obj=None)
|
(self, *, api_key=None, organization: Optional[str] = None, base_url: Optional[str] = None, timeout: Optional[float] = 600, max_retries: Optional[int] = None, default_headers: Optional[Mapping[str, str]] = None)
|
63,413 |
litellm.types.router
|
LiteLLM_Params
|
LiteLLM Params with 'model' requirement - used for completions
|
class LiteLLM_Params(GenericLiteLLMParams):
"""
LiteLLM Params with 'model' requirement - used for completions
"""
model: str
def __init__(
self,
model: str,
custom_llm_provider: Optional[str] = None,
max_retries: Optional[Union[int, str]] = None,
tpm: Optional[int] = None,
rpm: Optional[int] = None,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
api_version: Optional[str] = None,
timeout: Optional[Union[float, str]] = None, # if str, pass in as os.environ/
stream_timeout: Optional[Union[float, str]] = (
None # timeout when making stream=True calls, if str, pass in as os.environ/
),
organization: Optional[str] = None, # for openai orgs
## VERTEX AI ##
vertex_project: Optional[str] = None,
vertex_location: Optional[str] = None,
## AWS BEDROCK / SAGEMAKER ##
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_region_name: Optional[str] = None,
**params
):
args = locals()
args.pop("max_retries", None)
args.pop("self", None)
args.pop("params", None)
args.pop("__class__", None)
if max_retries is not None and isinstance(max_retries, str):
max_retries = int(max_retries) # cast to int
super().__init__(max_retries=max_retries, **args, **params)
class Config:
extra = "allow"
arbitrary_types_allowed = True
def __contains__(self, key):
# Define custom behavior for the 'in' operator
return hasattr(self, key)
def get(self, key, default=None):
# Custom .get() method to access attributes with a default value if the attribute doesn't exist
return getattr(self, key, default)
def __getitem__(self, key):
# Allow dictionary-style access to attributes
return getattr(self, key)
def __setitem__(self, key, value):
# Allow dictionary-style assignment of attributes
setattr(self, key, value)
|
(model: str, custom_llm_provider: Optional[str] = None, max_retries: Optional[int] = None, tpm: Optional[int] = None, rpm: Optional[int] = None, api_key: Optional[str] = None, api_base: Optional[str] = None, api_version: Optional[str] = None, timeout: Union[float, str, openai.Timeout, NoneType] = None, stream_timeout: Union[float, str, NoneType] = None, organization: Optional[str] = None, vertex_project: Optional[str] = None, vertex_location: Optional[str] = None, aws_access_key_id: Optional[str] = None, aws_secret_access_key: Optional[str] = None, aws_region_name: Optional[str] = None, *, region_name: Optional[str] = None, watsonx_region_name: Optional[str] = None, input_cost_per_token: Optional[float] = None, output_cost_per_token: Optional[float] = None, input_cost_per_second: Optional[float] = None, output_cost_per_second: Optional[float] = None, **params) -> None
|
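Note the string handling in the constructor: `max_retries` may arrive as a string (as it would from a YAML config) and is cast to `int` before being handed to the parent model. A hedged sketch of that behavior, assuming the import path shown in this record:

```python
from litellm.types.router import LiteLLM_Params

# max_retries passed as a string, as a YAML config would supply it;
# __init__ casts it to int before validation
params = LiteLLM_Params(
    model="azure/my-deployment",
    api_base="https://example-resource.openai.azure.com/",
    max_retries="3",
)
assert params.max_retries == 3
print(params.model)  # azure/my-deployment
```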
63,422 |
litellm.types.router
|
__init__
| null |
def __init__(
self,
model: str,
custom_llm_provider: Optional[str] = None,
max_retries: Optional[Union[int, str]] = None,
tpm: Optional[int] = None,
rpm: Optional[int] = None,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
api_version: Optional[str] = None,
timeout: Optional[Union[float, str]] = None, # if str, pass in as os.environ/
stream_timeout: Optional[Union[float, str]] = (
None # timeout when making stream=True calls, if str, pass in as os.environ/
),
organization: Optional[str] = None, # for openai orgs
## VERTEX AI ##
vertex_project: Optional[str] = None,
vertex_location: Optional[str] = None,
## AWS BEDROCK / SAGEMAKER ##
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_region_name: Optional[str] = None,
**params
):
args = locals()
args.pop("max_retries", None)
args.pop("self", None)
args.pop("params", None)
args.pop("__class__", None)
if max_retries is not None and isinstance(max_retries, str):
max_retries = int(max_retries) # cast to int
super().__init__(max_retries=max_retries, **args, **params)
|
(self, model: str, custom_llm_provider: Optional[str] = None, max_retries: Union[str, int, NoneType] = None, tpm: Optional[int] = None, rpm: Optional[int] = None, api_key: Optional[str] = None, api_base: Optional[str] = None, api_version: Optional[str] = None, timeout: Union[float, str, NoneType] = None, stream_timeout: Union[float, str, NoneType] = None, organization: Optional[str] = None, vertex_project: Optional[str] = None, vertex_location: Optional[str] = None, aws_access_key_id: Optional[str] = None, aws_secret_access_key: Optional[str] = None, aws_region_name: Optional[str] = None, **params)
|
63,446 |
litellm.proxy._types
|
LiteLLM_UpperboundKeyGenerateParams
|
Set the default upper bound on the max budget a key created via `/key/generate` can have.
|
class LiteLLM_UpperboundKeyGenerateParams(LiteLLMBase):
"""
    Set the default upper bound on the max budget a key created via `/key/generate` can have.
"""
max_budget: Optional[float] = None
budget_duration: Optional[str] = None
max_parallel_requests: Optional[int] = None
tpm_limit: Optional[int] = None
rpm_limit: Optional[int] = None
|
(*, max_budget: Optional[float] = None, budget_duration: Optional[str] = None, max_parallel_requests: Optional[int] = None, tpm_limit: Optional[int] = None, rpm_limit: Optional[int] = None) -> None
|
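The model only declares the upper bounds; enforcing them against a `/key/generate` request happens elsewhere in the proxy. A minimal sketch of how such bounds might be applied; the `clamp_to_upperbound` helper is hypothetical, not litellm code:

```python
from typing import Optional

def clamp_to_upperbound(
    requested: Optional[float], upperbound: Optional[float]
) -> Optional[float]:
    # hypothetical helper: cap a requested value at the configured bound,
    # and default to the bound when nothing was requested
    if upperbound is None:
        return requested
    if requested is None or requested > upperbound:
        return upperbound
    return requested

assert clamp_to_upperbound(500.0, 100.0) == 100.0  # capped to the bound
assert clamp_to_upperbound(50.0, 100.0) == 50.0    # within bounds, kept
assert clamp_to_upperbound(None, 100.0) == 100.0   # defaulted to the bound
```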
63,476 |
litellm.utils
|
Logging
| null |
class Logging:
global supabaseClient, liteDebuggerClient, promptLayerLogger, weightsBiasesLogger, langsmithLogger, capture_exception, add_breadcrumb, lunaryLogger
def __init__(
self,
model,
messages,
stream,
call_type,
start_time,
litellm_call_id,
function_id,
dynamic_success_callbacks=None,
dynamic_failure_callbacks=None,
dynamic_async_success_callbacks=None,
langfuse_public_key=None,
langfuse_secret=None,
):
if call_type not in [item.value for item in CallTypes]:
allowed_values = ", ".join([item.value for item in CallTypes])
raise ValueError(
f"Invalid call_type {call_type}. Allowed values: {allowed_values}"
)
if messages is not None:
if isinstance(messages, str):
messages = [
{"role": "user", "content": messages}
] # convert text completion input to the chat completion format
elif (
isinstance(messages, list)
and len(messages) > 0
and isinstance(messages[0], str)
):
new_messages = []
for m in messages:
new_messages.append({"role": "user", "content": m})
messages = new_messages
self.model = model
self.messages = messages
self.stream = stream
self.start_time = start_time # log the call start time
self.call_type = call_type
self.litellm_call_id = litellm_call_id
self.function_id = function_id
self.streaming_chunks = [] # for generating complete stream response
self.sync_streaming_chunks = [] # for generating complete stream response
self.model_call_details = {}
self.dynamic_input_callbacks = [] # [TODO] callbacks set for just that call
self.dynamic_failure_callbacks = dynamic_failure_callbacks
self.dynamic_success_callbacks = (
dynamic_success_callbacks # callbacks set for just that call
)
self.dynamic_async_success_callbacks = (
dynamic_async_success_callbacks # callbacks set for just that call
)
## DYNAMIC LANGFUSE KEYS ##
self.langfuse_public_key = langfuse_public_key
self.langfuse_secret = langfuse_secret
## TIME TO FIRST TOKEN LOGGING ##
self.completion_start_time: Optional[datetime.datetime] = None
def update_environment_variables(
self, model, user, optional_params, litellm_params, **additional_params
):
self.optional_params = optional_params
self.model = model
self.user = user
self.litellm_params = litellm_params
self.logger_fn = litellm_params.get("logger_fn", None)
print_verbose(f"self.optional_params: {self.optional_params}")
self.model_call_details = {
"model": self.model,
"messages": self.messages,
"optional_params": self.optional_params,
"litellm_params": self.litellm_params,
"start_time": self.start_time,
"stream": self.stream,
"user": user,
"call_type": str(self.call_type),
"litellm_call_id": self.litellm_call_id,
"completion_start_time": self.completion_start_time,
**self.optional_params,
**additional_params,
}
def _pre_call(self, input, api_key, model=None, additional_args={}):
"""
Common helper function across the sync + async pre-call function
"""
# print_verbose(f"logging pre call for model: {self.model} with call type: {self.call_type}")
self.model_call_details["input"] = input
self.model_call_details["api_key"] = api_key
self.model_call_details["additional_args"] = additional_args
self.model_call_details["log_event_type"] = "pre_api_call"
if (
model
        ):  # if the model name was changed pre-call, overwrite the initial model name with the new one
self.model_call_details["model"] = model
def pre_call(self, input, api_key, model=None, additional_args={}):
# Log the exact input to the LLM API
litellm.error_logs["PRE_CALL"] = locals()
try:
self._pre_call(
input=input,
api_key=api_key,
model=model,
additional_args=additional_args,
)
# User Logging -> if you pass in a custom logging function
headers = additional_args.get("headers", {})
if headers is None:
headers = {}
data = additional_args.get("complete_input_dict", {})
api_base = additional_args.get("api_base", "")
self.model_call_details["litellm_params"]["api_base"] = str(
api_base
) # used for alerting
masked_headers = {
k: (v[:-20] + "*" * 20) if (isinstance(v, str) and len(v) > 20) else v
for k, v in headers.items()
}
formatted_headers = " ".join(
[f"-H '{k}: {v}'" for k, v in masked_headers.items()]
)
verbose_logger.debug(f"PRE-API-CALL ADDITIONAL ARGS: {additional_args}")
curl_command = "\n\nPOST Request Sent from LiteLLM:\n"
curl_command += "curl -X POST \\\n"
curl_command += f"{api_base} \\\n"
curl_command += (
f"{formatted_headers} \\\n" if formatted_headers.strip() != "" else ""
)
curl_command += f"-d '{str(data)}'\n"
if additional_args.get("request_str", None) is not None:
# print the sagemaker / bedrock client request
curl_command = "\nRequest Sent from LiteLLM:\n"
curl_command += additional_args.get("request_str", None)
elif api_base == "":
curl_command = self.model_call_details
# only print verbose if verbose logger is not set
if verbose_logger.level == 0:
                # verbose logger was not switched on - the user is relying on litellm.set_verbose=True
print_verbose(f"\033[92m{curl_command}\033[0m\n")
if litellm.json_logs:
verbose_logger.info(
"POST Request Sent from LiteLLM",
extra={"api_base": {api_base}, **masked_headers},
)
else:
verbose_logger.info(f"\033[92m{curl_command}\033[0m\n")
if self.logger_fn and callable(self.logger_fn):
try:
self.logger_fn(
self.model_call_details
) # Expectation: any logger function passed in by the user should accept a dict object
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
# Input Integration Logging -> If you want to log the fact that an attempt to call the model was made
callbacks = litellm.input_callback + self.dynamic_input_callbacks
for callback in callbacks:
try:
if callback == "supabase":
print_verbose("reaches supabase for logging!")
model = self.model_call_details["model"]
messages = self.model_call_details["input"]
print_verbose(f"supabaseClient: {supabaseClient}")
supabaseClient.input_log_event(
model=model,
messages=messages,
end_user=self.model_call_details.get("user", "default"),
litellm_call_id=self.litellm_params["litellm_call_id"],
print_verbose=print_verbose,
)
elif callback == "sentry" and add_breadcrumb:
try:
details_to_log = copy.deepcopy(self.model_call_details)
                        except Exception:
details_to_log = self.model_call_details
if litellm.turn_off_message_logging:
                            # work on a copy of the model_call_details and log it
details_to_log.pop("messages", None)
details_to_log.pop("input", None)
details_to_log.pop("prompt", None)
add_breadcrumb(
category="litellm.llm_call",
message=f"Model Call Details pre-call: {details_to_log}",
level="info",
)
elif isinstance(callback, CustomLogger): # custom logger class
callback.log_pre_api_call(
model=self.model,
messages=self.messages,
kwargs=self.model_call_details,
)
elif callable(callback): # custom logger functions
customLogger.log_input_event(
model=self.model,
messages=self.messages,
kwargs=self.model_call_details,
print_verbose=print_verbose,
callback_func=callback,
)
except Exception as e:
traceback.print_exc()
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while input logging with integrations {traceback.format_exc()}"
)
print_verbose(
f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
)
if capture_exception: # log this error to sentry for debugging
capture_exception(e)
        except Exception as e:  # bind the exception so capture_exception(e) below works
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
print_verbose(
f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
)
if capture_exception: # log this error to sentry for debugging
capture_exception(e)
def post_call(
self, original_response, input=None, api_key=None, additional_args={}
):
# Log the exact result from the LLM API, for streaming - log the type of response received
litellm.error_logs["POST_CALL"] = locals()
if isinstance(original_response, dict):
original_response = json.dumps(original_response)
try:
self.model_call_details["input"] = input
self.model_call_details["api_key"] = api_key
self.model_call_details["original_response"] = original_response
self.model_call_details["additional_args"] = additional_args
self.model_call_details["log_event_type"] = "post_api_call"
# User Logging -> if you pass in a custom logging function
print_verbose(
f"RAW RESPONSE:\n{self.model_call_details.get('original_response', self.model_call_details)}\n\n",
log_level="DEBUG",
)
if self.logger_fn and callable(self.logger_fn):
try:
self.logger_fn(
self.model_call_details
) # Expectation: any logger function passed in by the user should accept a dict object
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
self.redact_message_input_output_from_logging(result=original_response)
# Input Integration Logging -> If you want to log the fact that an attempt to call the model was made
callbacks = litellm.input_callback + self.dynamic_input_callbacks
for callback in callbacks:
try:
if callback == "lite_debugger":
print_verbose("reaches litedebugger for post-call logging!")
print_verbose(f"liteDebuggerClient: {liteDebuggerClient}")
liteDebuggerClient.post_call_log_event(
original_response=original_response,
litellm_call_id=self.litellm_params["litellm_call_id"],
print_verbose=print_verbose,
call_type=self.call_type,
stream=self.stream,
)
elif callback == "sentry" and add_breadcrumb:
print_verbose("reaches sentry breadcrumbing")
try:
details_to_log = copy.deepcopy(self.model_call_details)
                        except Exception:
details_to_log = self.model_call_details
if litellm.turn_off_message_logging:
                            # work on a copy of the model_call_details and log it
details_to_log.pop("messages", None)
details_to_log.pop("input", None)
details_to_log.pop("prompt", None)
add_breadcrumb(
category="litellm.llm_call",
message=f"Model Call Details post-call: {details_to_log}",
level="info",
)
elif isinstance(callback, CustomLogger): # custom logger class
callback.log_post_api_call(
kwargs=self.model_call_details,
response_obj=None,
start_time=self.start_time,
end_time=None,
)
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while post-call logging with integrations {traceback.format_exc()}"
)
print_verbose(
f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
)
if capture_exception: # log this error to sentry for debugging
capture_exception(e)
        except Exception:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
pass
def _success_handler_helper_fn(
self, result=None, start_time=None, end_time=None, cache_hit=None
):
try:
if start_time is None:
start_time = self.start_time
if end_time is None:
end_time = datetime.datetime.now()
if self.completion_start_time is None:
self.completion_start_time = end_time
self.model_call_details["completion_start_time"] = (
self.completion_start_time
)
self.model_call_details["log_event_type"] = "successful_api_call"
self.model_call_details["end_time"] = end_time
self.model_call_details["cache_hit"] = cache_hit
## if model in model cost map - log the response cost
## else set cost to None
verbose_logger.debug(f"Model={self.model};")
if (
result is not None
and (
isinstance(result, ModelResponse)
or isinstance(result, EmbeddingResponse)
or isinstance(result, ImageResponse)
or isinstance(result, TranscriptionResponse)
or isinstance(result, TextCompletionResponse)
)
and self.stream != True
): # handle streaming separately
try:
if self.model_call_details.get("cache_hit", False) == True:
self.model_call_details["response_cost"] = 0.0
else:
result._hidden_params["optional_params"] = self.optional_params
if (
self.call_type == CallTypes.aimage_generation.value
or self.call_type == CallTypes.image_generation.value
):
self.model_call_details["response_cost"] = (
litellm.completion_cost(
completion_response=result,
model=self.model,
call_type=self.call_type,
custom_llm_provider=self.model_call_details.get(
"custom_llm_provider", None
), # set for img gen models
)
)
else:
# check if base_model set on azure
base_model = _get_base_model_from_metadata(
model_call_details=self.model_call_details
)
# base_model defaults to None if not set on model_info
self.model_call_details["response_cost"] = (
litellm.completion_cost(
completion_response=result,
call_type=self.call_type,
model=base_model,
)
)
except litellm.NotFoundError as e:
verbose_logger.debug(
f"Model={self.model} not found in completion cost map."
)
self.model_call_details["response_cost"] = None
else: # streaming chunks + image gen.
self.model_call_details["response_cost"] = None
if (
litellm.max_budget
and self.stream == False
and result is not None
and "content" in result
):
time_diff = (end_time - start_time).total_seconds()
float_diff = float(time_diff)
litellm._current_cost += litellm.completion_cost(
model=self.model,
prompt="",
completion=result["content"],
total_time=float_diff,
)
return start_time, end_time, result
except Exception as e:
raise Exception(f"[Non-Blocking] LiteLLM.Success_Call Error: {str(e)}")
def success_handler(
self, result=None, start_time=None, end_time=None, cache_hit=None, **kwargs
):
print_verbose(f"Logging Details LiteLLM-Success Call: {cache_hit}")
start_time, end_time, result = self._success_handler_helper_fn(
start_time=start_time,
end_time=end_time,
result=result,
cache_hit=cache_hit,
)
# print(f"original response in success handler: {self.model_call_details['original_response']}")
try:
print_verbose(f"success callbacks: {litellm.success_callback}")
## BUILD COMPLETE STREAMED RESPONSE
complete_streaming_response = None
if self.stream and isinstance(result, ModelResponse):
if (
result.choices[0].finish_reason is not None
): # if it's the last chunk
self.sync_streaming_chunks.append(result)
# print_verbose(f"final set of received chunks: {self.sync_streaming_chunks}")
try:
complete_streaming_response = litellm.stream_chunk_builder(
self.sync_streaming_chunks,
messages=self.model_call_details.get("messages", None),
start_time=start_time,
end_time=end_time,
)
except Exception as e:
complete_streaming_response = None
else:
self.sync_streaming_chunks.append(result)
if complete_streaming_response is not None:
print_verbose(
f"Logging Details LiteLLM-Success Call streaming complete"
)
self.model_call_details["complete_streaming_response"] = (
complete_streaming_response
)
try:
if self.model_call_details.get("cache_hit", False) == True:
self.model_call_details["response_cost"] = 0.0
else:
# check if base_model set on azure
base_model = _get_base_model_from_metadata(
model_call_details=self.model_call_details
)
# base_model defaults to None if not set on model_info
self.model_call_details["response_cost"] = (
litellm.completion_cost(
completion_response=complete_streaming_response,
model=base_model,
)
)
verbose_logger.debug(
f"Model={self.model}; cost={self.model_call_details['response_cost']}"
)
except litellm.NotFoundError as e:
verbose_logger.debug(
f"Model={self.model} not found in completion cost map."
)
self.model_call_details["response_cost"] = None
if self.dynamic_success_callbacks is not None and isinstance(
self.dynamic_success_callbacks, list
):
callbacks = self.dynamic_success_callbacks
## keep the internal functions ##
for callback in litellm.success_callback:
if (
isinstance(callback, CustomLogger)
and "_PROXY_" in callback.__class__.__name__
):
callbacks.append(callback)
else:
callbacks = litellm.success_callback
self.redact_message_input_output_from_logging(result=result)
for callback in callbacks:
try:
litellm_params = self.model_call_details.get("litellm_params", {})
if litellm_params.get("no-log", False) == True:
                        # proxy cost tracking callbacks should run
if not (
isinstance(callback, CustomLogger)
and "_PROXY_" in callback.__class__.__name__
):
print_verbose("no-log request, skipping logging")
continue
if callback == "lite_debugger":
print_verbose("reaches lite_debugger for logging!")
print_verbose(f"liteDebuggerClient: {liteDebuggerClient}")
print_verbose(
f"liteDebuggerClient details function {self.call_type} and stream set to {self.stream}"
)
liteDebuggerClient.log_event(
end_user=kwargs.get("user", "default"),
response_obj=result,
start_time=start_time,
end_time=end_time,
litellm_call_id=self.litellm_call_id,
print_verbose=print_verbose,
call_type=self.call_type,
stream=self.stream,
)
if callback == "promptlayer":
print_verbose("reaches promptlayer for logging!")
promptLayerLogger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "supabase":
print_verbose("reaches supabase for logging!")
kwargs = self.model_call_details
                        # this only logs streaming once, i.e. when the stream ends and complete_streaming_response exists
if self.stream:
if "complete_streaming_response" not in kwargs:
return
else:
print_verbose("reaches supabase for streaming logging!")
result = kwargs["complete_streaming_response"]
model = kwargs["model"]
messages = kwargs["messages"]
optional_params = kwargs.get("optional_params", {})
litellm_params = kwargs.get("litellm_params", {})
supabaseClient.log_event(
model=model,
messages=messages,
end_user=optional_params.get("user", "default"),
response_obj=result,
start_time=start_time,
end_time=end_time,
litellm_call_id=litellm_params.get(
"litellm_call_id", str(uuid.uuid4())
),
print_verbose=print_verbose,
)
if callback == "wandb":
print_verbose("reaches wandb for logging!")
weightsBiasesLogger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "langsmith":
print_verbose("reaches langsmith for logging!")
if self.stream:
if "complete_streaming_response" not in kwargs:
break
else:
print_verbose(
"reaches langsmith for streaming logging!"
)
result = kwargs["complete_streaming_response"]
langsmithLogger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "lunary":
print_verbose("reaches lunary for logging!")
model = self.model
kwargs = self.model_call_details
input = kwargs.get("messages", kwargs.get("input", None))
type = (
"embed"
if self.call_type == CallTypes.embedding.value
else "llm"
)
                        # this only logs streaming once, i.e. when the stream ends and complete_streaming_response exists
if self.stream:
if "complete_streaming_response" not in kwargs:
break
else:
result = kwargs["complete_streaming_response"]
lunaryLogger.log_event(
type=type,
kwargs=kwargs,
event="end",
model=model,
input=input,
user_id=kwargs.get("user", None),
# user_props=self.model_call_details.get("user_props", None),
extra=kwargs.get("optional_params", {}),
response_obj=result,
start_time=start_time,
end_time=end_time,
run_id=self.litellm_call_id,
print_verbose=print_verbose,
)
if callback == "helicone":
print_verbose("reaches helicone for logging!")
model = self.model
messages = kwargs["input"]
heliconeLogger.log_success(
model=model,
messages=messages,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "langfuse":
global langFuseLogger
verbose_logger.debug("reaches langfuse for success logging!")
kwargs = {}
for k, v in self.model_call_details.items():
if (
k != "original_response"
): # copy.deepcopy raises errors as this could be a coroutine
kwargs[k] = v
                        # this only logs streaming once, i.e. when the stream ends and complete_streaming_response exists
if self.stream:
verbose_logger.debug(
f"is complete_streaming_response in kwargs: {kwargs.get('complete_streaming_response', None)}"
)
if complete_streaming_response is None:
continue
else:
print_verbose("reaches langfuse for streaming logging!")
result = kwargs["complete_streaming_response"]
                        if langFuseLogger is None or (
                            self.langfuse_public_key is not None
                            and self.langfuse_public_key != langFuseLogger.public_key
                        ):
langFuseLogger = LangFuseLogger(
langfuse_public_key=self.langfuse_public_key,
langfuse_secret=self.langfuse_secret,
)
langFuseLogger.log_event(
kwargs=kwargs,
response_obj=result,
start_time=start_time,
end_time=end_time,
user_id=kwargs.get("user", None),
print_verbose=print_verbose,
)
if callback == "datadog":
global dataDogLogger
verbose_logger.debug("reaches datadog for success logging!")
kwargs = {}
for k, v in self.model_call_details.items():
if (
k != "original_response"
): # copy.deepcopy raises errors as this could be a coroutine
kwargs[k] = v
                        # this only logs streaming once, i.e. when the stream ends and complete_streaming_response exists
if self.stream:
verbose_logger.debug(
f"datadog: is complete_streaming_response in kwargs: {kwargs.get('complete_streaming_response', None)}"
)
if complete_streaming_response is None:
continue
else:
print_verbose("reaches datadog for streaming logging!")
result = kwargs["complete_streaming_response"]
dataDogLogger.log_event(
kwargs=kwargs,
response_obj=result,
start_time=start_time,
end_time=end_time,
user_id=kwargs.get("user", None),
print_verbose=print_verbose,
)
if callback == "prometheus":
global prometheusLogger
verbose_logger.debug("reaches prometheus for success logging!")
kwargs = {}
for k, v in self.model_call_details.items():
if (
k != "original_response"
): # copy.deepcopy raises errors as this could be a coroutine
kwargs[k] = v
                        # this only logs streaming once, i.e. when the stream ends and complete_streaming_response exists
if self.stream:
verbose_logger.debug(
f"prometheus: is complete_streaming_response in kwargs: {kwargs.get('complete_streaming_response', None)}"
)
if complete_streaming_response is None:
continue
else:
print_verbose(
"reaches prometheus for streaming logging!"
)
result = kwargs["complete_streaming_response"]
prometheusLogger.log_event(
kwargs=kwargs,
response_obj=result,
start_time=start_time,
end_time=end_time,
user_id=kwargs.get("user", None),
print_verbose=print_verbose,
)
if callback == "generic":
global genericAPILogger
verbose_logger.debug("reaches langfuse for success logging!")
kwargs = {}
for k, v in self.model_call_details.items():
if (
k != "original_response"
): # copy.deepcopy raises errors as this could be a coroutine
kwargs[k] = v
                        # this only logs streaming once, i.e. when the stream ends and complete_streaming_response exists
if self.stream:
verbose_logger.debug(
f"is complete_streaming_response in kwargs: {kwargs.get('complete_streaming_response', None)}"
)
if complete_streaming_response is None:
break
else:
print_verbose("reaches langfuse for streaming logging!")
result = kwargs["complete_streaming_response"]
if genericAPILogger is None:
genericAPILogger = GenericAPILogger()
genericAPILogger.log_event(
kwargs=kwargs,
response_obj=result,
start_time=start_time,
end_time=end_time,
user_id=kwargs.get("user", None),
print_verbose=print_verbose,
)
if callback == "clickhouse":
global clickHouseLogger
verbose_logger.debug("reaches clickhouse for success logging!")
kwargs = {}
for k, v in self.model_call_details.items():
if (
k != "original_response"
): # copy.deepcopy raises errors as this could be a coroutine
kwargs[k] = v
                        # this only logs streaming once, i.e. when the stream ends and complete_streaming_response exists
if self.stream:
verbose_logger.debug(
f"is complete_streaming_response in kwargs: {kwargs.get('complete_streaming_response', None)}"
)
if complete_streaming_response is None:
break
else:
print_verbose(
"reaches clickhouse for streaming logging!"
)
result = kwargs["complete_streaming_response"]
if clickHouseLogger is None:
clickHouseLogger = ClickhouseLogger()
clickHouseLogger.log_event(
kwargs=kwargs,
response_obj=result,
start_time=start_time,
end_time=end_time,
user_id=kwargs.get("user", None),
print_verbose=print_verbose,
)
if callback == "greenscale":
kwargs = {}
for k, v in self.model_call_details.items():
if (
k != "original_response"
): # copy.deepcopy raises errors as this could be a coroutine
kwargs[k] = v
                        # this only logs streaming once, i.e. when the stream ends and complete_streaming_response exists
if self.stream:
verbose_logger.debug(
f"is complete_streaming_response in kwargs: {kwargs.get('complete_streaming_response', None)}"
)
if complete_streaming_response is None:
break
else:
print_verbose(
"reaches greenscale for streaming logging!"
)
result = kwargs["complete_streaming_response"]
greenscaleLogger.log_event(
kwargs=kwargs,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "cache" and litellm.cache is not None:
                        # this only logs streaming once, i.e. when the stream ends and complete_streaming_response exists
print_verbose("success_callback: reaches cache for logging!")
kwargs = self.model_call_details
if self.stream:
if "complete_streaming_response" not in kwargs:
print_verbose(
f"success_callback: reaches cache for logging, there is no complete_streaming_response. Kwargs={kwargs}\n\n"
)
pass
else:
print_verbose(
"success_callback: reaches cache for logging, there is a complete_streaming_response. Adding to cache"
)
result = kwargs["complete_streaming_response"]
# only add to cache once we have a complete streaming response
litellm.cache.add_cache(result, **kwargs)
if callback == "athina":
deep_copy = {}
for k, v in self.model_call_details.items():
deep_copy[k] = v
athinaLogger.log_event(
kwargs=deep_copy,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "traceloop":
deep_copy = {}
for k, v in self.model_call_details.items():
if k != "original_response":
deep_copy[k] = v
traceloopLogger.log_event(
kwargs=deep_copy,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "s3":
global s3Logger
if s3Logger is None:
s3Logger = S3Logger()
if self.stream:
if "complete_streaming_response" in self.model_call_details:
print_verbose(
"S3Logger Logger: Got Stream Event - Completed Stream Response"
)
s3Logger.log_event(
kwargs=self.model_call_details,
response_obj=self.model_call_details[
"complete_streaming_response"
],
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
else:
print_verbose(
"S3Logger Logger: Got Stream Event - No complete stream response as yet"
)
else:
s3Logger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if (
callback == "openmeter"
and self.model_call_details.get("litellm_params", {}).get(
"acompletion", False
)
== False
and self.model_call_details.get("litellm_params", {}).get(
"aembedding", False
)
== False
and self.model_call_details.get("litellm_params", {}).get(
"aimage_generation", False
)
== False
and self.model_call_details.get("litellm_params", {}).get(
"atranscription", False
)
== False
):
global openMeterLogger
if openMeterLogger is None:
print_verbose("Instantiates openmeter client")
openMeterLogger = OpenMeterLogger()
if self.stream and complete_streaming_response is None:
openMeterLogger.log_stream_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
)
else:
if self.stream and complete_streaming_response:
self.model_call_details["complete_response"] = (
self.model_call_details.get(
"complete_streaming_response", {}
)
)
result = self.model_call_details["complete_response"]
openMeterLogger.log_success_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
)
if (
isinstance(callback, CustomLogger)
and self.model_call_details.get("litellm_params", {}).get(
"acompletion", False
)
== False
and self.model_call_details.get("litellm_params", {}).get(
"aembedding", False
)
== False
and self.model_call_details.get("litellm_params", {}).get(
"aimage_generation", False
)
== False
and self.model_call_details.get("litellm_params", {}).get(
"atranscription", False
)
== False
): # custom logger class
if self.stream and complete_streaming_response is None:
callback.log_stream_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
)
else:
if self.stream and complete_streaming_response:
self.model_call_details["complete_response"] = (
self.model_call_details.get(
"complete_streaming_response", {}
)
)
result = self.model_call_details["complete_response"]
callback.log_success_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
)
if (
callable(callback) == True
and self.model_call_details.get("litellm_params", {}).get(
"acompletion", False
)
== False
and self.model_call_details.get("litellm_params", {}).get(
"aembedding", False
)
== False
and self.model_call_details.get("litellm_params", {}).get(
"aimage_generation", False
)
== False
and self.model_call_details.get("litellm_params", {}).get(
"atranscription", False
)
== False
): # custom logger functions
print_verbose(
f"success callbacks: Running Custom Callback Function"
)
customLogger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
callback_func=callback,
)
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while success logging with integrations {traceback.format_exc()}"
)
print_verbose(
f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
)
if capture_exception: # log this error to sentry for debugging
capture_exception(e)
        except Exception:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while success logging {traceback.format_exc()}"
)
pass
async def async_success_handler(
self, result=None, start_time=None, end_time=None, cache_hit=None, **kwargs
):
"""
        Implements async callbacks, to handle asyncio event-loop issues when custom integrations need to use async functions.
"""
print_verbose(f"Logging Details LiteLLM-Async Success Call: {cache_hit}")
start_time, end_time, result = self._success_handler_helper_fn(
start_time=start_time, end_time=end_time, result=result, cache_hit=cache_hit
)
## BUILD COMPLETE STREAMED RESPONSE
complete_streaming_response = None
if self.stream:
if result.choices[0].finish_reason is not None: # if it's the last chunk
self.streaming_chunks.append(result)
# verbose_logger.debug(f"final set of received chunks: {self.streaming_chunks}")
try:
complete_streaming_response = litellm.stream_chunk_builder(
self.streaming_chunks,
messages=self.model_call_details.get("messages", None),
start_time=start_time,
end_time=end_time,
)
except Exception as e:
print_verbose(
f"Error occurred building stream chunk: {traceback.format_exc()}"
)
complete_streaming_response = None
else:
self.streaming_chunks.append(result)
if complete_streaming_response is not None:
print_verbose("Async success callbacks: Got a complete streaming response")
self.model_call_details["async_complete_streaming_response"] = (
complete_streaming_response
)
try:
if self.model_call_details.get("cache_hit", False) == True:
self.model_call_details["response_cost"] = 0.0
else:
# check if base_model set on azure
base_model = _get_base_model_from_metadata(
model_call_details=self.model_call_details
)
# base_model defaults to None if not set on model_info
self.model_call_details["response_cost"] = litellm.completion_cost(
completion_response=complete_streaming_response,
model=base_model,
)
verbose_logger.debug(
f"Model={self.model}; cost={self.model_call_details['response_cost']}"
)
except litellm.NotFoundError as e:
verbose_logger.debug(
f"Model={self.model} not found in completion cost map."
)
self.model_call_details["response_cost"] = None
if self.dynamic_async_success_callbacks is not None and isinstance(
self.dynamic_async_success_callbacks, list
):
callbacks = self.dynamic_async_success_callbacks
## keep the internal functions ##
for callback in litellm._async_success_callback:
callback_name = ""
if isinstance(callback, CustomLogger):
callback_name = callback.__class__.__name__
if callable(callback):
callback_name = callback.__name__
if "_PROXY_" in callback_name:
callbacks.append(callback)
else:
callbacks = litellm._async_success_callback
self.redact_message_input_output_from_logging(result=result)
for callback in callbacks:
# check if callback can run for this request
litellm_params = self.model_call_details.get("litellm_params", {})
if litellm_params.get("no-log", False) == True:
                # proxy cost tracking callbacks should run
if not (
isinstance(callback, CustomLogger)
and "_PROXY_" in callback.__class__.__name__
):
print_verbose("no-log request, skipping logging")
continue
try:
if kwargs.get("no-log", False) == True:
print_verbose("no-log request, skipping logging")
continue
if callback == "cache" and litellm.cache is not None:
# set_cache once complete streaming response is built
print_verbose("async success_callback: reaches cache for logging!")
kwargs = self.model_call_details
if self.stream:
if "async_complete_streaming_response" not in kwargs:
print_verbose(
f"async success_callback: reaches cache for logging, there is no async_complete_streaming_response. Kwargs={kwargs}\n\n"
)
pass
else:
print_verbose(
"async success_callback: reaches cache for logging, there is a async_complete_streaming_response. Adding to cache"
)
result = kwargs["async_complete_streaming_response"]
# only add to cache once we have a complete streaming response
if litellm.cache is not None and not isinstance(
litellm.cache.cache, S3Cache
):
await litellm.cache.async_add_cache(result, **kwargs)
else:
litellm.cache.add_cache(result, **kwargs)
if callback == "openmeter":
global openMeterLogger
if self.stream == True:
if (
"async_complete_streaming_response"
in self.model_call_details
):
await openMeterLogger.async_log_success_event(
kwargs=self.model_call_details,
response_obj=self.model_call_details[
"async_complete_streaming_response"
],
start_time=start_time,
end_time=end_time,
)
else:
await openMeterLogger.async_log_stream_event( # [TODO]: move this to being an async log stream event function
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
)
else:
await openMeterLogger.async_log_success_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
)
if isinstance(callback, CustomLogger): # custom logger class
if self.stream == True:
if (
"async_complete_streaming_response"
in self.model_call_details
):
await callback.async_log_success_event(
kwargs=self.model_call_details,
response_obj=self.model_call_details[
"async_complete_streaming_response"
],
start_time=start_time,
end_time=end_time,
)
else:
await callback.async_log_stream_event( # [TODO]: move this to being an async log stream event function
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
)
else:
await callback.async_log_success_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
)
if callable(callback): # custom logger functions
if self.stream:
if (
"async_complete_streaming_response"
in self.model_call_details
):
await customLogger.async_log_event(
kwargs=self.model_call_details,
response_obj=self.model_call_details[
"async_complete_streaming_response"
],
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
callback_func=callback,
)
else:
await customLogger.async_log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
callback_func=callback,
)
if callback == "dynamodb":
global dynamoLogger
if dynamoLogger is None:
dynamoLogger = DyanmoDBLogger()
if self.stream:
if (
"async_complete_streaming_response"
in self.model_call_details
):
print_verbose(
"DynamoDB Logger: Got Stream Event - Completed Stream Response"
)
await dynamoLogger._async_log_event(
kwargs=self.model_call_details,
response_obj=self.model_call_details[
"async_complete_streaming_response"
],
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
else:
print_verbose(
"DynamoDB Logger: Got Stream Event - No complete stream response as yet"
)
else:
await dynamoLogger._async_log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
            except Exception:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while success logging {traceback.format_exc()}"
)
pass
def _failure_handler_helper_fn(
self, exception, traceback_exception, start_time=None, end_time=None
):
if start_time is None:
start_time = self.start_time
if end_time is None:
end_time = datetime.datetime.now()
# on some exceptions, model_call_details is not always initialized, this ensures that we still log those exceptions
if not hasattr(self, "model_call_details"):
self.model_call_details = {}
self.model_call_details["log_event_type"] = "failed_api_call"
self.model_call_details["exception"] = exception
self.model_call_details["traceback_exception"] = traceback_exception
self.model_call_details["end_time"] = end_time
self.model_call_details.setdefault("original_response", None)
return start_time, end_time
def failure_handler(
self, exception, traceback_exception, start_time=None, end_time=None
):
print_verbose(f"Logging Details LiteLLM-Failure Call")
try:
start_time, end_time = self._failure_handler_helper_fn(
exception=exception,
traceback_exception=traceback_exception,
start_time=start_time,
end_time=end_time,
)
            callbacks = []  # init this to empty in case it's not created
if self.dynamic_failure_callbacks is not None and isinstance(
self.dynamic_failure_callbacks, list
):
callbacks = self.dynamic_failure_callbacks
## keep the internal functions ##
for callback in litellm.failure_callback:
if (
isinstance(callback, CustomLogger)
and "_PROXY_" in callback.__class__.__name__
):
callbacks.append(callback)
else:
callbacks = litellm.failure_callback
            result = None  # result sent to all loggers; init this to None in case it's not created
self.redact_message_input_output_from_logging(result=result)
for callback in callbacks:
try:
if callback == "lite_debugger":
print_verbose("reaches lite_debugger for logging!")
print_verbose(f"liteDebuggerClient: {liteDebuggerClient}")
result = {
"model": self.model,
"created": time.time(),
"error": traceback_exception,
"usage": {
"prompt_tokens": prompt_token_calculator(
self.model, messages=self.messages
),
"completion_tokens": 0,
},
}
liteDebuggerClient.log_event(
model=self.model,
messages=self.messages,
end_user=self.model_call_details.get("user", "default"),
response_obj=result,
start_time=start_time,
end_time=end_time,
litellm_call_id=self.litellm_call_id,
print_verbose=print_verbose,
call_type=self.call_type,
stream=self.stream,
)
elif callback == "lunary":
print_verbose("reaches lunary for logging error!")
model = self.model
input = self.model_call_details["input"]
_type = (
"embed"
if self.call_type == CallTypes.embedding.value
else "llm"
)
lunaryLogger.log_event(
type=_type,
event="error",
user_id=self.model_call_details.get("user", "default"),
model=model,
input=input,
error=traceback_exception,
run_id=self.litellm_call_id,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
elif callback == "sentry":
print_verbose("sending exception to sentry")
if capture_exception:
capture_exception(exception)
else:
print_verbose(
f"capture exception not initialized: {capture_exception}"
)
elif callable(callback): # custom logger functions
customLogger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
callback_func=callback,
)
elif (
isinstance(callback, CustomLogger)
and self.model_call_details.get("litellm_params", {}).get(
"acompletion", False
)
== False
and self.model_call_details.get("litellm_params", {}).get(
"aembedding", False
)
== False
): # custom logger class
callback.log_failure_event(
start_time=start_time,
end_time=end_time,
response_obj=result,
kwargs=self.model_call_details,
)
elif callback == "langfuse":
global langFuseLogger
verbose_logger.debug("reaches langfuse for logging failure")
kwargs = {}
for k, v in self.model_call_details.items():
if (
k != "original_response"
): # copy.deepcopy raises errors as this could be a coroutine
kwargs[k] = v
                    # this only logs streaming once, i.e. when the stream ends and complete_streaming_response exists
                    if langFuseLogger is None or (
                        self.langfuse_public_key is not None
                        and self.langfuse_public_key != langFuseLogger.public_key
                    ):
langFuseLogger = LangFuseLogger(
langfuse_public_key=self.langfuse_public_key,
langfuse_secret=self.langfuse_secret,
)
langFuseLogger.log_event(
start_time=start_time,
end_time=end_time,
response_obj=None,
user_id=kwargs.get("user", None),
print_verbose=print_verbose,
status_message=str(exception),
level="ERROR",
kwargs=self.model_call_details,
)
elif callback == "prometheus":
global prometheusLogger
verbose_logger.debug("reaches prometheus for success logging!")
kwargs = {}
for k, v in self.model_call_details.items():
if (
k != "original_response"
): # copy.deepcopy raises errors as this could be a coroutine
kwargs[k] = v
kwargs["exception"] = str(exception)
prometheusLogger.log_event(
kwargs=kwargs,
response_obj=result,
start_time=start_time,
end_time=end_time,
user_id=kwargs.get("user", None),
print_verbose=print_verbose,
)
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while failure logging with integrations {str(e)}"
)
print_verbose(
f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
)
if capture_exception: # log this error to sentry for debugging
capture_exception(e)
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while failure logging {traceback.format_exc()}"
)
pass
async def async_failure_handler(
self, exception, traceback_exception, start_time=None, end_time=None
):
"""
        Implements async callbacks, to handle asyncio event-loop issues when custom integrations need to use async functions.
"""
start_time, end_time = self._failure_handler_helper_fn(
exception=exception,
traceback_exception=traceback_exception,
start_time=start_time,
end_time=end_time,
)
        result = None  # result sent to all loggers; init this to None in case it's not created
for callback in litellm._async_failure_callback:
try:
if isinstance(callback, CustomLogger): # custom logger class
await callback.async_log_failure_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
)
if callable(callback): # custom logger functions
await customLogger.async_log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
callback_func=callback,
)
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while success logging {traceback.format_exc()}"
)
def redact_message_input_output_from_logging(self, result):
"""
        Removes messages, prompts, input, and response from logging. This modifies the data in place
        and only redacts when litellm.turn_off_message_logging == True.
"""
# check if user opted out of logging message/response to callbacks
if litellm.turn_off_message_logging == True:
# remove messages, prompts, input, response from logging
self.model_call_details["messages"] = "redacted-by-litellm"
self.model_call_details["prompt"] = ""
self.model_call_details["input"] = ""
# response cleaning
# ChatCompletion Responses
if self.stream and "complete_streaming_response" in self.model_call_details:
_streaming_response = self.model_call_details[
"complete_streaming_response"
]
for choice in _streaming_response.choices:
if isinstance(choice, litellm.Choices):
choice.message.content = "redacted-by-litellm"
elif isinstance(choice, litellm.utils.StreamingChoices):
choice.delta.content = "redacted-by-litellm"
else:
if result is not None:
if isinstance(result, litellm.ModelResponse):
if hasattr(result, "choices") and result.choices is not None:
for choice in result.choices:
if isinstance(choice, litellm.Choices):
choice.message.content = "redacted-by-litellm"
elif isinstance(choice, litellm.utils.StreamingChoices):
choice.delta.content = "redacted-by-litellm"
|
(model, messages, stream, call_type, start_time, litellm_call_id, function_id, dynamic_success_callbacks=None, dynamic_failure_callbacks=None, dynamic_async_success_callbacks=None, langfuse_public_key=None, langfuse_secret=None)
|
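The `Logging` class above funnels every event through the same dispatch shape: iterate a mixed list of callbacks (string names for built-in integrations, `CustomLogger` subclasses, bare callables) and wrap each one so a logging failure never breaks the request. A stripped-down sketch of that loop; the names here are illustrative, not litellm's:

```python
class CustomLogger:
    def log_success_event(self, kwargs, response_obj):
        print(f"[class logger] model={kwargs.get('model')}")

def dispatch_success(callbacks, kwargs, response_obj):
    # one loop handles string-named integrations, CustomLogger
    # instances, and plain functions, exactly like success_handler
    for callback in callbacks:
        try:
            if callback == "stdout":                  # built-in, by name
                print(f"[stdout] {response_obj}")
            elif isinstance(callback, CustomLogger):  # custom logger class
                callback.log_success_event(kwargs, response_obj)
            elif callable(callback):                  # custom logger function
                callback(kwargs, response_obj)
        except Exception as e:
            # non-blocking by design: log and continue
            print(f"[non-blocking] callback failed: {e}")

dispatch_success(
    ["stdout", CustomLogger(), lambda k, r: print(f"[fn logger] {r}")],
    kwargs={"model": "gpt-4o"},
    response_obj="ok",
)
```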
63,477 |
litellm.utils
|
__init__
| null |
def __init__(
self,
model,
messages,
stream,
call_type,
start_time,
litellm_call_id,
function_id,
dynamic_success_callbacks=None,
dynamic_failure_callbacks=None,
dynamic_async_success_callbacks=None,
langfuse_public_key=None,
langfuse_secret=None,
):
if call_type not in [item.value for item in CallTypes]:
allowed_values = ", ".join([item.value for item in CallTypes])
raise ValueError(
f"Invalid call_type {call_type}. Allowed values: {allowed_values}"
)
if messages is not None:
if isinstance(messages, str):
messages = [
{"role": "user", "content": messages}
] # convert text completion input to the chat completion format
elif (
isinstance(messages, list)
and len(messages) > 0
and isinstance(messages[0], str)
):
new_messages = []
for m in messages:
new_messages.append({"role": "user", "content": m})
messages = new_messages
self.model = model
self.messages = messages
self.stream = stream
self.start_time = start_time # log the call start time
self.call_type = call_type
self.litellm_call_id = litellm_call_id
self.function_id = function_id
self.streaming_chunks = [] # for generating complete stream response
self.sync_streaming_chunks = [] # for generating complete stream response
self.model_call_details = {}
self.dynamic_input_callbacks = [] # [TODO] callbacks set for just that call
self.dynamic_failure_callbacks = dynamic_failure_callbacks
self.dynamic_success_callbacks = (
dynamic_success_callbacks # callbacks set for just that call
)
self.dynamic_async_success_callbacks = (
dynamic_async_success_callbacks # callbacks set for just that call
)
## DYNAMIC LANGFUSE KEYS ##
self.langfuse_public_key = langfuse_public_key
self.langfuse_secret = langfuse_secret
## TIME TO FIRST TOKEN LOGGING ##
self.completion_start_time: Optional[datetime.datetime] = None
|
(self, model, messages, stream, call_type, start_time, litellm_call_id, function_id, dynamic_success_callbacks=None, dynamic_failure_callbacks=None, dynamic_async_success_callbacks=None, langfuse_public_key=None, langfuse_secret=None)
|
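The constructor above validates call_type and normalizes bare strings (or lists of strings) into the chat-message format before any logging happens. A minimal sketch of direct instantiation follows; note that Logging is an internal class that litellm normally constructs for you, so instantiating it by hand is purely illustrative.

import datetime
import uuid

from litellm.utils import CallTypes, Logging

# illustrative only: litellm builds this object internally per call
logging_obj = Logging(
    model="gpt-3.5-turbo",
    messages="Hey, how's it going?",  # a bare string is wrapped as a user chat message
    stream=False,
    call_type=CallTypes.completion.value,  # anything outside CallTypes raises ValueError
    start_time=datetime.datetime.now(),
    litellm_call_id=str(uuid.uuid4()),
    function_id="1",
)
assert logging_obj.messages == [{"role": "user", "content": "Hey, how's it going?"}]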
63,478 |
litellm.utils
|
_failure_handler_helper_fn
| null |
def _failure_handler_helper_fn(
self, exception, traceback_exception, start_time=None, end_time=None
):
if start_time is None:
start_time = self.start_time
if end_time is None:
end_time = datetime.datetime.now()
# on some exceptions, model_call_details is not always initialized, this ensures that we still log those exceptions
if not hasattr(self, "model_call_details"):
self.model_call_details = {}
self.model_call_details["log_event_type"] = "failed_api_call"
self.model_call_details["exception"] = exception
self.model_call_details["traceback_exception"] = traceback_exception
self.model_call_details["end_time"] = end_time
self.model_call_details.setdefault("original_response", None)
return start_time, end_time
|
(self, exception, traceback_exception, start_time=None, end_time=None)
|
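This helper back-fills missing timestamps and guarantees model_call_details exists, so exceptions raised before a request is fully set up can still be logged. A short sketch, reusing the logging_obj from the example above:

# illustrative: how failure_handler invokes the helper internally
start_time, end_time = logging_obj._failure_handler_helper_fn(
    exception=Exception("simulated rate limit"),
    traceback_exception="Traceback (most recent call last): ...",
)
# start_time defaulted to logging_obj.start_time, end_time to datetime.datetime.now()
assert logging_obj.model_call_details["log_event_type"] == "failed_api_call"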
63,479 |
litellm.utils
|
_pre_call
|
Common helper function across the sync + async pre-call function
|
def _pre_call(self, input, api_key, model=None, additional_args={}):
"""
Common helper function across the sync + async pre-call function
"""
# print_verbose(f"logging pre call for model: {self.model} with call type: {self.call_type}")
self.model_call_details["input"] = input
self.model_call_details["api_key"] = api_key
self.model_call_details["additional_args"] = additional_args
self.model_call_details["log_event_type"] = "pre_api_call"
if (
model
    ): # if model name was changed pre-call, overwrite the initial model call name with the new one
self.model_call_details["model"] = model
|
(self, input, api_key, model=None, additional_args={})
|
63,480 |
litellm.utils
|
_success_handler_helper_fn
| null |
def _success_handler_helper_fn(
self, result=None, start_time=None, end_time=None, cache_hit=None
):
try:
if start_time is None:
start_time = self.start_time
if end_time is None:
end_time = datetime.datetime.now()
if self.completion_start_time is None:
self.completion_start_time = end_time
self.model_call_details["completion_start_time"] = (
self.completion_start_time
)
self.model_call_details["log_event_type"] = "successful_api_call"
self.model_call_details["end_time"] = end_time
self.model_call_details["cache_hit"] = cache_hit
## if model in model cost map - log the response cost
## else set cost to None
verbose_logger.debug(f"Model={self.model};")
if (
result is not None
and (
isinstance(result, ModelResponse)
or isinstance(result, EmbeddingResponse)
or isinstance(result, ImageResponse)
or isinstance(result, TranscriptionResponse)
or isinstance(result, TextCompletionResponse)
)
and self.stream != True
): # handle streaming separately
try:
if self.model_call_details.get("cache_hit", False) == True:
self.model_call_details["response_cost"] = 0.0
else:
result._hidden_params["optional_params"] = self.optional_params
if (
self.call_type == CallTypes.aimage_generation.value
or self.call_type == CallTypes.image_generation.value
):
self.model_call_details["response_cost"] = (
litellm.completion_cost(
completion_response=result,
model=self.model,
call_type=self.call_type,
custom_llm_provider=self.model_call_details.get(
"custom_llm_provider", None
), # set for img gen models
)
)
else:
# check if base_model set on azure
base_model = _get_base_model_from_metadata(
model_call_details=self.model_call_details
)
# base_model defaults to None if not set on model_info
self.model_call_details["response_cost"] = (
litellm.completion_cost(
completion_response=result,
call_type=self.call_type,
model=base_model,
)
)
except litellm.NotFoundError as e:
verbose_logger.debug(
f"Model={self.model} not found in completion cost map."
)
self.model_call_details["response_cost"] = None
else: # streaming chunks + image gen.
self.model_call_details["response_cost"] = None
if (
litellm.max_budget
and self.stream == False
and result is not None
and "content" in result
):
time_diff = (end_time - start_time).total_seconds()
float_diff = float(time_diff)
litellm._current_cost += litellm.completion_cost(
model=self.model,
prompt="",
completion=result["content"],
total_time=float_diff,
)
return start_time, end_time, result
except Exception as e:
raise Exception(f"[Non-Blocking] LiteLLM.Success_Call Error: {str(e)}")
|
(self, result=None, start_time=None, end_time=None, cache_hit=None)
|
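The cost rule above is simple: cache hits are priced at 0.0, and everything else is looked up via litellm.completion_cost (which the handler wraps in a try/except because unknown models raise litellm.NotFoundError). A sketch of the same lookup done directly:

import litellm

response = litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
)
try:
    # the same call the success helper makes; raises NotFoundError for
    # models missing from the cost map
    cost = litellm.completion_cost(completion_response=response)
    print(f"response_cost: {cost}")
except litellm.NotFoundError:
    print("model not in litellm's cost map")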
63,481 |
litellm.utils
|
async_failure_handler
|
Implementing async callbacks, to handle asyncio event loop issues when custom integrations need to use async functions.
|
async def async_failure_handler(
    self, exception, traceback_exception, start_time=None, end_time=None
):
    """
    Implementing async callbacks, to handle asyncio event loop issues when custom integrations need to use async functions.
    """
    start_time, end_time = self._failure_handler_helper_fn(
        exception=exception,
        traceback_exception=traceback_exception,
        start_time=start_time,
        end_time=end_time,
    )
    result = None # result sent to all loggers, init this to None in case it's not created
    for callback in litellm._async_failure_callback:
        try:
            if isinstance(callback, CustomLogger): # custom logger class
                await callback.async_log_failure_event(
                    kwargs=self.model_call_details,
                    response_obj=result,
                    start_time=start_time,
                    end_time=end_time,
                )
            if callable(callback): # custom logger functions
                await customLogger.async_log_event(
                    kwargs=self.model_call_details,
                    response_obj=result,
                    start_time=start_time,
                    end_time=end_time,
                    print_verbose=print_verbose,
                    callback_func=callback,
                )
        except Exception as e:
            print_verbose(
                f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while failure logging {traceback.format_exc()}"
            )
|
(self, exception, traceback_exception, start_time=None, end_time=None)
|
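As the loop above shows, async failure callbacks are either CustomLogger subclasses (dispatched via async_log_failure_event) or plain coroutines routed through customLogger.async_log_event. A sketch of the class-based form; registering via litellm.callbacks is the documented pattern, though exactly which internal lists it populates is an implementation detail:

import litellm
from litellm.integrations.custom_logger import CustomLogger

class MyAsyncHandler(CustomLogger):
    # hook invoked by async_failure_handler for async (acompletion) failures
    async def async_log_failure_event(self, kwargs, response_obj, start_time, end_time):
        print(f"async failure: {kwargs.get('exception')}")

litellm.callbacks = [MyAsyncHandler()]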
63,482 |
litellm.utils
|
async_success_handler
|
Implementing async callbacks, to handle asyncio event loop issues when custom integrations need to use async functions.
|
def success_handler(
self, result=None, start_time=None, end_time=None, cache_hit=None, **kwargs
):
print_verbose(f"Logging Details LiteLLM-Success Call: {cache_hit}")
start_time, end_time, result = self._success_handler_helper_fn(
start_time=start_time,
end_time=end_time,
result=result,
cache_hit=cache_hit,
)
# print(f"original response in success handler: {self.model_call_details['original_response']}")
try:
print_verbose(f"success callbacks: {litellm.success_callback}")
## BUILD COMPLETE STREAMED RESPONSE
complete_streaming_response = None
if self.stream and isinstance(result, ModelResponse):
if (
result.choices[0].finish_reason is not None
): # if it's the last chunk
self.sync_streaming_chunks.append(result)
# print_verbose(f"final set of received chunks: {self.sync_streaming_chunks}")
try:
complete_streaming_response = litellm.stream_chunk_builder(
self.sync_streaming_chunks,
messages=self.model_call_details.get("messages", None),
start_time=start_time,
end_time=end_time,
)
except Exception as e:
complete_streaming_response = None
else:
self.sync_streaming_chunks.append(result)
if complete_streaming_response is not None:
print_verbose(
f"Logging Details LiteLLM-Success Call streaming complete"
)
self.model_call_details["complete_streaming_response"] = (
complete_streaming_response
)
try:
if self.model_call_details.get("cache_hit", False) == True:
self.model_call_details["response_cost"] = 0.0
else:
# check if base_model set on azure
base_model = _get_base_model_from_metadata(
model_call_details=self.model_call_details
)
# base_model defaults to None if not set on model_info
self.model_call_details["response_cost"] = (
litellm.completion_cost(
completion_response=complete_streaming_response,
model=base_model,
)
)
verbose_logger.debug(
f"Model={self.model}; cost={self.model_call_details['response_cost']}"
)
except litellm.NotFoundError as e:
verbose_logger.debug(
f"Model={self.model} not found in completion cost map."
)
self.model_call_details["response_cost"] = None
if self.dynamic_success_callbacks is not None and isinstance(
self.dynamic_success_callbacks, list
):
callbacks = self.dynamic_success_callbacks
## keep the internal functions ##
for callback in litellm.success_callback:
if (
isinstance(callback, CustomLogger)
and "_PROXY_" in callback.__class__.__name__
):
callbacks.append(callback)
else:
callbacks = litellm.success_callback
self.redact_message_input_output_from_logging(result=result)
for callback in callbacks:
try:
litellm_params = self.model_call_details.get("litellm_params", {})
if litellm_params.get("no-log", False) == True:
                    # proxy cost tracking callbacks should run
if not (
isinstance(callback, CustomLogger)
and "_PROXY_" in callback.__class__.__name__
):
print_verbose("no-log request, skipping logging")
continue
if callback == "lite_debugger":
print_verbose("reaches lite_debugger for logging!")
print_verbose(f"liteDebuggerClient: {liteDebuggerClient}")
print_verbose(
f"liteDebuggerClient details function {self.call_type} and stream set to {self.stream}"
)
liteDebuggerClient.log_event(
end_user=kwargs.get("user", "default"),
response_obj=result,
start_time=start_time,
end_time=end_time,
litellm_call_id=self.litellm_call_id,
print_verbose=print_verbose,
call_type=self.call_type,
stream=self.stream,
)
if callback == "promptlayer":
print_verbose("reaches promptlayer for logging!")
promptLayerLogger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "supabase":
print_verbose("reaches supabase for logging!")
kwargs = self.model_call_details
# this only logs streaming once, complete_streaming_response exists i.e when stream ends
if self.stream:
if "complete_streaming_response" not in kwargs:
return
else:
print_verbose("reaches supabase for streaming logging!")
result = kwargs["complete_streaming_response"]
model = kwargs["model"]
messages = kwargs["messages"]
optional_params = kwargs.get("optional_params", {})
litellm_params = kwargs.get("litellm_params", {})
supabaseClient.log_event(
model=model,
messages=messages,
end_user=optional_params.get("user", "default"),
response_obj=result,
start_time=start_time,
end_time=end_time,
litellm_call_id=litellm_params.get(
"litellm_call_id", str(uuid.uuid4())
),
print_verbose=print_verbose,
)
if callback == "wandb":
print_verbose("reaches wandb for logging!")
weightsBiasesLogger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "langsmith":
print_verbose("reaches langsmith for logging!")
if self.stream:
if "complete_streaming_response" not in kwargs:
break
else:
print_verbose(
"reaches langsmith for streaming logging!"
)
result = kwargs["complete_streaming_response"]
langsmithLogger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "lunary":
print_verbose("reaches lunary for logging!")
model = self.model
kwargs = self.model_call_details
input = kwargs.get("messages", kwargs.get("input", None))
type = (
"embed"
if self.call_type == CallTypes.embedding.value
else "llm"
)
# this only logs streaming once, complete_streaming_response exists i.e when stream ends
if self.stream:
if "complete_streaming_response" not in kwargs:
break
else:
result = kwargs["complete_streaming_response"]
lunaryLogger.log_event(
type=type,
kwargs=kwargs,
event="end",
model=model,
input=input,
user_id=kwargs.get("user", None),
# user_props=self.model_call_details.get("user_props", None),
extra=kwargs.get("optional_params", {}),
response_obj=result,
start_time=start_time,
end_time=end_time,
run_id=self.litellm_call_id,
print_verbose=print_verbose,
)
if callback == "helicone":
print_verbose("reaches helicone for logging!")
model = self.model
messages = kwargs["input"]
heliconeLogger.log_success(
model=model,
messages=messages,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "langfuse":
global langFuseLogger
verbose_logger.debug("reaches langfuse for success logging!")
kwargs = {}
for k, v in self.model_call_details.items():
if (
k != "original_response"
): # copy.deepcopy raises errors as this could be a coroutine
kwargs[k] = v
# this only logs streaming once, complete_streaming_response exists i.e when stream ends
if self.stream:
verbose_logger.debug(
f"is complete_streaming_response in kwargs: {kwargs.get('complete_streaming_response', None)}"
)
if complete_streaming_response is None:
continue
else:
print_verbose("reaches langfuse for streaming logging!")
result = kwargs["complete_streaming_response"]
                    # re-initialize the client when a per-call langfuse key differs from the cached one
                    if langFuseLogger is None or (
                        self.langfuse_public_key is not None
                        and self.langfuse_public_key != langFuseLogger.public_key
                    ):
langFuseLogger = LangFuseLogger(
langfuse_public_key=self.langfuse_public_key,
langfuse_secret=self.langfuse_secret,
)
langFuseLogger.log_event(
kwargs=kwargs,
response_obj=result,
start_time=start_time,
end_time=end_time,
user_id=kwargs.get("user", None),
print_verbose=print_verbose,
)
if callback == "datadog":
global dataDogLogger
verbose_logger.debug("reaches datadog for success logging!")
kwargs = {}
for k, v in self.model_call_details.items():
if (
k != "original_response"
): # copy.deepcopy raises errors as this could be a coroutine
kwargs[k] = v
# this only logs streaming once, complete_streaming_response exists i.e when stream ends
if self.stream:
verbose_logger.debug(
f"datadog: is complete_streaming_response in kwargs: {kwargs.get('complete_streaming_response', None)}"
)
if complete_streaming_response is None:
continue
else:
print_verbose("reaches datadog for streaming logging!")
result = kwargs["complete_streaming_response"]
dataDogLogger.log_event(
kwargs=kwargs,
response_obj=result,
start_time=start_time,
end_time=end_time,
user_id=kwargs.get("user", None),
print_verbose=print_verbose,
)
if callback == "prometheus":
global prometheusLogger
verbose_logger.debug("reaches prometheus for success logging!")
kwargs = {}
for k, v in self.model_call_details.items():
if (
k != "original_response"
): # copy.deepcopy raises errors as this could be a coroutine
kwargs[k] = v
# this only logs streaming once, complete_streaming_response exists i.e when stream ends
if self.stream:
verbose_logger.debug(
f"prometheus: is complete_streaming_response in kwargs: {kwargs.get('complete_streaming_response', None)}"
)
if complete_streaming_response is None:
continue
else:
print_verbose(
"reaches prometheus for streaming logging!"
)
result = kwargs["complete_streaming_response"]
prometheusLogger.log_event(
kwargs=kwargs,
response_obj=result,
start_time=start_time,
end_time=end_time,
user_id=kwargs.get("user", None),
print_verbose=print_verbose,
)
if callback == "generic":
global genericAPILogger
verbose_logger.debug("reaches langfuse for success logging!")
kwargs = {}
for k, v in self.model_call_details.items():
if (
k != "original_response"
): # copy.deepcopy raises errors as this could be a coroutine
kwargs[k] = v
# this only logs streaming once, complete_streaming_response exists i.e when stream ends
if self.stream:
verbose_logger.debug(
f"is complete_streaming_response in kwargs: {kwargs.get('complete_streaming_response', None)}"
)
if complete_streaming_response is None:
break
else:
print_verbose("reaches langfuse for streaming logging!")
result = kwargs["complete_streaming_response"]
if genericAPILogger is None:
genericAPILogger = GenericAPILogger()
genericAPILogger.log_event(
kwargs=kwargs,
response_obj=result,
start_time=start_time,
end_time=end_time,
user_id=kwargs.get("user", None),
print_verbose=print_verbose,
)
if callback == "clickhouse":
global clickHouseLogger
verbose_logger.debug("reaches clickhouse for success logging!")
kwargs = {}
for k, v in self.model_call_details.items():
if (
k != "original_response"
): # copy.deepcopy raises errors as this could be a coroutine
kwargs[k] = v
# this only logs streaming once, complete_streaming_response exists i.e when stream ends
if self.stream:
verbose_logger.debug(
f"is complete_streaming_response in kwargs: {kwargs.get('complete_streaming_response', None)}"
)
if complete_streaming_response is None:
break
else:
print_verbose(
"reaches clickhouse for streaming logging!"
)
result = kwargs["complete_streaming_response"]
if clickHouseLogger is None:
clickHouseLogger = ClickhouseLogger()
clickHouseLogger.log_event(
kwargs=kwargs,
response_obj=result,
start_time=start_time,
end_time=end_time,
user_id=kwargs.get("user", None),
print_verbose=print_verbose,
)
if callback == "greenscale":
kwargs = {}
for k, v in self.model_call_details.items():
if (
k != "original_response"
): # copy.deepcopy raises errors as this could be a coroutine
kwargs[k] = v
# this only logs streaming once, complete_streaming_response exists i.e when stream ends
if self.stream:
verbose_logger.debug(
f"is complete_streaming_response in kwargs: {kwargs.get('complete_streaming_response', None)}"
)
if complete_streaming_response is None:
break
else:
print_verbose(
"reaches greenscale for streaming logging!"
)
result = kwargs["complete_streaming_response"]
greenscaleLogger.log_event(
kwargs=kwargs,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "cache" and litellm.cache is not None:
# this only logs streaming once, complete_streaming_response exists i.e when stream ends
print_verbose("success_callback: reaches cache for logging!")
kwargs = self.model_call_details
if self.stream:
if "complete_streaming_response" not in kwargs:
print_verbose(
f"success_callback: reaches cache for logging, there is no complete_streaming_response. Kwargs={kwargs}\n\n"
)
pass
else:
print_verbose(
"success_callback: reaches cache for logging, there is a complete_streaming_response. Adding to cache"
)
result = kwargs["complete_streaming_response"]
# only add to cache once we have a complete streaming response
litellm.cache.add_cache(result, **kwargs)
if callback == "athina":
deep_copy = {}
for k, v in self.model_call_details.items():
deep_copy[k] = v
athinaLogger.log_event(
kwargs=deep_copy,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "traceloop":
deep_copy = {}
for k, v in self.model_call_details.items():
if k != "original_response":
deep_copy[k] = v
traceloopLogger.log_event(
kwargs=deep_copy,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "s3":
global s3Logger
if s3Logger is None:
s3Logger = S3Logger()
if self.stream:
if "complete_streaming_response" in self.model_call_details:
print_verbose(
"S3Logger Logger: Got Stream Event - Completed Stream Response"
)
s3Logger.log_event(
kwargs=self.model_call_details,
response_obj=self.model_call_details[
"complete_streaming_response"
],
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
else:
print_verbose(
"S3Logger Logger: Got Stream Event - No complete stream response as yet"
)
else:
s3Logger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if (
callback == "openmeter"
and self.model_call_details.get("litellm_params", {}).get(
"acompletion", False
)
== False
and self.model_call_details.get("litellm_params", {}).get(
"aembedding", False
)
== False
and self.model_call_details.get("litellm_params", {}).get(
"aimage_generation", False
)
== False
and self.model_call_details.get("litellm_params", {}).get(
"atranscription", False
)
== False
):
global openMeterLogger
if openMeterLogger is None:
print_verbose("Instantiates openmeter client")
openMeterLogger = OpenMeterLogger()
if self.stream and complete_streaming_response is None:
openMeterLogger.log_stream_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
)
else:
if self.stream and complete_streaming_response:
self.model_call_details["complete_response"] = (
self.model_call_details.get(
"complete_streaming_response", {}
)
)
result = self.model_call_details["complete_response"]
openMeterLogger.log_success_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
)
if (
isinstance(callback, CustomLogger)
and self.model_call_details.get("litellm_params", {}).get(
"acompletion", False
)
== False
and self.model_call_details.get("litellm_params", {}).get(
"aembedding", False
)
== False
and self.model_call_details.get("litellm_params", {}).get(
"aimage_generation", False
)
== False
and self.model_call_details.get("litellm_params", {}).get(
"atranscription", False
)
== False
): # custom logger class
if self.stream and complete_streaming_response is None:
callback.log_stream_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
)
else:
if self.stream and complete_streaming_response:
self.model_call_details["complete_response"] = (
self.model_call_details.get(
"complete_streaming_response", {}
)
)
result = self.model_call_details["complete_response"]
callback.log_success_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
)
if (
callable(callback) == True
and self.model_call_details.get("litellm_params", {}).get(
"acompletion", False
)
== False
and self.model_call_details.get("litellm_params", {}).get(
"aembedding", False
)
== False
and self.model_call_details.get("litellm_params", {}).get(
"aimage_generation", False
)
== False
and self.model_call_details.get("litellm_params", {}).get(
"atranscription", False
)
== False
): # custom logger functions
print_verbose(
f"success callbacks: Running Custom Callback Function"
)
customLogger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
callback_func=callback,
)
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while success logging with integrations {traceback.format_exc()}"
)
print_verbose(
f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
)
if capture_exception: # log this error to sentry for debugging
capture_exception(e)
        except Exception:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while success logging {traceback.format_exc()}"
)
pass
|
(self, result=None, start_time=None, end_time=None, cache_hit=None, **kwargs)
|
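For the async code path, litellm's documented pattern is to register a coroutine directly in success_callback; a minimal sketch, assuming coroutine callbacks are detected and routed to the async handler:

import litellm

async def async_log_event(kwargs, completion_response, start_time, end_time):
    # runs on the event loop, so awaiting I/O (e.g. an async HTTP post) is safe here
    print(f"async success for model={kwargs.get('model')}")

litellm.success_callback = [async_log_event]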
63,483 |
litellm.utils
|
failure_handler
| null |
def failure_handler(
self, exception, traceback_exception, start_time=None, end_time=None
):
print_verbose(f"Logging Details LiteLLM-Failure Call")
try:
start_time, end_time = self._failure_handler_helper_fn(
exception=exception,
traceback_exception=traceback_exception,
start_time=start_time,
end_time=end_time,
)
        callbacks = [] # init this to empty in case it's not created
if self.dynamic_failure_callbacks is not None and isinstance(
self.dynamic_failure_callbacks, list
):
callbacks = self.dynamic_failure_callbacks
## keep the internal functions ##
for callback in litellm.failure_callback:
if (
isinstance(callback, CustomLogger)
and "_PROXY_" in callback.__class__.__name__
):
callbacks.append(callback)
else:
callbacks = litellm.failure_callback
        result = None # result sent to all loggers, init this to None in case it's not created
self.redact_message_input_output_from_logging(result=result)
for callback in callbacks:
try:
if callback == "lite_debugger":
print_verbose("reaches lite_debugger for logging!")
print_verbose(f"liteDebuggerClient: {liteDebuggerClient}")
result = {
"model": self.model,
"created": time.time(),
"error": traceback_exception,
"usage": {
"prompt_tokens": prompt_token_calculator(
self.model, messages=self.messages
),
"completion_tokens": 0,
},
}
liteDebuggerClient.log_event(
model=self.model,
messages=self.messages,
end_user=self.model_call_details.get("user", "default"),
response_obj=result,
start_time=start_time,
end_time=end_time,
litellm_call_id=self.litellm_call_id,
print_verbose=print_verbose,
call_type=self.call_type,
stream=self.stream,
)
elif callback == "lunary":
print_verbose("reaches lunary for logging error!")
model = self.model
input = self.model_call_details["input"]
_type = (
"embed"
if self.call_type == CallTypes.embedding.value
else "llm"
)
lunaryLogger.log_event(
type=_type,
event="error",
user_id=self.model_call_details.get("user", "default"),
model=model,
input=input,
error=traceback_exception,
run_id=self.litellm_call_id,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
elif callback == "sentry":
print_verbose("sending exception to sentry")
if capture_exception:
capture_exception(exception)
else:
print_verbose(
f"capture exception not initialized: {capture_exception}"
)
elif callable(callback): # custom logger functions
customLogger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
callback_func=callback,
)
elif (
isinstance(callback, CustomLogger)
and self.model_call_details.get("litellm_params", {}).get(
"acompletion", False
)
== False
and self.model_call_details.get("litellm_params", {}).get(
"aembedding", False
)
== False
): # custom logger class
callback.log_failure_event(
start_time=start_time,
end_time=end_time,
response_obj=result,
kwargs=self.model_call_details,
)
elif callback == "langfuse":
global langFuseLogger
verbose_logger.debug("reaches langfuse for logging failure")
kwargs = {}
for k, v in self.model_call_details.items():
if (
k != "original_response"
): # copy.deepcopy raises errors as this could be a coroutine
kwargs[k] = v
# this only logs streaming once, complete_streaming_response exists i.e when stream ends
                    # re-initialize the client when a per-call langfuse key differs from the cached one
                    if langFuseLogger is None or (
                        self.langfuse_public_key is not None
                        and self.langfuse_public_key != langFuseLogger.public_key
                    ):
langFuseLogger = LangFuseLogger(
langfuse_public_key=self.langfuse_public_key,
langfuse_secret=self.langfuse_secret,
)
langFuseLogger.log_event(
start_time=start_time,
end_time=end_time,
response_obj=None,
user_id=kwargs.get("user", None),
print_verbose=print_verbose,
status_message=str(exception),
level="ERROR",
kwargs=self.model_call_details,
)
elif callback == "prometheus":
global prometheusLogger
verbose_logger.debug("reaches prometheus for success logging!")
kwargs = {}
for k, v in self.model_call_details.items():
if (
k != "original_response"
): # copy.deepcopy raises errors as this could be a coroutine
kwargs[k] = v
kwargs["exception"] = str(exception)
prometheusLogger.log_event(
kwargs=kwargs,
response_obj=result,
start_time=start_time,
end_time=end_time,
user_id=kwargs.get("user", None),
print_verbose=print_verbose,
)
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while failure logging with integrations {str(e)}"
)
print_verbose(
f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
)
if capture_exception: # log this error to sentry for debugging
capture_exception(e)
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while failure logging {traceback.format_exc()}"
)
pass
|
(self, exception, traceback_exception, start_time=None, end_time=None)
|
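String-named integrations like "sentry" or "lunary" are matched by name in the loop above. A short usage sketch; the bad api_key is just a way to force a failure:

import litellm

litellm.failure_callback = ["sentry"]  # assumes SENTRY_DSN is set in the environment

try:
    litellm.completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hi"}],
        api_key="bad-key",  # deliberately invalid to trigger the failure path
    )
except Exception:
    pass  # the exception is still forwarded to every registered failure callback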
63,484 |
litellm.utils
|
post_call
| null |
def post_call(
self, original_response, input=None, api_key=None, additional_args={}
):
# Log the exact result from the LLM API, for streaming - log the type of response received
litellm.error_logs["POST_CALL"] = locals()
if isinstance(original_response, dict):
original_response = json.dumps(original_response)
try:
self.model_call_details["input"] = input
self.model_call_details["api_key"] = api_key
self.model_call_details["original_response"] = original_response
self.model_call_details["additional_args"] = additional_args
self.model_call_details["log_event_type"] = "post_api_call"
# User Logging -> if you pass in a custom logging function
print_verbose(
f"RAW RESPONSE:\n{self.model_call_details.get('original_response', self.model_call_details)}\n\n",
log_level="DEBUG",
)
if self.logger_fn and callable(self.logger_fn):
try:
self.logger_fn(
self.model_call_details
) # Expectation: any logger function passed in by the user should accept a dict object
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
self.redact_message_input_output_from_logging(result=original_response)
# Input Integration Logging -> If you want to log the fact that an attempt to call the model was made
callbacks = litellm.input_callback + self.dynamic_input_callbacks
for callback in callbacks:
try:
if callback == "lite_debugger":
print_verbose("reaches litedebugger for post-call logging!")
print_verbose(f"liteDebuggerClient: {liteDebuggerClient}")
liteDebuggerClient.post_call_log_event(
original_response=original_response,
litellm_call_id=self.litellm_params["litellm_call_id"],
print_verbose=print_verbose,
call_type=self.call_type,
stream=self.stream,
)
elif callback == "sentry" and add_breadcrumb:
print_verbose("reaches sentry breadcrumbing")
try:
details_to_log = copy.deepcopy(self.model_call_details)
                    except Exception:
details_to_log = self.model_call_details
if litellm.turn_off_message_logging:
                        # make a copy of the model_call_details and log it
details_to_log.pop("messages", None)
details_to_log.pop("input", None)
details_to_log.pop("prompt", None)
add_breadcrumb(
category="litellm.llm_call",
message=f"Model Call Details post-call: {details_to_log}",
level="info",
)
elif isinstance(callback, CustomLogger): # custom logger class
callback.log_post_api_call(
kwargs=self.model_call_details,
response_obj=None,
start_time=self.start_time,
end_time=None,
)
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while post-call logging with integrations {traceback.format_exc()}"
)
print_verbose(
f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
)
if capture_exception: # log this error to sentry for debugging
capture_exception(e)
    except Exception:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
pass
|
(self, original_response, input=None, api_key=None, additional_args={})
|
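post_call is where a user-supplied logger_fn first sees the raw response: it receives the full model_call_details dict, including "original_response" and log_event_type == "post_api_call". A sketch of passing one in:

import litellm

def my_logger_fn(model_call_details: dict):
    # called at each logging stage; post_call sets log_event_type to "post_api_call"
    print(model_call_details.get("log_event_type"))

litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
    logger_fn=my_logger_fn,
)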
63,485 |
litellm.utils
|
pre_call
| null |
def pre_call(self, input, api_key, model=None, additional_args={}):
# Log the exact input to the LLM API
litellm.error_logs["PRE_CALL"] = locals()
try:
self._pre_call(
input=input,
api_key=api_key,
model=model,
additional_args=additional_args,
)
# User Logging -> if you pass in a custom logging function
headers = additional_args.get("headers", {})
if headers is None:
headers = {}
data = additional_args.get("complete_input_dict", {})
api_base = additional_args.get("api_base", "")
self.model_call_details["litellm_params"]["api_base"] = str(
api_base
) # used for alerting
masked_headers = {
k: (v[:-20] + "*" * 20) if (isinstance(v, str) and len(v) > 20) else v
for k, v in headers.items()
}
formatted_headers = " ".join(
[f"-H '{k}: {v}'" for k, v in masked_headers.items()]
)
verbose_logger.debug(f"PRE-API-CALL ADDITIONAL ARGS: {additional_args}")
curl_command = "\n\nPOST Request Sent from LiteLLM:\n"
curl_command += "curl -X POST \\\n"
curl_command += f"{api_base} \\\n"
curl_command += (
f"{formatted_headers} \\\n" if formatted_headers.strip() != "" else ""
)
curl_command += f"-d '{str(data)}'\n"
if additional_args.get("request_str", None) is not None:
# print the sagemaker / bedrock client request
curl_command = "\nRequest Sent from LiteLLM:\n"
curl_command += additional_args.get("request_str", None)
elif api_base == "":
curl_command = self.model_call_details
# only print verbose if verbose logger is not set
if verbose_logger.level == 0:
            # this means the verbose logger was not switched on - the user is relying on litellm.set_verbose=True
print_verbose(f"\033[92m{curl_command}\033[0m\n")
if litellm.json_logs:
verbose_logger.info(
"POST Request Sent from LiteLLM",
extra={"api_base": {api_base}, **masked_headers},
)
else:
verbose_logger.info(f"\033[92m{curl_command}\033[0m\n")
if self.logger_fn and callable(self.logger_fn):
try:
self.logger_fn(
self.model_call_details
) # Expectation: any logger function passed in by the user should accept a dict object
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
# Input Integration Logging -> If you want to log the fact that an attempt to call the model was made
callbacks = litellm.input_callback + self.dynamic_input_callbacks
for callback in callbacks:
try:
if callback == "supabase":
print_verbose("reaches supabase for logging!")
model = self.model_call_details["model"]
messages = self.model_call_details["input"]
print_verbose(f"supabaseClient: {supabaseClient}")
supabaseClient.input_log_event(
model=model,
messages=messages,
end_user=self.model_call_details.get("user", "default"),
litellm_call_id=self.litellm_params["litellm_call_id"],
print_verbose=print_verbose,
)
elif callback == "sentry" and add_breadcrumb:
try:
details_to_log = copy.deepcopy(self.model_call_details)
                    except Exception:
details_to_log = self.model_call_details
if litellm.turn_off_message_logging:
                        # make a copy of the model_call_details and log it
details_to_log.pop("messages", None)
details_to_log.pop("input", None)
details_to_log.pop("prompt", None)
add_breadcrumb(
category="litellm.llm_call",
message=f"Model Call Details pre-call: {details_to_log}",
level="info",
)
elif isinstance(callback, CustomLogger): # custom logger class
callback.log_pre_api_call(
model=self.model,
messages=self.messages,
kwargs=self.model_call_details,
)
elif callable(callback): # custom logger functions
customLogger.log_input_event(
model=self.model,
messages=self.messages,
kwargs=self.model_call_details,
print_verbose=print_verbose,
callback_func=callback,
)
except Exception as e:
traceback.print_exc()
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while input logging with integrations {traceback.format_exc()}"
)
print_verbose(
f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
)
if capture_exception: # log this error to sentry for debugging
capture_exception(e)
    except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
print_verbose(
f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
)
if capture_exception: # log this error to sentry for debugging
capture_exception(e)
|
(self, input, api_key, model=None, additional_args={})
|
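The masking rule used for the printed curl command keeps every string header except its last 20 characters, which are starred out. A standalone sketch of just that rule:

# same expression as in pre_call, extracted for illustration
def mask_headers(headers: dict) -> dict:
    return {
        k: (v[:-20] + "*" * 20) if (isinstance(v, str) and len(v) > 20) else v
        for k, v in headers.items()
    }

print(mask_headers({"Authorization": "Bearer sk-aaaaaaaaaaaaaaaaaaaaaaaa"}))
# {'Authorization': 'Bearer sk-aaaa********************'}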
63,486 |
litellm.utils
|
redact_message_input_output_from_logging
|
Removes messages, prompts, input, response from logging. This modifies the data in-place
only redacts when litellm.turn_off_message_logging == True
|
def redact_message_input_output_from_logging(self, result):
"""
Removes messages, prompts, input, response from logging. This modifies the data in-place
only redacts when litellm.turn_off_message_logging == True
"""
# check if user opted out of logging message/response to callbacks
if litellm.turn_off_message_logging == True:
# remove messages, prompts, input, response from logging
self.model_call_details["messages"] = "redacted-by-litellm"
self.model_call_details["prompt"] = ""
self.model_call_details["input"] = ""
# response cleaning
# ChatCompletion Responses
if self.stream and "complete_streaming_response" in self.model_call_details:
_streaming_response = self.model_call_details[
"complete_streaming_response"
]
for choice in _streaming_response.choices:
if isinstance(choice, litellm.Choices):
choice.message.content = "redacted-by-litellm"
elif isinstance(choice, litellm.utils.StreamingChoices):
choice.delta.content = "redacted-by-litellm"
else:
if result is not None:
if isinstance(result, litellm.ModelResponse):
if hasattr(result, "choices") and result.choices is not None:
for choice in result.choices:
if isinstance(choice, litellm.Choices):
choice.message.content = "redacted-by-litellm"
elif isinstance(choice, litellm.utils.StreamingChoices):
choice.delta.content = "redacted-by-litellm"
|
(self, result)
|
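Because redaction mutates the logged payload in place, every downstream callback sees "redacted-by-litellm" instead of the real prompts and completions. Usage sketch:

import litellm

litellm.turn_off_message_logging = True  # opt out of logging message content
litellm.success_callback = ["langfuse"]

litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "secret prompt"}],
)
# langfuse (and any other callback) now records "redacted-by-litellm"
# in place of messages and response content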
63,487 |
litellm.utils
|
success_handler
| null |
def success_handler(
self, result=None, start_time=None, end_time=None, cache_hit=None, **kwargs
):
print_verbose(f"Logging Details LiteLLM-Success Call: {cache_hit}")
start_time, end_time, result = self._success_handler_helper_fn(
start_time=start_time,
end_time=end_time,
result=result,
cache_hit=cache_hit,
)
# print(f"original response in success handler: {self.model_call_details['original_response']}")
try:
print_verbose(f"success callbacks: {litellm.success_callback}")
## BUILD COMPLETE STREAMED RESPONSE
complete_streaming_response = None
if self.stream and isinstance(result, ModelResponse):
if (
result.choices[0].finish_reason is not None
): # if it's the last chunk
self.sync_streaming_chunks.append(result)
# print_verbose(f"final set of received chunks: {self.sync_streaming_chunks}")
try:
complete_streaming_response = litellm.stream_chunk_builder(
self.sync_streaming_chunks,
messages=self.model_call_details.get("messages", None),
start_time=start_time,
end_time=end_time,
)
except Exception as e:
complete_streaming_response = None
else:
self.sync_streaming_chunks.append(result)
if complete_streaming_response is not None:
print_verbose(
f"Logging Details LiteLLM-Success Call streaming complete"
)
self.model_call_details["complete_streaming_response"] = (
complete_streaming_response
)
try:
if self.model_call_details.get("cache_hit", False) == True:
self.model_call_details["response_cost"] = 0.0
else:
# check if base_model set on azure
base_model = _get_base_model_from_metadata(
model_call_details=self.model_call_details
)
# base_model defaults to None if not set on model_info
self.model_call_details["response_cost"] = (
litellm.completion_cost(
completion_response=complete_streaming_response,
model=base_model,
)
)
verbose_logger.debug(
f"Model={self.model}; cost={self.model_call_details['response_cost']}"
)
except litellm.NotFoundError as e:
verbose_logger.debug(
f"Model={self.model} not found in completion cost map."
)
self.model_call_details["response_cost"] = None
if self.dynamic_success_callbacks is not None and isinstance(
self.dynamic_success_callbacks, list
):
callbacks = self.dynamic_success_callbacks
## keep the internal functions ##
for callback in litellm.success_callback:
if (
isinstance(callback, CustomLogger)
and "_PROXY_" in callback.__class__.__name__
):
callbacks.append(callback)
else:
callbacks = litellm.success_callback
self.redact_message_input_output_from_logging(result=result)
for callback in callbacks:
try:
litellm_params = self.model_call_details.get("litellm_params", {})
if litellm_params.get("no-log", False) == True:
                    # proxy cost tracking callbacks should run
if not (
isinstance(callback, CustomLogger)
and "_PROXY_" in callback.__class__.__name__
):
print_verbose("no-log request, skipping logging")
continue
if callback == "lite_debugger":
print_verbose("reaches lite_debugger for logging!")
print_verbose(f"liteDebuggerClient: {liteDebuggerClient}")
print_verbose(
f"liteDebuggerClient details function {self.call_type} and stream set to {self.stream}"
)
liteDebuggerClient.log_event(
end_user=kwargs.get("user", "default"),
response_obj=result,
start_time=start_time,
end_time=end_time,
litellm_call_id=self.litellm_call_id,
print_verbose=print_verbose,
call_type=self.call_type,
stream=self.stream,
)
if callback == "promptlayer":
print_verbose("reaches promptlayer for logging!")
promptLayerLogger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "supabase":
print_verbose("reaches supabase for logging!")
kwargs = self.model_call_details
# this only logs streaming once, complete_streaming_response exists i.e when stream ends
if self.stream:
if "complete_streaming_response" not in kwargs:
return
else:
print_verbose("reaches supabase for streaming logging!")
result = kwargs["complete_streaming_response"]
model = kwargs["model"]
messages = kwargs["messages"]
optional_params = kwargs.get("optional_params", {})
litellm_params = kwargs.get("litellm_params", {})
supabaseClient.log_event(
model=model,
messages=messages,
end_user=optional_params.get("user", "default"),
response_obj=result,
start_time=start_time,
end_time=end_time,
litellm_call_id=litellm_params.get(
"litellm_call_id", str(uuid.uuid4())
),
print_verbose=print_verbose,
)
if callback == "wandb":
print_verbose("reaches wandb for logging!")
weightsBiasesLogger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "langsmith":
print_verbose("reaches langsmith for logging!")
if self.stream:
if "complete_streaming_response" not in kwargs:
break
else:
print_verbose(
"reaches langsmith for streaming logging!"
)
result = kwargs["complete_streaming_response"]
langsmithLogger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "lunary":
print_verbose("reaches lunary for logging!")
model = self.model
kwargs = self.model_call_details
input = kwargs.get("messages", kwargs.get("input", None))
type = (
"embed"
if self.call_type == CallTypes.embedding.value
else "llm"
)
# this only logs streaming once, complete_streaming_response exists i.e when stream ends
if self.stream:
if "complete_streaming_response" not in kwargs:
break
else:
result = kwargs["complete_streaming_response"]
lunaryLogger.log_event(
type=type,
kwargs=kwargs,
event="end",
model=model,
input=input,
user_id=kwargs.get("user", None),
# user_props=self.model_call_details.get("user_props", None),
extra=kwargs.get("optional_params", {}),
response_obj=result,
start_time=start_time,
end_time=end_time,
run_id=self.litellm_call_id,
print_verbose=print_verbose,
)
if callback == "helicone":
print_verbose("reaches helicone for logging!")
model = self.model
messages = kwargs["input"]
heliconeLogger.log_success(
model=model,
messages=messages,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "langfuse":
global langFuseLogger
verbose_logger.debug("reaches langfuse for success logging!")
kwargs = {}
for k, v in self.model_call_details.items():
if (
k != "original_response"
): # copy.deepcopy raises errors as this could be a coroutine
kwargs[k] = v
# this only logs streaming once, complete_streaming_response exists i.e when stream ends
if self.stream:
verbose_logger.debug(
f"is complete_streaming_response in kwargs: {kwargs.get('complete_streaming_response', None)}"
)
if complete_streaming_response is None:
continue
else:
print_verbose("reaches langfuse for streaming logging!")
result = kwargs["complete_streaming_response"]
                    # re-initialize the client when a per-call langfuse key differs from the cached one
                    if langFuseLogger is None or (
                        self.langfuse_public_key is not None
                        and self.langfuse_public_key != langFuseLogger.public_key
                    ):
langFuseLogger = LangFuseLogger(
langfuse_public_key=self.langfuse_public_key,
langfuse_secret=self.langfuse_secret,
)
langFuseLogger.log_event(
kwargs=kwargs,
response_obj=result,
start_time=start_time,
end_time=end_time,
user_id=kwargs.get("user", None),
print_verbose=print_verbose,
)
if callback == "datadog":
global dataDogLogger
verbose_logger.debug("reaches datadog for success logging!")
kwargs = {}
for k, v in self.model_call_details.items():
if (
k != "original_response"
): # copy.deepcopy raises errors as this could be a coroutine
kwargs[k] = v
# this only logs streaming once, complete_streaming_response exists i.e when stream ends
if self.stream:
verbose_logger.debug(
f"datadog: is complete_streaming_response in kwargs: {kwargs.get('complete_streaming_response', None)}"
)
if complete_streaming_response is None:
continue
else:
print_verbose("reaches datadog for streaming logging!")
result = kwargs["complete_streaming_response"]
dataDogLogger.log_event(
kwargs=kwargs,
response_obj=result,
start_time=start_time,
end_time=end_time,
user_id=kwargs.get("user", None),
print_verbose=print_verbose,
)
if callback == "prometheus":
global prometheusLogger
verbose_logger.debug("reaches prometheus for success logging!")
kwargs = {}
for k, v in self.model_call_details.items():
if (
k != "original_response"
): # copy.deepcopy raises errors as this could be a coroutine
kwargs[k] = v
# this only logs streaming once, complete_streaming_response exists i.e when stream ends
if self.stream:
verbose_logger.debug(
f"prometheus: is complete_streaming_response in kwargs: {kwargs.get('complete_streaming_response', None)}"
)
if complete_streaming_response is None:
continue
else:
print_verbose(
"reaches prometheus for streaming logging!"
)
result = kwargs["complete_streaming_response"]
prometheusLogger.log_event(
kwargs=kwargs,
response_obj=result,
start_time=start_time,
end_time=end_time,
user_id=kwargs.get("user", None),
print_verbose=print_verbose,
)
if callback == "generic":
global genericAPILogger
verbose_logger.debug("reaches langfuse for success logging!")
kwargs = {}
for k, v in self.model_call_details.items():
if (
k != "original_response"
): # copy.deepcopy raises errors as this could be a coroutine
kwargs[k] = v
# this only logs streaming once, complete_streaming_response exists i.e when stream ends
if self.stream:
verbose_logger.debug(
f"is complete_streaming_response in kwargs: {kwargs.get('complete_streaming_response', None)}"
)
if complete_streaming_response is None:
break
else:
print_verbose("reaches langfuse for streaming logging!")
result = kwargs["complete_streaming_response"]
if genericAPILogger is None:
genericAPILogger = GenericAPILogger()
genericAPILogger.log_event(
kwargs=kwargs,
response_obj=result,
start_time=start_time,
end_time=end_time,
user_id=kwargs.get("user", None),
print_verbose=print_verbose,
)
if callback == "clickhouse":
global clickHouseLogger
verbose_logger.debug("reaches clickhouse for success logging!")
kwargs = {}
for k, v in self.model_call_details.items():
if (
k != "original_response"
): # copy.deepcopy raises errors as this could be a coroutine
kwargs[k] = v
# this only logs streaming once, complete_streaming_response exists i.e when stream ends
if self.stream:
verbose_logger.debug(
f"is complete_streaming_response in kwargs: {kwargs.get('complete_streaming_response', None)}"
)
if complete_streaming_response is None:
break
else:
print_verbose(
"reaches clickhouse for streaming logging!"
)
result = kwargs["complete_streaming_response"]
if clickHouseLogger is None:
clickHouseLogger = ClickhouseLogger()
clickHouseLogger.log_event(
kwargs=kwargs,
response_obj=result,
start_time=start_time,
end_time=end_time,
user_id=kwargs.get("user", None),
print_verbose=print_verbose,
)
if callback == "greenscale":
kwargs = {}
for k, v in self.model_call_details.items():
if (
k != "original_response"
): # copy.deepcopy raises errors as this could be a coroutine
kwargs[k] = v
# this only logs streaming once, complete_streaming_response exists i.e when stream ends
if self.stream:
verbose_logger.debug(
f"is complete_streaming_response in kwargs: {kwargs.get('complete_streaming_response', None)}"
)
if complete_streaming_response is None:
break
else:
print_verbose(
"reaches greenscale for streaming logging!"
)
result = kwargs["complete_streaming_response"]
greenscaleLogger.log_event(
kwargs=kwargs,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "cache" and litellm.cache is not None:
# this only logs streaming once, complete_streaming_response exists i.e when stream ends
print_verbose("success_callback: reaches cache for logging!")
kwargs = self.model_call_details
if self.stream:
if "complete_streaming_response" not in kwargs:
print_verbose(
f"success_callback: reaches cache for logging, there is no complete_streaming_response. Kwargs={kwargs}\n\n"
)
pass
else:
print_verbose(
"success_callback: reaches cache for logging, there is a complete_streaming_response. Adding to cache"
)
result = kwargs["complete_streaming_response"]
# only add to cache once we have a complete streaming response
litellm.cache.add_cache(result, **kwargs)
if callback == "athina":
deep_copy = {}
for k, v in self.model_call_details.items():
deep_copy[k] = v
athinaLogger.log_event(
kwargs=deep_copy,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "traceloop":
deep_copy = {}
for k, v in self.model_call_details.items():
if k != "original_response":
deep_copy[k] = v
traceloopLogger.log_event(
kwargs=deep_copy,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "s3":
global s3Logger
if s3Logger is None:
s3Logger = S3Logger()
if self.stream:
if "complete_streaming_response" in self.model_call_details:
print_verbose(
"S3Logger Logger: Got Stream Event - Completed Stream Response"
)
s3Logger.log_event(
kwargs=self.model_call_details,
response_obj=self.model_call_details[
"complete_streaming_response"
],
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
else:
print_verbose(
"S3Logger Logger: Got Stream Event - No complete stream response as yet"
)
else:
s3Logger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if (
callback == "openmeter"
and self.model_call_details.get("litellm_params", {}).get(
"acompletion", False
)
== False
and self.model_call_details.get("litellm_params", {}).get(
"aembedding", False
)
== False
and self.model_call_details.get("litellm_params", {}).get(
"aimage_generation", False
)
== False
and self.model_call_details.get("litellm_params", {}).get(
"atranscription", False
)
== False
):
global openMeterLogger
if openMeterLogger is None:
print_verbose("Instantiates openmeter client")
openMeterLogger = OpenMeterLogger()
if self.stream and complete_streaming_response is None:
openMeterLogger.log_stream_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
)
else:
if self.stream and complete_streaming_response:
self.model_call_details["complete_response"] = (
self.model_call_details.get(
"complete_streaming_response", {}
)
)
result = self.model_call_details["complete_response"]
openMeterLogger.log_success_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
)
if (
isinstance(callback, CustomLogger)
and self.model_call_details.get("litellm_params", {}).get(
"acompletion", False
)
== False
and self.model_call_details.get("litellm_params", {}).get(
"aembedding", False
)
== False
and self.model_call_details.get("litellm_params", {}).get(
"aimage_generation", False
)
== False
and self.model_call_details.get("litellm_params", {}).get(
"atranscription", False
)
== False
): # custom logger class
if self.stream and complete_streaming_response is None:
callback.log_stream_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
)
else:
if self.stream and complete_streaming_response:
self.model_call_details["complete_response"] = (
self.model_call_details.get(
"complete_streaming_response", {}
)
)
result = self.model_call_details["complete_response"]
callback.log_success_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
)
if (
callable(callback) == True
and self.model_call_details.get("litellm_params", {}).get(
"acompletion", False
)
== False
and self.model_call_details.get("litellm_params", {}).get(
"aembedding", False
)
== False
and self.model_call_details.get("litellm_params", {}).get(
"aimage_generation", False
)
== False
and self.model_call_details.get("litellm_params", {}).get(
"atranscription", False
)
== False
): # custom logger functions
print_verbose(
f"success callbacks: Running Custom Callback Function"
)
customLogger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
callback_func=callback,
)
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while success logging with integrations {traceback.format_exc()}"
)
print_verbose(
f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
)
if capture_exception: # log this error to sentry for debugging
capture_exception(e)
except:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while success logging {traceback.format_exc()}"
)
pass
|
(self, result=None, start_time=None, end_time=None, cache_hit=None, **kwargs)
|
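For the "custom logger functions" branch above, a callback is any plain callable registered on `litellm.success_callback`; `customLogger.log_event` then forwards the call details to it. A minimal sketch (the callback name is illustrative; `start_time`/`end_time` are assumed to arrive as datetimes, as elsewhere in this module):

```py
import litellm

def track_latency_callback(kwargs, completion_response, start_time, end_time):
    # receives the same arguments customLogger.log_event forwards above
    latency = (end_time - start_time).total_seconds()
    print("model:", kwargs.get("model"), "latency:", latency)

litellm.success_callback = [track_latency_callback]
```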
63,488 |
litellm.utils
|
update_environment_variables
| null |
def update_environment_variables(
self, model, user, optional_params, litellm_params, **additional_params
):
self.optional_params = optional_params
self.model = model
self.user = user
self.litellm_params = litellm_params
self.logger_fn = litellm_params.get("logger_fn", None)
print_verbose(f"self.optional_params: {self.optional_params}")
self.model_call_details = {
"model": self.model,
"messages": self.messages,
"optional_params": self.optional_params,
"litellm_params": self.litellm_params,
"start_time": self.start_time,
"stream": self.stream,
"user": user,
"call_type": str(self.call_type),
"litellm_call_id": self.litellm_call_id,
"completion_start_time": self.completion_start_time,
**self.optional_params,
**additional_params,
}
|
(self, model, user, optional_params, litellm_params, **additional_params)
|
63,489 |
litellm.llms.maritalk
|
MaritTalkConfig
|
The class `MaritTalkConfig` provides configuration for the MaritTalk API interface. Here are the parameters:
- `max_tokens` (integer): Maximum number of tokens the model will generate as part of the response. Default is 1.
- `model` (string): The model used for conversation. Default is 'maritalk'.
- `do_sample` (boolean): If set to True, the API will generate a response using sampling. Default is True.
- `temperature` (number): A non-negative float controlling the randomness in generation. Lower temperatures result in less random generations. Default is 0.7.
- `top_p` (number): Selection threshold for token inclusion based on cumulative probability. Default is 0.95.
- `repetition_penalty` (number): Penalty for repetition in the generated conversation. Default is 1.
- `stopping_tokens` (list of string): List of tokens at which the conversation can be stopped.
|
class MaritTalkConfig:
"""
The class `MaritTalkConfig` provides configuration for the MaritTalk API interface. Here are the parameters:
- `max_tokens` (integer): Maximum number of tokens the model will generate as part of the response. Default is 1.
- `model` (string): The model used for conversation. Default is 'maritalk'.
- `do_sample` (boolean): If set to True, the API will generate a response using sampling. Default is True.
- `temperature` (number): A non-negative float controlling the randomness in generation. Lower temperatures result in less random generations. Default is 0.7.
- `top_p` (number): Selection threshold for token inclusion based on cumulative probability. Default is 0.95.
- `repetition_penalty` (number): Penalty for repetition in the generated conversation. Default is 1.
- `stopping_tokens` (list of string): List of tokens at which the conversation can be stopped.
"""
max_tokens: Optional[int] = None
model: Optional[str] = None
do_sample: Optional[bool] = None
temperature: Optional[float] = None
top_p: Optional[float] = None
repetition_penalty: Optional[float] = None
stopping_tokens: Optional[List[str]] = None
def __init__(
self,
max_tokens: Optional[int] = None,
model: Optional[str] = None,
do_sample: Optional[bool] = None,
temperature: Optional[float] = None,
top_p: Optional[float] = None,
repetition_penalty: Optional[float] = None,
stopping_tokens: Optional[List[str]] = None,
) -> None:
locals_ = locals()
for key, value in locals_.items():
if key != "self" and value is not None:
setattr(self.__class__, key, value)
@classmethod
def get_config(cls):
return {
k: v
for k, v in cls.__dict__.items()
if not k.startswith("__")
and not isinstance(
v,
(
types.FunctionType,
types.BuiltinFunctionType,
classmethod,
staticmethod,
),
)
and v is not None
}
|
(max_tokens: Optional[int] = None, model: Optional[str] = None, do_sample: Optional[bool] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, repetition_penalty: Optional[float] = None, stopping_tokens: Optional[List[str]] = None) -> None
|
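All of the provider config classes in this dump share the same pattern: `__init__` writes non-None arguments onto the class itself, and `get_config()` reads back every attribute that is not a dunder, not callable, and not None. A minimal sketch of that round trip (values are illustrative):

```py
from litellm.llms.maritalk import MaritTalkConfig

MaritTalkConfig(temperature=0.4, max_tokens=256)  # note: mutates the *class*, not an instance
print(MaritTalkConfig.get_config())               # {'max_tokens': 256, 'temperature': 0.4}
```

Because the values land on the class, they persist across all later instances until overwritten.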
63,490 |
litellm.llms.maritalk
|
__init__
| null |
def __init__(
self,
max_tokens: Optional[int] = None,
model: Optional[str] = None,
do_sample: Optional[bool] = None,
temperature: Optional[float] = None,
top_p: Optional[float] = None,
repetition_penalty: Optional[float] = None,
stopping_tokens: Optional[List[str]] = None,
) -> None:
locals_ = locals()
for key, value in locals_.items():
if key != "self" and value is not None:
setattr(self.__class__, key, value)
|
(self, max_tokens: Optional[int] = None, model: Optional[str] = None, do_sample: Optional[bool] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, repetition_penalty: Optional[float] = None, stopping_tokens: Optional[List[str]] = None) -> NoneType
|
63,491 |
litellm.utils
|
Message
| null |
class Message(OpenAIObject):
def __init__(
self,
content: Optional[str] = "default",
role="assistant",
logprobs=None,
function_call=None,
tool_calls=None,
**params,
):
super(Message, self).__init__(**params)
self.content = content
self.role = role
if function_call is not None:
self.function_call = FunctionCall(**function_call)
if tool_calls is not None:
self.tool_calls = []
for tool_call in tool_calls:
self.tool_calls.append(ChatCompletionMessageToolCall(**tool_call))
if logprobs is not None:
self._logprobs = ChoiceLogprobs(**logprobs)
def get(self, key, default=None):
# Custom .get() method to access attributes with a default value if the attribute doesn't exist
return getattr(self, key, default)
def __getitem__(self, key):
# Allow dictionary-style access to attributes
return getattr(self, key)
def __setitem__(self, key, value):
# Allow dictionary-style assignment of attributes
setattr(self, key, value)
def json(self, **kwargs):
try:
return self.model_dump() # noqa
except:
# if using pydantic v1
return self.dict()
|
(content: Optional[str] = 'default', role='assistant', logprobs=None, function_call=None, tool_calls=None, **params)
|
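`Message` layers dictionary-style access on top of the underlying pydantic object, so attribute lookups, key lookups, and `.get()` all work. A small usage sketch:

```py
from litellm.utils import Message

msg = Message(content="Hello!", role="assistant")
print(msg.content)               # attribute access -> Hello!
print(msg["role"])               # dictionary-style access via __getitem__ -> assistant
print(msg.get("function_call"))  # .get() falls back to the default (None) when absent
```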
63,499 |
litellm.utils
|
__init__
| null |
def __init__(
self,
content: Optional[str] = "default",
role="assistant",
logprobs=None,
function_call=None,
tool_calls=None,
**params,
):
super(Message, self).__init__(**params)
self.content = content
self.role = role
if function_call is not None:
self.function_call = FunctionCall(**function_call)
if tool_calls is not None:
self.tool_calls = []
for tool_call in tool_calls:
self.tool_calls.append(ChatCompletionMessageToolCall(**tool_call))
if logprobs is not None:
self._logprobs = ChoiceLogprobs(**logprobs)
|
(self, content: Optional[str] = 'default', role='assistant', logprobs=None, function_call=None, tool_calls=None, **params)
|
63,525 |
litellm.types.llms.openai
|
MessageData
| null |
class MessageData(TypedDict):
role: Literal["user", "assistant"]
content: str
attachments: Optional[List[Attachment]]
metadata: Optional[dict]
| null |
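`MessageData` is a `TypedDict`, so at runtime it is just a plain `dict`; the annotations only constrain static type checkers. For illustration:

```py
from litellm.types.llms.openai import MessageData

data: MessageData = {
    "role": "user",
    "content": "Summarize this thread.",
    "attachments": None,
    "metadata": None,
}
print(type(data))  # <class 'dict'>
```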
63,526 |
litellm.types.router
|
ModelConfig
| null |
class ModelConfig(BaseModel):
model_name: str
litellm_params: Union[CompletionRequest, EmbeddingRequest]
tpm: int
rpm: int
class Config:
protected_namespaces = ()
|
(*, model_name: str, litellm_params: Union[litellm.types.completion.CompletionRequest, litellm.types.embedding.EmbeddingRequest], tpm: int, rpm: int) -> None
|
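A sketch of constructing a `ModelConfig`, assuming `model` is the only required field of the pydantic `CompletionRequest` (values are illustrative):

```py
from litellm.types.completion import CompletionRequest
from litellm.types.router import ModelConfig

cfg = ModelConfig(
    model_name="gpt-3.5-turbo",
    litellm_params=CompletionRequest(model="gpt-3.5-turbo"),
    tpm=100_000,
    rpm=1_000,
)
```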
63,555 |
litellm.types.router
|
ModelInfo
| null |
class ModelInfo(BaseModel):
id: Optional[
str
] # Allow id to be optional on input, but it will always be present as a str in the model instance
db_model: bool = (
False # used for proxy - to separate models which are stored in the db vs. config.
)
def __init__(self, id: Optional[Union[str, int]] = None, **params):
if id is None:
id = str(uuid.uuid4()) # Generate a UUID if id is None or not provided
elif isinstance(id, int):
id = str(id)
super().__init__(id=id, **params)
class Config:
extra = "allow"
def __contains__(self, key):
# Define custom behavior for the 'in' operator
return hasattr(self, key)
def get(self, key, default=None):
# Custom .get() method to access attributes with a default value if the attribute doesn't exist
return getattr(self, key, default)
def __getitem__(self, key):
# Allow dictionary-style access to attributes
return getattr(self, key)
def __setitem__(self, key, value):
# Allow dictionary-style assignment of attributes
setattr(self, key, value)
|
(id: Optional[str] = None, *, db_model: bool = False, **params) -> None
|
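The `__init__` above normalizes the `id` field, so callers never see a missing or integer id. A quick sketch:

```py
from litellm.types.router import ModelInfo

info = ModelInfo()        # no id given -> a fresh UUID4 string is generated
assert isinstance(info.id, str)

info2 = ModelInfo(id=42)  # integer ids are coerced to strings
assert info2.id == "42"
```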
63,564 |
litellm.types.router
|
__init__
| null |
def __init__(self, id: Optional[Union[str, int]] = None, **params):
if id is None:
id = str(uuid.uuid4()) # Generate a UUID if id is None or not provided
elif isinstance(id, int):
id = str(id)
super().__init__(id=id, **params)
|
(self, id: Union[str, int, NoneType] = None, **params)
|
63,588 |
litellm.utils
|
ModelResponse
| null |
class ModelResponse(OpenAIObject):
id: str
"""A unique identifier for the completion."""
choices: List[Union[Choices, StreamingChoices]]
"""The list of completion choices the model generated for the input prompt."""
created: int
"""The Unix timestamp (in seconds) of when the completion was created."""
model: Optional[str] = None
"""The model used for completion."""
object: str
"""The object type, which is always "text_completion" """
system_fingerprint: Optional[str] = None
"""This fingerprint represents the backend configuration that the model runs with.
Can be used in conjunction with the `seed` request parameter to understand when
backend changes have been made that might impact determinism.
"""
_hidden_params: dict = {}
def __init__(
self,
id=None,
choices=None,
created=None,
model=None,
object=None,
system_fingerprint=None,
usage=None,
stream=None,
stream_options=None,
response_ms=None,
hidden_params=None,
**params,
):
if stream is not None and stream == True:
object = "chat.completion.chunk"
if choices is not None and isinstance(choices, list):
new_choices = []
for choice in choices:
if isinstance(choice, StreamingChoices):
_new_choice = choice
elif isinstance(choice, dict):
_new_choice = StreamingChoices(**choice)
new_choices.append(_new_choice)
choices = new_choices
else:
choices = [StreamingChoices()]
else:
if model in litellm.open_ai_embedding_models:
object = "embedding"
else:
object = "chat.completion"
if choices is not None and isinstance(choices, list):
new_choices = []
for choice in choices:
if isinstance(choice, Choices):
_new_choice = choice
elif isinstance(choice, dict):
_new_choice = Choices(**choice)
new_choices.append(_new_choice)
choices = new_choices
else:
choices = [Choices()]
if id is None:
id = _generate_id()
else:
id = id
if created is None:
created = int(time.time())
else:
created = created
model = model
if usage is not None:
usage = usage
elif stream is None or stream == False:
usage = Usage()
elif (
stream == True
and stream_options is not None
and stream_options.get("include_usage") == True
):
usage = Usage()
if hidden_params:
self._hidden_params = hidden_params
init_values = {
"id": id,
"choices": choices,
"created": created,
"model": model,
"object": object,
"system_fingerprint": system_fingerprint,
}
if usage is not None:
init_values["usage"] = usage
super().__init__(
**init_values,
**params,
)
def __contains__(self, key):
# Define custom behavior for the 'in' operator
return hasattr(self, key)
def get(self, key, default=None):
# Custom .get() method to access attributes with a default value if the attribute doesn't exist
return getattr(self, key, default)
def __getitem__(self, key):
# Allow dictionary-style access to attributes
return getattr(self, key)
def __setitem__(self, key, value):
# Allow dictionary-style assignment of attributes
setattr(self, key, value)
def json(self, **kwargs):
try:
return self.model_dump() # noqa
except:
# if using pydantic v1
return self.dict()
|
(id=None, choices=None, created=None, model=None, object=None, system_fingerprint=None, usage=None, stream=None, stream_options=None, response_ms=None, hidden_params=None, **params)
|
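A sketch of the constructor's branching: non-streaming calls get `object = "chat.completion"`, a default `Choices` list, and a zeroed `Usage`, while `stream=True` flips the object type (dict choices, when given, are upgraded to `Choices`/`StreamingChoices`):

```py
from litellm.utils import ModelResponse

resp = ModelResponse(model="gpt-3.5-turbo")
print(resp.object)    # chat.completion
print(resp.choices)   # [Choices()] - a default choice is created when none are given
print(resp.usage)     # zeroed Usage() attached for non-streaming responses

chunk = ModelResponse(model="gpt-3.5-turbo", stream=True)
print(chunk.object)   # chat.completion.chunk
```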
63,597 |
litellm.utils
|
__init__
| null |
def __init__(
self,
id=None,
choices=None,
created=None,
model=None,
object=None,
system_fingerprint=None,
usage=None,
stream=None,
stream_options=None,
response_ms=None,
hidden_params=None,
**params,
):
if stream is not None and stream == True:
object = "chat.completion.chunk"
if choices is not None and isinstance(choices, list):
new_choices = []
for choice in choices:
if isinstance(choice, StreamingChoices):
_new_choice = choice
elif isinstance(choice, dict):
_new_choice = StreamingChoices(**choice)
new_choices.append(_new_choice)
choices = new_choices
else:
choices = [StreamingChoices()]
else:
if model in litellm.open_ai_embedding_models:
object = "embedding"
else:
object = "chat.completion"
if choices is not None and isinstance(choices, list):
new_choices = []
for choice in choices:
if isinstance(choice, Choices):
_new_choice = choice
elif isinstance(choice, dict):
_new_choice = Choices(**choice)
new_choices.append(_new_choice)
choices = new_choices
else:
choices = [Choices()]
if id is None:
id = _generate_id()
else:
id = id
if created is None:
created = int(time.time())
else:
created = created
model = model
if usage is not None:
usage = usage
elif stream is None or stream == False:
usage = Usage()
elif (
stream == True
and stream_options is not None
and stream_options.get("include_usage") == True
):
usage = Usage()
if hidden_params:
self._hidden_params = hidden_params
init_values = {
"id": id,
"choices": choices,
"created": created,
"model": model,
"object": object,
"system_fingerprint": system_fingerprint,
}
if usage is not None:
init_values["usage"] = usage
super().__init__(
**init_values,
**params,
)
|
(self, id=None, choices=None, created=None, model=None, object=None, system_fingerprint=None, usage=None, stream=None, stream_options=None, response_ms=None, hidden_params=None, **params)
|
63,623 |
litellm
|
MyLocal
| null |
class MyLocal(threading.local):
def __init__(self):
self.user = "Hello World"
|
()
|
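`threading.local` gives each thread its own copy of instance state (the subclass `__init__` re-runs per thread on first access), so writes in one thread are invisible in others:

```py
import threading

class MyLocal(threading.local):
    def __init__(self):
        self.user = "Hello World"

local = MyLocal()

def worker():
    local.user = "worker-thread"  # only this thread sees the new value
    print(local.user)             # -> worker-thread

t = threading.Thread(target=worker)
t.start()
t.join()
print(local.user)                 # -> Hello World (the main thread's copy is untouched)
```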
63,624 |
litellm
|
__init__
| null |
def __init__(self):
self.user = "Hello World"
|
(self)
|
63,625 |
litellm.llms.nlp_cloud
|
NLPCloudConfig
|
Reference: https://docs.nlpcloud.com/#generation
- `max_length` (int): Optional. The maximum number of tokens that the generated text should contain.
- `length_no_input` (boolean): Optional. Whether `min_length` and `max_length` should not include the length of the input text.
- `end_sequence` (string): Optional. A specific token that should be the end of the generated sequence.
- `remove_end_sequence` (boolean): Optional. Whether to remove the `end_sequence` string from the result.
- `remove_input` (boolean): Optional. Whether to remove the input text from the result.
- `bad_words` (list of strings): Optional. List of tokens that are not allowed to be generated.
- `temperature` (float): Optional. Temperature sampling. It modulates the next token probabilities.
- `top_p` (float): Optional. Top P sampling. Below 1, only the most probable tokens with probabilities that add up to top_p or higher are kept for generation.
- `top_k` (int): Optional. Top K sampling. The number of highest probability vocabulary tokens to keep for top k filtering.
- `repetition_penalty` (float): Optional. Prevents the same word from being repeated too many times.
- `num_beams` (int): Optional. Number of beams for beam search.
- `num_return_sequences` (int): Optional. The number of independently computed returned sequences.
|
class NLPCloudConfig:
"""
Reference: https://docs.nlpcloud.com/#generation
- `max_length` (int): Optional. The maximum number of tokens that the generated text should contain.
- `length_no_input` (boolean): Optional. Whether `min_length` and `max_length` should not include the length of the input text.
- `end_sequence` (string): Optional. A specific token that should be the end of the generated sequence.
- `remove_end_sequence` (boolean): Optional. Whether to remove the `end_sequence` string from the result.
- `remove_input` (boolean): Optional. Whether to remove the input text from the result.
- `bad_words` (list of strings): Optional. List of tokens that are not allowed to be generated.
- `temperature` (float): Optional. Temperature sampling. It modulates the next token probabilities.
- `top_p` (float): Optional. Top P sampling. Below 1, only the most probable tokens with probabilities that add up to top_p or higher are kept for generation.
- `top_k` (int): Optional. Top K sampling. The number of highest probability vocabulary tokens to keep for top k filtering.
- `repetition_penalty` (float): Optional. Prevents the same word from being repeated too many times.
- `num_beams` (int): Optional. Number of beams for beam search.
- `num_return_sequences` (int): Optional. The number of independently computed returned sequences.
"""
max_length: Optional[int] = None
length_no_input: Optional[bool] = None
end_sequence: Optional[str] = None
remove_end_sequence: Optional[bool] = None
remove_input: Optional[bool] = None
bad_words: Optional[list] = None
temperature: Optional[float] = None
top_p: Optional[float] = None
top_k: Optional[int] = None
repetition_penalty: Optional[float] = None
num_beams: Optional[int] = None
num_return_sequences: Optional[int] = None
def __init__(
self,
max_length: Optional[int] = None,
length_no_input: Optional[bool] = None,
end_sequence: Optional[str] = None,
remove_end_sequence: Optional[bool] = None,
remove_input: Optional[bool] = None,
bad_words: Optional[list] = None,
temperature: Optional[float] = None,
top_p: Optional[float] = None,
top_k: Optional[int] = None,
repetition_penalty: Optional[float] = None,
num_beams: Optional[int] = None,
num_return_sequences: Optional[int] = None,
) -> None:
locals_ = locals()
for key, value in locals_.items():
if key != "self" and value is not None:
setattr(self.__class__, key, value)
@classmethod
def get_config(cls):
return {
k: v
for k, v in cls.__dict__.items()
if not k.startswith("__")
and not isinstance(
v,
(
types.FunctionType,
types.BuiltinFunctionType,
classmethod,
staticmethod,
),
)
and v is not None
}
|
(max_length: Optional[int] = None, length_no_input: Optional[bool] = None, end_sequence: Optional[str] = None, remove_end_sequence: Optional[bool] = None, remove_input: Optional[bool] = None, bad_words: Optional[list] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, top_k: Optional[int] = None, repetition_penalty: Optional[float] = None, num_beams: Optional[int] = None, num_return_sequences: Optional[int] = None) -> None
|
63,626 |
litellm.llms.nlp_cloud
|
__init__
| null |
def __init__(
self,
max_length: Optional[int] = None,
length_no_input: Optional[bool] = None,
end_sequence: Optional[str] = None,
remove_end_sequence: Optional[bool] = None,
remove_input: Optional[bool] = None,
bad_words: Optional[list] = None,
temperature: Optional[float] = None,
top_p: Optional[float] = None,
top_k: Optional[int] = None,
repetition_penalty: Optional[float] = None,
num_beams: Optional[int] = None,
num_return_sequences: Optional[int] = None,
) -> None:
locals_ = locals()
for key, value in locals_.items():
if key != "self" and value is not None:
setattr(self.__class__, key, value)
|
(self, max_length: Optional[int] = None, length_no_input: Optional[bool] = None, end_sequence: Optional[str] = None, remove_end_sequence: Optional[bool] = None, remove_input: Optional[bool] = None, bad_words: Optional[list] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, top_k: Optional[int] = None, repetition_penalty: Optional[float] = None, num_beams: Optional[int] = None, num_return_sequences: Optional[int] = None) -> NoneType
|
63,627 |
litellm.exceptions
|
NotFoundError
| null |
class NotFoundError(openai.NotFoundError): # type: ignore
def __init__(self, message, model, llm_provider, response: httpx.Response):
self.status_code = 404
self.message = message
self.model = model
self.llm_provider = llm_provider
super().__init__(
self.message, response=response, body=None
) # Call the base class constructor with the parameters it needs
|
(message, model, llm_provider, response: httpx.Response)
|
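Constructing the error requires a real `httpx.Response`, since the `openai.NotFoundError` base class reads from it. A minimal sketch (URL and messages are illustrative):

```py
import httpx
from litellm.exceptions import NotFoundError

response = httpx.Response(
    status_code=404,
    request=httpx.Request("POST", "https://api.openai.com/v1/chat/completions"),
)
err = NotFoundError(
    message="model does not exist",
    model="gpt-unknown",
    llm_provider="openai",
    response=response,
)
print(err.status_code, err.llm_provider)  # 404 openai
```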
63,628 |
litellm.exceptions
|
__init__
| null |
def __init__(self, message, model, llm_provider, response: httpx.Response):
self.status_code = 404
self.message = message
self.model = model
self.llm_provider = llm_provider
super().__init__(
self.message, response=response, body=None
) # Call the base class constructor with the parameters it needs
|
(self, message, model, llm_provider, response: httpx.Response)
|
63,629 |
litellm.types.llms.openai
|
NotGiven
|
A sentinel singleton class used to distinguish omitted keyword arguments
from those passed in with the value None (which may have different behavior).
For example:
```py
def get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response:
...
get(timeout=1) # 1s timeout
get(timeout=None) # No timeout
get() # Default timeout behavior, which may not be statically known at the method definition.
```
|
class NotGiven:
"""
A sentinel singleton class used to distinguish omitted keyword arguments
from those passed in with the value None (which may have different behavior).
For example:
```py
def get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response:
...
get(timeout=1) # 1s timeout
get(timeout=None) # No timeout
get() # Default timeout behavior, which may not be statically known at the method definition.
```
"""
def __bool__(self) -> Literal[False]:
return False
@override
def __repr__(self) -> str:
return "NOT_GIVEN"
|
()
|
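Since `__bool__` returns `False`, a plain truthiness check cannot tell the sentinel apart from `None` or `0`; an explicit `isinstance` check does. A small sketch (the local `NOT_GIVEN` instance mirrors how the sentinel is normally defined alongside the class):

```py
from litellm.types.llms.openai import NotGiven

NOT_GIVEN = NotGiven()

def describe(timeout: "int | NotGiven | None" = NOT_GIVEN) -> str:
    if isinstance(timeout, NotGiven):
        return "use the default timeout"
    if timeout is None:
        return "disable the timeout"
    return f"{timeout}s timeout"

print(describe())      # use the default timeout
print(describe(None))  # disable the timeout
print(describe(30))    # 30s timeout
```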
63,630 |
litellm.types.llms.openai
|
__bool__
| null |
def __bool__(self) -> Literal[False]:
return False
|
(self) -> Literal[False]
|
63,631 |
litellm.types.llms.openai
|
__repr__
| null |
@override
def __repr__(self) -> str:
return "NOT_GIVEN"
|
(self) -> str
|
63,632 |
litellm.llms.ollama_chat
|
OllamaChatConfig
|
Reference: https://github.com/ollama/ollama/blob/main/docs/api.md#parameters
The class `OllamaChatConfig` provides the configuration for Ollama's API interface. Below are the parameters:
- `mirostat` (int): Enable Mirostat sampling for controlling perplexity. Default is 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0. Example usage: mirostat 0
- `mirostat_eta` (float): Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. Default: 0.1. Example usage: mirostat_eta 0.1
- `mirostat_tau` (float): Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. Default: 5.0. Example usage: mirostat_tau 5.0
- `num_ctx` (int): Sets the size of the context window used to generate the next token. Default: 2048. Example usage: num_ctx 4096
- `num_gqa` (int): The number of GQA groups in the transformer layer. Required for some models, for example it is 8 for llama2:70b. Example usage: num_gqa 1
- `num_gpu` (int): The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support, 0 to disable. Example usage: num_gpu 0
- `num_thread` (int): Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). Example usage: num_thread 8
- `repeat_last_n` (int): Sets how far back the model looks to prevent repetition. Default: 64, 0 = disabled, -1 = num_ctx. Example usage: repeat_last_n 64
- `repeat_penalty` (float): Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. Default: 1.1. Example usage: repeat_penalty 1.1
- `temperature` (float): The temperature of the model. Increasing the temperature will make the model answer more creatively. Default: 0.8. Example usage: temperature 0.7
- `stop` (string[]): Sets the stop sequences to use. Example usage: stop "AI assistant:"
- `tfs_z` (float): Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. Default: 1. Example usage: tfs_z 1
- `num_predict` (int): Maximum number of tokens to predict when generating text. Default: 128, -1 = infinite generation, -2 = fill context. Example usage: num_predict 42
- `top_k` (int): Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. Default: 40. Example usage: top_k 40
- `top_p` (float): Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. Default: 0.9. Example usage: top_p 0.9
- `system` (string): system prompt for model (overrides what is defined in the Modelfile)
- `template` (string): the full prompt or prompt template (overrides what is defined in the Modelfile)
|
class OllamaChatConfig:
"""
Reference: https://github.com/ollama/ollama/blob/main/docs/api.md#parameters
The class `OllamaChatConfig` provides the configuration for Ollama's API interface. Below are the parameters:
- `mirostat` (int): Enable Mirostat sampling for controlling perplexity. Default is 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0. Example usage: mirostat 0
- `mirostat_eta` (float): Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. Default: 0.1. Example usage: mirostat_eta 0.1
- `mirostat_tau` (float): Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. Default: 5.0. Example usage: mirostat_tau 5.0
- `num_ctx` (int): Sets the size of the context window used to generate the next token. Default: 2048. Example usage: num_ctx 4096
- `num_gqa` (int): The number of GQA groups in the transformer layer. Required for some models, for example it is 8 for llama2:70b. Example usage: num_gqa 1
- `num_gpu` (int): The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support, 0 to disable. Example usage: num_gpu 0
- `num_thread` (int): Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). Example usage: num_thread 8
- `repeat_last_n` (int): Sets how far back the model looks to prevent repetition. Default: 64, 0 = disabled, -1 = num_ctx. Example usage: repeat_last_n 64
- `repeat_penalty` (float): Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. Default: 1.1. Example usage: repeat_penalty 1.1
- `temperature` (float): The temperature of the model. Increasing the temperature will make the model answer more creatively. Default: 0.8. Example usage: temperature 0.7
- `stop` (string[]): Sets the stop sequences to use. Example usage: stop "AI assistant:"
- `tfs_z` (float): Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. Default: 1. Example usage: tfs_z 1
- `num_predict` (int): Maximum number of tokens to predict when generating text. Default: 128, -1 = infinite generation, -2 = fill context. Example usage: num_predict 42
- `top_k` (int): Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. Default: 40. Example usage: top_k 40
- `top_p` (float): Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. Default: 0.9. Example usage: top_p 0.9
- `system` (string): system prompt for model (overrides what is defined in the Modelfile)
- `template` (string): the full prompt or prompt template (overrides what is defined in the Modelfile)
"""
mirostat: Optional[int] = None
mirostat_eta: Optional[float] = None
mirostat_tau: Optional[float] = None
num_ctx: Optional[int] = None
num_gqa: Optional[int] = None
num_thread: Optional[int] = None
repeat_last_n: Optional[int] = None
repeat_penalty: Optional[float] = None
temperature: Optional[float] = None
stop: Optional[list] = (
None # stop is a list based on this - https://github.com/ollama/ollama/pull/442
)
tfs_z: Optional[float] = None
num_predict: Optional[int] = None
top_k: Optional[int] = None
top_p: Optional[float] = None
system: Optional[str] = None
template: Optional[str] = None
def __init__(
self,
mirostat: Optional[int] = None,
mirostat_eta: Optional[float] = None,
mirostat_tau: Optional[float] = None,
num_ctx: Optional[int] = None,
num_gqa: Optional[int] = None,
num_thread: Optional[int] = None,
repeat_last_n: Optional[int] = None,
repeat_penalty: Optional[float] = None,
temperature: Optional[float] = None,
stop: Optional[list] = None,
tfs_z: Optional[float] = None,
num_predict: Optional[int] = None,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
system: Optional[str] = None,
template: Optional[str] = None,
) -> None:
locals_ = locals()
for key, value in locals_.items():
if key != "self" and value is not None:
setattr(self.__class__, key, value)
@classmethod
def get_config(cls):
return {
k: v
for k, v in cls.__dict__.items()
if not k.startswith("__")
and k != "function_name" # special param for function calling
and not isinstance(
v,
(
types.FunctionType,
types.BuiltinFunctionType,
classmethod,
staticmethod,
),
)
and v is not None
}
def get_supported_openai_params(
self,
):
return [
"max_tokens",
"stream",
"top_p",
"temperature",
"frequency_penalty",
"stop",
"tools",
"tool_choice",
"functions",
"response_format",
]
def map_openai_params(self, non_default_params: dict, optional_params: dict):
for param, value in non_default_params.items():
if param == "max_tokens":
optional_params["num_predict"] = value
if param == "stream":
optional_params["stream"] = value
if param == "temperature":
optional_params["temperature"] = value
if param == "top_p":
optional_params["top_p"] = value
if param == "frequency_penalty":
optional_params["repeat_penalty"] = value
if param == "stop":
optional_params["stop"] = value
if param == "response_format" and value["type"] == "json_object":
optional_params["format"] = "json"
### FUNCTION CALLING LOGIC ###
if param == "tools":
# ollama actually supports json output
optional_params["format"] = "json"
litellm.add_function_to_prompt = (
True # so that main.py adds the function call to the prompt
)
optional_params["functions_unsupported_model"] = value
if len(optional_params["functions_unsupported_model"]) == 1:
optional_params["function_name"] = optional_params[
"functions_unsupported_model"
][0]["function"]["name"]
if param == "functions":
# ollama actually supports json output
optional_params["format"] = "json"
litellm.add_function_to_prompt = (
True # so that main.py adds the function call to the prompt
)
optional_params["functions_unsupported_model"] = non_default_params.get(
"functions"
)
non_default_params.pop("tool_choice", None) # causes ollama requests to hang
non_default_params.pop("functions", None) # causes ollama requests to hang
return optional_params
|
(mirostat: Optional[int] = None, mirostat_eta: Optional[float] = None, mirostat_tau: Optional[float] = None, num_ctx: Optional[int] = None, num_gqa: Optional[int] = None, num_thread: Optional[int] = None, repeat_last_n: Optional[int] = None, repeat_penalty: Optional[float] = None, temperature: Optional[float] = None, stop: Optional[list] = None, tfs_z: Optional[float] = None, num_predict: Optional[int] = None, top_k: Optional[int] = None, top_p: Optional[float] = None, system: Optional[str] = None, template: Optional[str] = None) -> None
|
63,633 |
litellm.llms.ollama_chat
|
__init__
| null |
def __init__(
self,
mirostat: Optional[int] = None,
mirostat_eta: Optional[float] = None,
mirostat_tau: Optional[float] = None,
num_ctx: Optional[int] = None,
num_gqa: Optional[int] = None,
num_thread: Optional[int] = None,
repeat_last_n: Optional[int] = None,
repeat_penalty: Optional[float] = None,
temperature: Optional[float] = None,
stop: Optional[list] = None,
tfs_z: Optional[float] = None,
num_predict: Optional[int] = None,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
system: Optional[str] = None,
template: Optional[str] = None,
) -> None:
locals_ = locals()
for key, value in locals_.items():
if key != "self" and value is not None:
setattr(self.__class__, key, value)
|
(self, mirostat: Optional[int] = None, mirostat_eta: Optional[float] = None, mirostat_tau: Optional[float] = None, num_ctx: Optional[int] = None, num_gqa: Optional[int] = None, num_thread: Optional[int] = None, repeat_last_n: Optional[int] = None, repeat_penalty: Optional[float] = None, temperature: Optional[float] = None, stop: Optional[list] = None, tfs_z: Optional[float] = None, num_predict: Optional[int] = None, top_k: Optional[int] = None, top_p: Optional[float] = None, system: Optional[str] = None, template: Optional[str] = None) -> NoneType
|
63,634 |
litellm.llms.ollama_chat
|
get_supported_openai_params
| null |
def get_supported_openai_params(
self,
):
return [
"max_tokens",
"stream",
"top_p",
"temperature",
"frequency_penalty",
"stop",
"tools",
"tool_choice",
"functions",
"response_format",
]
|
(self)
|
63,635 |
litellm.llms.ollama_chat
|
map_openai_params
| null |
def map_openai_params(self, non_default_params: dict, optional_params: dict):
for param, value in non_default_params.items():
if param == "max_tokens":
optional_params["num_predict"] = value
if param == "stream":
optional_params["stream"] = value
if param == "temperature":
optional_params["temperature"] = value
if param == "top_p":
optional_params["top_p"] = value
if param == "frequency_penalty":
optional_params["repeat_penalty"] = value
if param == "stop":
optional_params["stop"] = value
if param == "response_format" and value["type"] == "json_object":
optional_params["format"] = "json"
### FUNCTION CALLING LOGIC ###
if param == "tools":
# ollama actually supports json output
optional_params["format"] = "json"
litellm.add_function_to_prompt = (
True # so that main.py adds the function call to the prompt
)
optional_params["functions_unsupported_model"] = value
if len(optional_params["functions_unsupported_model"]) == 1:
optional_params["function_name"] = optional_params[
"functions_unsupported_model"
][0]["function"]["name"]
if param == "functions":
# ollama actually supports json output
optional_params["format"] = "json"
litellm.add_function_to_prompt = (
True # so that main.py adds the function call to the prompt
)
optional_params["functions_unsupported_model"] = non_default_params.get(
"functions"
)
non_default_params.pop("tool_choice", None) # causes ollama requests to hang
non_default_params.pop("functions", None) # causes ollama requests to hang
return optional_params
|
(self, non_default_params: dict, optional_params: dict)
|
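A quick sketch of the OpenAI-to-Ollama parameter translation above; with no tools or functions in play, the function-calling branches stay idle:

```py
from litellm.llms.ollama_chat import OllamaChatConfig

mapped = OllamaChatConfig().map_openai_params(
    non_default_params={"max_tokens": 128, "frequency_penalty": 1.1, "stop": ["\n\n"]},
    optional_params={},
)
print(mapped)  # {'num_predict': 128, 'repeat_penalty': 1.1, 'stop': ['\n\n']}
```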
63,636 |
litellm.llms.ollama
|
OllamaConfig
|
Reference: https://github.com/ollama/ollama/blob/main/docs/api.md#parameters
The class `OllamaConfig` provides the configuration for Ollama's API interface. Below are the parameters:
- `mirostat` (int): Enable Mirostat sampling for controlling perplexity. Default is 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0. Example usage: mirostat 0
- `mirostat_eta` (float): Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. Default: 0.1. Example usage: mirostat_eta 0.1
- `mirostat_tau` (float): Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. Default: 5.0. Example usage: mirostat_tau 5.0
- `num_ctx` (int): Sets the size of the context window used to generate the next token. Default: 2048. Example usage: num_ctx 4096
- `num_gqa` (int): The number of GQA groups in the transformer layer. Required for some models, for example it is 8 for llama2:70b. Example usage: num_gqa 1
- `num_gpu` (int): The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support, 0 to disable. Example usage: num_gpu 0
- `num_thread` (int): Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). Example usage: num_thread 8
- `repeat_last_n` (int): Sets how far back the model looks to prevent repetition. Default: 64, 0 = disabled, -1 = num_ctx. Example usage: repeat_last_n 64
- `repeat_penalty` (float): Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. Default: 1.1. Example usage: repeat_penalty 1.1
- `temperature` (float): The temperature of the model. Increasing the temperature will make the model answer more creatively. Default: 0.8. Example usage: temperature 0.7
- `stop` (string[]): Sets the stop sequences to use. Example usage: stop "AI assistant:"
- `tfs_z` (float): Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. Default: 1. Example usage: tfs_z 1
- `num_predict` (int): Maximum number of tokens to predict when generating text. Default: 128, -1 = infinite generation, -2 = fill context. Example usage: num_predict 42
- `top_k` (int): Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. Default: 40. Example usage: top_k 40
- `top_p` (float): Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. Default: 0.9. Example usage: top_p 0.9
- `system` (string): system prompt for model (overrides what is defined in the Modelfile)
- `template` (string): the full prompt or prompt template (overrides what is defined in the Modelfile)
|
class OllamaConfig:
"""
Reference: https://github.com/ollama/ollama/blob/main/docs/api.md#parameters
The class `OllamaConfig` provides the configuration for Ollama's API interface. Below are the parameters:
- `mirostat` (int): Enable Mirostat sampling for controlling perplexity. Default is 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0. Example usage: mirostat 0
- `mirostat_eta` (float): Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. Default: 0.1. Example usage: mirostat_eta 0.1
- `mirostat_tau` (float): Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. Default: 5.0. Example usage: mirostat_tau 5.0
- `num_ctx` (int): Sets the size of the context window used to generate the next token. Default: 2048. Example usage: num_ctx 4096
- `num_gqa` (int): The number of GQA groups in the transformer layer. Required for some models, for example it is 8 for llama2:70b. Example usage: num_gqa 1
- `num_gpu` (int): The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support, 0 to disable. Example usage: num_gpu 0
- `num_thread` (int): Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). Example usage: num_thread 8
- `repeat_last_n` (int): Sets how far back the model looks to prevent repetition. Default: 64, 0 = disabled, -1 = num_ctx. Example usage: repeat_last_n 64
- `repeat_penalty` (float): Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. Default: 1.1. Example usage: repeat_penalty 1.1
- `temperature` (float): The temperature of the model. Increasing the temperature will make the model answer more creatively. Default: 0.8. Example usage: temperature 0.7
- `stop` (string[]): Sets the stop sequences to use. Example usage: stop "AI assistant:"
- `tfs_z` (float): Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. Default: 1. Example usage: tfs_z 1
- `num_predict` (int): Maximum number of tokens to predict when generating text. Default: 128, -1 = infinite generation, -2 = fill context. Example usage: num_predict 42
- `top_k` (int): Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. Default: 40. Example usage: top_k 40
- `top_p` (float): Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. Default: 0.9. Example usage: top_p 0.9
- `system` (string): system prompt for model (overrides what is defined in the Modelfile)
- `template` (string): the full prompt or prompt template (overrides what is defined in the Modelfile)
"""
mirostat: Optional[int] = None
mirostat_eta: Optional[float] = None
mirostat_tau: Optional[float] = None
num_ctx: Optional[int] = None
num_gqa: Optional[int] = None
num_thread: Optional[int] = None
repeat_last_n: Optional[int] = None
repeat_penalty: Optional[float] = None
temperature: Optional[float] = None
stop: Optional[list] = (
None # stop is a list based on this - https://github.com/ollama/ollama/pull/442
)
tfs_z: Optional[float] = None
num_predict: Optional[int] = None
top_k: Optional[int] = None
top_p: Optional[float] = None
system: Optional[str] = None
template: Optional[str] = None
def __init__(
self,
mirostat: Optional[int] = None,
mirostat_eta: Optional[float] = None,
mirostat_tau: Optional[float] = None,
num_ctx: Optional[int] = None,
num_gqa: Optional[int] = None,
num_thread: Optional[int] = None,
repeat_last_n: Optional[int] = None,
repeat_penalty: Optional[float] = None,
temperature: Optional[float] = None,
stop: Optional[list] = None,
tfs_z: Optional[float] = None,
num_predict: Optional[int] = None,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
system: Optional[str] = None,
template: Optional[str] = None,
) -> None:
locals_ = locals()
for key, value in locals_.items():
if key != "self" and value is not None:
setattr(self.__class__, key, value)
@classmethod
def get_config(cls):
return {
k: v
for k, v in cls.__dict__.items()
if not k.startswith("__")
and not isinstance(
v,
(
types.FunctionType,
types.BuiltinFunctionType,
classmethod,
staticmethod,
),
)
and v is not None
}
|
(mirostat: Optional[int] = None, mirostat_eta: Optional[float] = None, mirostat_tau: Optional[float] = None, num_ctx: Optional[int] = None, num_gqa: Optional[int] = None, num_thread: Optional[int] = None, repeat_last_n: Optional[int] = None, repeat_penalty: Optional[float] = None, temperature: Optional[float] = None, stop: Optional[list] = None, tfs_z: Optional[float] = None, num_predict: Optional[int] = None, top_k: Optional[int] = None, top_p: Optional[float] = None, system: Optional[str] = None, template: Optional[str] = None) -> None
|
63,638 |
openai
|
OpenAI
| null |
from openai import OpenAI
|
(*, api_key: 'str | None' = None, organization: 'str | None' = None, project: 'str | None' = None, base_url: 'str | httpx.URL | None' = None, timeout: 'Union[float, Timeout, None, NotGiven]' = NOT_GIVEN, max_retries: 'int' = 2, default_headers: 'Mapping[str, str] | None' = None, default_query: 'Mapping[str, object] | None' = None, http_client: 'httpx.Client | None' = None, _strict_response_validation: 'bool' = False) -> 'None'
|
63,639 |
openai._base_client
|
__enter__
| null |
def __enter__(self: _T) -> _T:
return self
|
(self: ~_T) -> ~_T
|
63,640 |
openai._base_client
|
__exit__
| null |
def __exit__(
self,
exc_type: type[BaseException] | None,
exc: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
self.close()
|
(self, exc_type: type[BaseException] | None, exc: BaseException | None, exc_tb: traceback | None) -> NoneType
|
63,641 |
openai._client
|
__init__
|
Construct a new synchronous openai client instance.
This automatically infers the following arguments from their corresponding environment variables if they are not provided:
- `api_key` from `OPENAI_API_KEY`
- `organization` from `OPENAI_ORG_ID`
- `project` from `OPENAI_PROJECT_ID`
|
def __init__(
self,
*,
api_key: str | None = None,
organization: str | None = None,
project: str | None = None,
base_url: str | httpx.URL | None = None,
timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN,
max_retries: int = DEFAULT_MAX_RETRIES,
default_headers: Mapping[str, str] | None = None,
default_query: Mapping[str, object] | None = None,
# Configure a custom httpx client.
# We provide a `DefaultHttpxClient` class that you can pass to retain the default values we use for `limits`, `timeout` & `follow_redirects`.
# See the [httpx documentation](https://www.python-httpx.org/api/#client) for more details.
http_client: httpx.Client | None = None,
# Enable or disable schema validation for data returned by the API.
# When enabled an error APIResponseValidationError is raised
# if the API responds with invalid data for the expected schema.
#
# This parameter may be removed or changed in the future.
# If you rely on this feature, please open a GitHub issue
# outlining your use-case to help us decide if it should be
# part of our public interface in the future.
_strict_response_validation: bool = False,
) -> None:
"""Construct a new synchronous openai client instance.
This automatically infers the following arguments from their corresponding environment variables if they are not provided:
- `api_key` from `OPENAI_API_KEY`
- `organization` from `OPENAI_ORG_ID`
- `project` from `OPENAI_PROJECT_ID`
"""
if api_key is None:
api_key = os.environ.get("OPENAI_API_KEY")
if api_key is None:
raise OpenAIError(
"The api_key client option must be set either by passing api_key to the client or by setting the OPENAI_API_KEY environment variable"
)
self.api_key = api_key
if organization is None:
organization = os.environ.get("OPENAI_ORG_ID")
self.organization = organization
if project is None:
project = os.environ.get("OPENAI_PROJECT_ID")
self.project = project
if base_url is None:
base_url = os.environ.get("OPENAI_BASE_URL")
if base_url is None:
base_url = f"https://api.openai.com/v1"
super().__init__(
version=__version__,
base_url=base_url,
max_retries=max_retries,
timeout=timeout,
http_client=http_client,
custom_headers=default_headers,
custom_query=default_query,
_strict_response_validation=_strict_response_validation,
)
self._default_stream_cls = Stream
self.completions = resources.Completions(self)
self.chat = resources.Chat(self)
self.embeddings = resources.Embeddings(self)
self.files = resources.Files(self)
self.images = resources.Images(self)
self.audio = resources.Audio(self)
self.moderations = resources.Moderations(self)
self.models = resources.Models(self)
self.fine_tuning = resources.FineTuning(self)
self.beta = resources.Beta(self)
self.batches = resources.Batches(self)
self.with_raw_response = OpenAIWithRawResponse(self)
self.with_streaming_response = OpenAIWithStreamedResponse(self)
|
(self, *, api_key: Optional[str] = None, organization: Optional[str] = None, project: Optional[str] = None, base_url: Union[str, httpx.URL, NoneType] = None, timeout: Union[float, openai.Timeout, NoneType, openai.NotGiven] = NOT_GIVEN, max_retries: int = 2, default_headers: Optional[Mapping[str, str]] = None, default_query: Optional[Mapping[str, object]] = None, http_client: Optional[httpx.Client] = None, _strict_response_validation: bool = False) -> NoneType
|
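A construction sketch showing the environment-variable fallback and the base-URL handling (the key is a placeholder so construction succeeds; no request is made):

```py
import os
from openai import OpenAI

os.environ.setdefault("OPENAI_API_KEY", "sk-placeholder")  # api_key falls back to this env var
client = OpenAI(timeout=30.0, max_retries=3)
print(client.base_url)  # https://api.openai.com/v1/ (trailing slash enforced by the base client)
```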
63,642 |
openai._base_client
|
_build_headers
| null |
def _build_headers(self, options: FinalRequestOptions) -> httpx.Headers:
custom_headers = options.headers or {}
headers_dict = _merge_mappings(self.default_headers, custom_headers)
self._validate_headers(headers_dict, custom_headers)
# headers are case-insensitive while dictionaries are not.
headers = httpx.Headers(headers_dict)
idempotency_header = self._idempotency_header
if idempotency_header and options.method.lower() != "get" and idempotency_header not in headers:
headers[idempotency_header] = options.idempotency_key or self._idempotency_key()
return headers
|
(self, options: openai._models.FinalRequestOptions) -> httpx.Headers
|
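The comment above leans on `httpx.Headers` being case-insensitive, unlike a plain dict:

```py
import httpx

headers = httpx.Headers({"Content-Type": "application/json"})
print(headers["content-type"])   # lookups ignore case -> application/json
print("CONTENT-TYPE" in headers) # True
```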
63,643 |
openai._base_client
|
_build_request
| null |
def _build_request(
self,
options: FinalRequestOptions,
) -> httpx.Request:
if log.isEnabledFor(logging.DEBUG):
log.debug("Request options: %s", model_dump(options, exclude_unset=True))
kwargs: dict[str, Any] = {}
json_data = options.json_data
if options.extra_json is not None:
if json_data is None:
json_data = cast(Body, options.extra_json)
elif is_mapping(json_data):
json_data = _merge_mappings(json_data, options.extra_json)
else:
raise RuntimeError(f"Unexpected JSON data type, {type(json_data)}, cannot merge with `extra_body`")
headers = self._build_headers(options)
params = _merge_mappings(self._custom_query, options.params)
content_type = headers.get("Content-Type")
# If the given Content-Type header is multipart/form-data then it
# has to be removed so that httpx can generate the header with
# additional information for us as it has to be in this form
# for the server to be able to correctly parse the request:
# multipart/form-data; boundary=---abc--
if content_type is not None and content_type.startswith("multipart/form-data"):
if "boundary" not in content_type:
# only remove the header if the boundary hasn't been explicitly set
# as the caller doesn't want httpx to come up with their own boundary
headers.pop("Content-Type")
# As we are now sending multipart/form-data instead of application/json
# we need to tell httpx to use it, https://www.python-httpx.org/advanced/#multipart-file-encoding
if json_data:
if not is_dict(json_data):
raise TypeError(
f"Expected query input to be a dictionary for multipart requests but got {type(json_data)} instead."
)
kwargs["data"] = self._serialize_multipartform(json_data)
# TODO: report this error to httpx
return self._client.build_request( # pyright: ignore[reportUnknownMemberType]
headers=headers,
timeout=self.timeout if isinstance(options.timeout, NotGiven) else options.timeout,
method=options.method,
url=self._prepare_url(options.url),
# the `Query` type that we use is incompatible with qs'
# `Params` type as it needs to be typed as `Mapping[str, object]`
# so that passing a `TypedDict` doesn't cause an error.
# https://github.com/microsoft/pyright/issues/3526#event-6715453066
params=self.qs.stringify(cast(Mapping[str, Any], params)) if params else None,
json=json_data,
files=options.files,
**kwargs,
)
|
(self, options: openai._models.FinalRequestOptions) -> httpx.Request
|
63,644 |
openai._base_client
|
_calculate_retry_timeout
| null |
def _calculate_retry_timeout(
self,
remaining_retries: int,
options: FinalRequestOptions,
response_headers: Optional[httpx.Headers] = None,
) -> float:
max_retries = options.get_max_retries(self.max_retries)
# If the API asks us to wait a certain amount of time (and it's a reasonable amount), just do what it says.
retry_after = self._parse_retry_after_header(response_headers)
if retry_after is not None and 0 < retry_after <= 60:
return retry_after
nb_retries = max_retries - remaining_retries
# Apply exponential backoff, but not more than the max.
sleep_seconds = min(INITIAL_RETRY_DELAY * pow(2.0, nb_retries), MAX_RETRY_DELAY)
# Apply some downward jitter, keeping between 75% and 100% of the computed delay.
jitter = 1 - 0.25 * random()
timeout = sleep_seconds * jitter
return timeout if timeout >= 0 else 0
|
(self, remaining_retries: int, options: openai._models.FinalRequestOptions, response_headers: Optional[httpx.Headers] = None) -> float
|
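The backoff math above, isolated into a standalone sketch. The two delay constants are assumptions here (the real values live in the openai package's internal constants):

```py
from random import random

INITIAL_RETRY_DELAY = 0.5  # assumed values of the constants referenced above
MAX_RETRY_DELAY = 8.0

def retry_timeout(nb_retries: int) -> float:
    # exponential backoff capped at MAX_RETRY_DELAY, then up to 25% downward jitter
    sleep_seconds = min(INITIAL_RETRY_DELAY * 2.0 ** nb_retries, MAX_RETRY_DELAY)
    return sleep_seconds * (1 - 0.25 * random())

for n in range(5):
    print(n, round(retry_timeout(n), 3))  # ~0.5, 1, 2, 4, 8 seconds, minus jitter
```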
63,645 |
openai._base_client
|
_enforce_trailing_slash
| null |
def _enforce_trailing_slash(self, url: URL) -> URL:
if url.raw_path.endswith(b"/"):
return url
return url.copy_with(raw_path=url.raw_path + b"/")
|
(self, url: httpx.URL) -> httpx.URL
|
63,646 |
openai._base_client
|
_idempotency_key
| null |
def _idempotency_key(self) -> str:
return f"stainless-python-retry-{uuid.uuid4()}"
|
(self) -> str
|
63,647 |
openai._base_client
|
_make_sse_decoder
| null |
def _make_sse_decoder(self) -> SSEDecoder | SSEBytesDecoder:
return SSEDecoder()
|
(self) -> openai._streaming.SSEDecoder | openai._streaming.SSEBytesDecoder
|
63,648 |
openai._client
|
_make_status_error
| null |
@override
def _make_status_error(
self,
err_msg: str,
*,
body: object,
response: httpx.Response,
) -> APIStatusError:
data = body.get("error", body) if is_mapping(body) else body
if response.status_code == 400:
return _exceptions.BadRequestError(err_msg, response=response, body=data)
if response.status_code == 401:
return _exceptions.AuthenticationError(err_msg, response=response, body=data)
if response.status_code == 403:
return _exceptions.PermissionDeniedError(err_msg, response=response, body=data)
if response.status_code == 404:
return _exceptions.NotFoundError(err_msg, response=response, body=data)
if response.status_code == 409:
return _exceptions.ConflictError(err_msg, response=response, body=data)
if response.status_code == 422:
return _exceptions.UnprocessableEntityError(err_msg, response=response, body=data)
if response.status_code == 429:
return _exceptions.RateLimitError(err_msg, response=response, body=data)
if response.status_code >= 500:
return _exceptions.InternalServerError(err_msg, response=response, body=data)
return APIStatusError(err_msg, response=response, body=data)
|
(self, err_msg: str, *, body: object, response: httpx.Response) -> openai.APIStatusError
|
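The returned exceptions are the public types exported by the package, so the mapping can be exercised directly; a sketch for the 429 branch (request/body contents are illustrative):

```py
import httpx
from openai import RateLimitError

request = httpx.Request("GET", "https://api.openai.com/v1/models")
response = httpx.Response(429, request=request)
err = RateLimitError("Error code: 429", response=response, body={"message": "slow down"})
print(err.status_code)  # 429
```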
63,649 |
openai._base_client
|
_make_status_error_from_response
| null |
def _make_status_error_from_response(
self,
response: httpx.Response,
) -> APIStatusError:
if response.is_closed and not response.is_stream_consumed:
# We can't read the response body as it has been closed
# before it was read. This can happen if an event hook
# raises a status error.
body = None
err_msg = f"Error code: {response.status_code}"
else:
err_text = response.text.strip()
body = err_text
try:
body = json.loads(err_text)
err_msg = f"Error code: {response.status_code} - {body}"
except Exception:
err_msg = err_text or f"Error code: {response.status_code}"
return self._make_status_error(err_msg, body=body, response=response)
|
(self, response: httpx.Response) -> openai.APIStatusError
|
63,650 |
openai._base_client
|
_maybe_override_cast_to
| null |
def _maybe_override_cast_to(self, cast_to: type[ResponseT], options: FinalRequestOptions) -> type[ResponseT]:
if not is_given(options.headers):
return cast_to
# make a copy of the headers so we don't mutate user-input
headers = dict(options.headers)
# we internally support defining a temporary header to override the
# default `cast_to` type for use with `.with_raw_response` and `.with_streaming_response`
# see _response.py for implementation details
override_cast_to = headers.pop(OVERRIDE_CAST_TO_HEADER, NOT_GIVEN)
if is_given(override_cast_to):
options.headers = headers
return cast(Type[ResponseT], override_cast_to)
return cast_to
|
(self, cast_to: type[~ResponseT], options: openai._models.FinalRequestOptions) -> type[~ResponseT]
|
63,651 |
openai._base_client
|
_parse_retry_after_header
|
Returns a float of the number of seconds (not milliseconds) to wait before retrying, or None if unspecified.
About the Retry-After header: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
See also https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After#syntax
|
def _parse_retry_after_header(self, response_headers: Optional[httpx.Headers] = None) -> float | None:
"""Returns a float of the number of seconds (not milliseconds) to wait after retrying, or None if unspecified.
About the Retry-After header: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
See also https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After#syntax
"""
if response_headers is None:
return None
# First, try the non-standard `retry-after-ms` header for milliseconds,
# which is more precise than integer-seconds `retry-after`
try:
retry_ms_header = response_headers.get("retry-after-ms", None)
return float(retry_ms_header) / 1000
except (TypeError, ValueError):
pass
# Next, try parsing `retry-after` header as seconds (allowing nonstandard floats).
retry_header = response_headers.get("retry-after")
try:
# note: the spec indicates that this should only ever be an integer
# but if someone sends a float there's no reason for us to not respect it
return float(retry_header)
except (TypeError, ValueError):
pass
# Last, try parsing `retry-after` as a date.
retry_date_tuple = email.utils.parsedate_tz(retry_header)
if retry_date_tuple is None:
return None
retry_date = email.utils.mktime_tz(retry_date_tuple)
return float(retry_date - time.time())
|
(self, response_headers: Optional[httpx.Headers] = None) -> float | None
|
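The fallback chain above (millisecond header, then numeric seconds, then an HTTP-date) can be illustrated with a standalone re-implementation over a plain dict; this is a sketch for clarity, not the library's API:
```python
import email.utils
import time

def parse_retry_after(headers: dict) -> float | None:
    # 1) Non-standard millisecond header, most precise.
    try:
        return float(headers["retry-after-ms"]) / 1000
    except (KeyError, TypeError, ValueError):
        pass
    retry_header = headers.get("retry-after")
    # 2) Numeric seconds (the spec says integer, but floats are honored too).
    try:
        return float(retry_header)
    except (TypeError, ValueError):
        pass
    # 3) HTTP-date: convert to a relative delay from "now".
    retry_date_tuple = email.utils.parsedate_tz(retry_header)
    if retry_date_tuple is None:
        return None
    return float(email.utils.mktime_tz(retry_date_tuple) - time.time())

print(parse_retry_after({"retry-after-ms": "1500"}))  # 1.5
print(parse_retry_after({"retry-after": "3"}))        # 3.0
print(parse_retry_after({}))                          # None
```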
63,652 |
openai._base_client
|
_prepare_options
|
Hook for mutating the given options
|
def _prepare_options(
self,
options: FinalRequestOptions, # noqa: ARG002
) -> None:
"""Hook for mutating the given options"""
return None
|
(self, options: openai._models.FinalRequestOptions) -> NoneType
|
63,653 |
openai._base_client
|
_prepare_request
|
This method is used as a callback for mutating the `Request` object
after it has been constructed.
This is useful for cases where you want to add certain headers based on
the request properties, e.g. `url`, `method` etc.
|
def _prepare_request(
self,
request: httpx.Request, # noqa: ARG002
) -> None:
"""This method is used as a callback for mutating the `Request` object
after it has been constructed.
This is useful for cases where you want to add certain headers based on
the request properties, e.g. `url`, `method` etc.
"""
return None
|
(self, request: httpx.Request) -> NoneType
|
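Because the hook is a no-op by default, a subclass can override it to stamp every outgoing request. A hedged sketch (overriding a private method is unsupported upstream, and the `x-trace-id` header name is invented for illustration):
```python
import httpx
from openai import OpenAI

class TracingOpenAI(OpenAI):
    def _prepare_request(self, request: httpx.Request) -> None:
        # Invented example header; this runs after the Request object is built.
        request.headers["x-trace-id"] = "example-trace-123"

client = TracingOpenAI()  # assumes OPENAI_API_KEY in the environment
```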
63,654 |
openai._base_client
|
_prepare_url
|
Merge a URL argument together with any 'base_url' on the client,
to create the URL used for the outgoing request.
|
def _prepare_url(self, url: str) -> URL:
"""
Merge a URL argument together with any 'base_url' on the client,
to create the URL used for the outgoing request.
"""
# Copied from httpx's `_merge_url` method.
merge_url = URL(url)
if merge_url.is_relative_url:
merge_raw_path = self.base_url.raw_path + merge_url.raw_path.lstrip(b"/")
return self.base_url.copy_with(raw_path=merge_raw_path)
return merge_url
|
(self, url: str) -> httpx.URL
|
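The merge behavior mirrors httpx: relative paths are appended to `base_url`, absolute URLs pass through untouched. A standalone sketch of the same logic (the base URL below is an assumed example):
```python
from httpx import URL

base_url = URL("https://api.openai.com/v1/")  # assumed example base

def prepare_url(url: str) -> URL:
    merge_url = URL(url)
    if merge_url.is_relative_url:
        # Join the relative path onto the base, avoiding a doubled slash.
        merge_raw_path = base_url.raw_path + merge_url.raw_path.lstrip(b"/")
        return base_url.copy_with(raw_path=merge_raw_path)
    return merge_url

print(prepare_url("chat/completions"))       # https://api.openai.com/v1/chat/completions
print(prepare_url("https://example.com/x"))  # https://example.com/x (absolute: untouched)
```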
63,655 |
openai._base_client
|
_process_response
| null |
def _process_response(
self,
*,
cast_to: Type[ResponseT],
options: FinalRequestOptions,
response: httpx.Response,
stream: bool,
stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None,
) -> ResponseT:
if response.request.headers.get(RAW_RESPONSE_HEADER) == "true":
return cast(
ResponseT,
LegacyAPIResponse(
raw=response,
client=self,
cast_to=cast_to,
stream=stream,
stream_cls=stream_cls,
options=options,
),
)
origin = get_origin(cast_to) or cast_to
if inspect.isclass(origin) and issubclass(origin, BaseAPIResponse):
if not issubclass(origin, APIResponse):
raise TypeError(f"API Response types must subclass {APIResponse}; Received {origin}")
response_cls = cast("type[BaseAPIResponse[Any]]", cast_to)
return cast(
ResponseT,
response_cls(
raw=response,
client=self,
cast_to=extract_response_type(response_cls),
stream=stream,
stream_cls=stream_cls,
options=options,
),
)
if cast_to == httpx.Response:
return cast(ResponseT, response)
api_response = APIResponse(
raw=response,
client=self,
cast_to=cast("type[ResponseT]", cast_to), # pyright: ignore[reportUnnecessaryCast]
stream=stream,
stream_cls=stream_cls,
options=options,
)
if bool(response.request.headers.get(RAW_RESPONSE_HEADER)):
return cast(ResponseT, api_response)
return api_response.parse()
|
(self, *, cast_to: Type[~ResponseT], options: openai._models.FinalRequestOptions, response: httpx.Response, stream: bool, stream_cls: type[openai.Stream[typing.Any]] | type[openai.AsyncStream[typing.Any]] | None) -> ~ResponseT
|
63,656 |
openai._base_client
|
_process_response_data
| null |
def _process_response_data(
self,
*,
data: object,
cast_to: type[ResponseT],
response: httpx.Response,
) -> ResponseT:
if data is None:
return cast(ResponseT, None)
if cast_to is object:
return cast(ResponseT, data)
try:
if inspect.isclass(cast_to) and issubclass(cast_to, ModelBuilderProtocol):
return cast(ResponseT, cast_to.build(response=response, data=data))
if self._strict_response_validation:
return cast(ResponseT, validate_type(type_=cast_to, value=data))
return cast(ResponseT, construct_type(type_=cast_to, value=data))
except pydantic.ValidationError as err:
raise APIResponseValidationError(response=response, body=data) from err
|
(self, *, data: object, cast_to: type[~ResponseT], response: httpx.Response) -> ~ResponseT
|
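The strict/lenient split is roughly the pydantic distinction between validating and constructing a model: `validate_type` raises on bad payloads, while `construct_type` builds the object without validation. A rough pydantic analogy (not the library's internal helpers):
```python
import pydantic

class Usage(pydantic.BaseModel):
    prompt_tokens: int
    total_tokens: int

data = {"prompt_tokens": "12", "total_tokens": 20}  # note the string value

# Strict path (~ validate_type with _strict_response_validation=True):
# coerces "12" -> 12 and raises ValidationError on truly bad payloads.
print(Usage.model_validate(data))

# Lenient path (~ construct_type): no validation; "12" stays a string
# and any mismatch only surfaces later, at attribute-use time.
print(Usage.model_construct(**data))
```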
63,657 |
openai._base_client
|
_remaining_retries
| null |
def _remaining_retries(
self,
remaining_retries: Optional[int],
options: FinalRequestOptions,
) -> int:
return remaining_retries if remaining_retries is not None else options.get_max_retries(self.max_retries)
|
(self, remaining_retries: Optional[int], options: openai._models.FinalRequestOptions) -> int
|
63,658 |
openai._base_client
|
_request
| null |
def _request(
self,
*,
cast_to: Type[ResponseT],
options: FinalRequestOptions,
remaining_retries: int | None,
stream: bool,
stream_cls: type[_StreamT] | None,
) -> ResponseT | _StreamT:
cast_to = self._maybe_override_cast_to(cast_to, options)
self._prepare_options(options)
retries = self._remaining_retries(remaining_retries, options)
request = self._build_request(options)
self._prepare_request(request)
kwargs: HttpxSendArgs = {}
if self.custom_auth is not None:
kwargs["auth"] = self.custom_auth
log.debug("Sending HTTP Request: %s %s", request.method, request.url)
try:
response = self._client.send(
request,
stream=stream or self._should_stream_response_body(request=request),
**kwargs,
)
except httpx.TimeoutException as err:
log.debug("Encountered httpx.TimeoutException", exc_info=True)
if retries > 0:
return self._retry_request(
options,
cast_to,
retries,
stream=stream,
stream_cls=stream_cls,
response_headers=None,
)
log.debug("Raising timeout error")
raise APITimeoutError(request=request) from err
except Exception as err:
log.debug("Encountered Exception", exc_info=True)
if retries > 0:
return self._retry_request(
options,
cast_to,
retries,
stream=stream,
stream_cls=stream_cls,
response_headers=None,
)
log.debug("Raising connection error")
raise APIConnectionError(request=request) from err
log.debug(
'HTTP Response: %s %s "%i %s" %s',
request.method,
request.url,
response.status_code,
response.reason_phrase,
response.headers,
)
log.debug("request_id: %s", response.headers.get("x-request-id"))
try:
response.raise_for_status()
except httpx.HTTPStatusError as err: # thrown on 4xx and 5xx status code
log.debug("Encountered httpx.HTTPStatusError", exc_info=True)
if retries > 0 and self._should_retry(err.response):
err.response.close()
return self._retry_request(
options,
cast_to,
retries,
err.response.headers,
stream=stream,
stream_cls=stream_cls,
)
# If the response is streamed then we need to explicitly read the response
# to completion before attempting to access the response text.
if not err.response.is_closed:
err.response.read()
log.debug("Re-raising status error")
raise self._make_status_error_from_response(err.response) from None
return self._process_response(
cast_to=cast_to,
options=options,
response=response,
stream=stream,
stream_cls=stream_cls,
)
|
(self, *, cast_to: Type[~ResponseT], options: openai._models.FinalRequestOptions, remaining_retries: int | None, stream: bool, stream_cls: type[~_StreamT] | None) -> Union[~ResponseT, ~_StreamT]
|
63,659 |
openai._base_client
|
_request_api_list
| null |
def _request_api_list(
self,
model: Type[object],
page: Type[SyncPageT],
options: FinalRequestOptions,
) -> SyncPageT:
def _parser(resp: SyncPageT) -> SyncPageT:
resp._set_private_attributes(
client=self,
model=model,
options=options,
)
return resp
options.post_parser = _parser
return self.request(page, options, stream=False)
|
(self, model: Type[object], page: Type[~SyncPageT], options: openai._models.FinalRequestOptions) -> ~SyncPageT
|
63,660 |
openai._base_client
|
_retry_request
| null |
def _retry_request(
self,
options: FinalRequestOptions,
cast_to: Type[ResponseT],
remaining_retries: int,
response_headers: httpx.Headers | None,
*,
stream: bool,
stream_cls: type[_StreamT] | None,
) -> ResponseT | _StreamT:
remaining = remaining_retries - 1
if remaining == 1:
log.debug("1 retry left")
else:
log.debug("%i retries left", remaining)
timeout = self._calculate_retry_timeout(remaining, options, response_headers)
log.info("Retrying request to %s in %f seconds", options.url, timeout)
# In a synchronous context we are blocking the entire thread. Up to the library user to run the client in a
# different thread if necessary.
time.sleep(timeout)
return self._request(
options=options,
cast_to=cast_to,
remaining_retries=remaining,
stream=stream,
stream_cls=stream_cls,
)
|
(self, options: openai._models.FinalRequestOptions, cast_to: Type[~ResponseT], remaining_retries: int, response_headers: httpx.Headers | None, *, stream: bool, stream_cls: type[~_StreamT] | None) -> Union[~ResponseT, ~_StreamT]
|
63,661 |
openai._base_client
|
_serialize_multipartform
| null |
def _serialize_multipartform(self, data: Mapping[object, object]) -> dict[str, object]:
items = self.qs.stringify_items(
# TODO: type ignore is required as stringify_items is well typed but we can't be
# well typed without heavy validation.
data, # type: ignore
array_format="brackets",
)
serialized: dict[str, object] = {}
for key, value in items:
existing = serialized.get(key)
if not existing:
serialized[key] = value
continue
# If a value has already been set for this key then that
# means we're sending data like `array[]=[1, 2, 3]` and we
# need to tell httpx that we want to send multiple values with
# the same key which is done by using a list or a tuple.
#
# Note: 2d arrays should never result in the same key at both
# levels so it's safe to assume that if the value is a list,
# it was because we changed it to be a list.
if is_list(existing):
existing.append(value)
else:
serialized[key] = [existing, value]
return serialized
|
(self, data: Mapping[object, object]) -> dict[str, object]
|
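The collision handling in the loop above is easiest to see with a toy input: once the bracket-style serializer emits the same key twice, values are collected into a list so httpx sends repeated form fields. A minimal sketch:
```python
# Pretend output of qs.stringify_items(..., array_format="brackets"):
items = [("name", "ada"), ("tags[]", "a"), ("tags[]", "b")]

serialized: dict = {}
for key, value in items:
    existing = serialized.get(key)
    if not existing:
        serialized[key] = value              # first occurrence: store scalar
    elif isinstance(existing, list):
        existing.append(value)               # third and later: extend the list
    else:
        serialized[key] = [existing, value]  # second occurrence: promote to list

print(serialized)  # {'name': 'ada', 'tags[]': ['a', 'b']}
```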
63,662 |
openai._base_client
|
_should_retry
| null |
def _should_retry(self, response: httpx.Response) -> bool:
# Note: this is not a standard header
should_retry_header = response.headers.get("x-should-retry")
# If the server explicitly says whether or not to retry, obey.
if should_retry_header == "true":
log.debug("Retrying as header `x-should-retry` is set to `true`")
return True
if should_retry_header == "false":
log.debug("Not retrying as header `x-should-retry` is set to `false`")
return False
# Retry on request timeouts.
if response.status_code == 408:
log.debug("Retrying due to status code %i", response.status_code)
return True
# Retry on lock timeouts.
if response.status_code == 409:
log.debug("Retrying due to status code %i", response.status_code)
return True
# Retry on rate limits.
if response.status_code == 429:
log.debug("Retrying due to status code %i", response.status_code)
return True
# Retry internal errors.
if response.status_code >= 500:
log.debug("Retrying due to status code %i", response.status_code)
return True
log.debug("Not retrying")
return False
|
(self, response: httpx.Response) -> bool
|
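The policy condenses to: obey an explicit `x-should-retry` header, otherwise retry 408/409/429 and all 5xx. A standalone restatement for illustration:
```python
import httpx

def should_retry(response: httpx.Response) -> bool:
    header = response.headers.get("x-should-retry")
    if header in ("true", "false"):  # server's explicit (non-standard) verdict wins
        return header == "true"
    # 408 request timeout, 409 lock conflict, 429 rate limit, 5xx server errors
    return response.status_code in (408, 409, 429) or response.status_code >= 500

for code in (400, 408, 429, 503):
    print(code, should_retry(httpx.Response(status_code=code)))
# 400 False / 408 True / 429 True / 503 True
```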
63,663 |
openai._base_client
|
_should_stream_response_body
| null |
def _should_stream_response_body(self, request: httpx.Request) -> bool:
return request.headers.get(RAW_RESPONSE_HEADER) == "stream" # type: ignore[no-any-return]
|
(self, request: httpx.Request) -> bool
|
63,664 |
openai._base_client
|
_validate_headers
|
Validate the given default headers and custom headers.
Does nothing by default.
|
def _validate_headers(
self,
headers: Headers, # noqa: ARG002
custom_headers: Headers, # noqa: ARG002
) -> None:
"""Validate the given default headers and custom headers.
Does nothing by default.
"""
return
|
(self, headers: Mapping[str, Union[str, openai._types.Omit]], custom_headers: Mapping[str, Union[str, openai._types.Omit]]) -> NoneType
|
63,665 |
openai._base_client
|
close
|
Close the underlying HTTPX client.
The client will *not* be usable after this.
|
def close(self) -> None:
"""Close the underlying HTTPX client.
The client will *not* be usable after this.
"""
# If an error is thrown while constructing a client, self._client
# may not be present
if hasattr(self, "_client"):
self._client.close()
|
(self) -> NoneType
|
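The client also implements the context-manager protocol, which calls `close()` on exit. A brief sketch (assumes `OPENAI_API_KEY` in the environment):
```python
from openai import OpenAI

with OpenAI() as client:
    print(client.models.list().data[0].id)
# On exiting the block the underlying httpx client is closed;
# further requests on `client` would fail.
```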
63,666 |
openai._client
|
copy
|
Create a new client instance re-using the same options given to the current client with optional overriding.
|
def copy(
self,
*,
api_key: str | None = None,
organization: str | None = None,
project: str | None = None,
base_url: str | httpx.URL | None = None,
timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
http_client: httpx.Client | None = None,
max_retries: int | NotGiven = NOT_GIVEN,
default_headers: Mapping[str, str] | None = None,
set_default_headers: Mapping[str, str] | None = None,
default_query: Mapping[str, object] | None = None,
set_default_query: Mapping[str, object] | None = None,
_extra_kwargs: Mapping[str, Any] = {},
) -> Self:
"""
Create a new client instance re-using the same options given to the current client with optional overriding.
"""
if default_headers is not None and set_default_headers is not None:
raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive")
if default_query is not None and set_default_query is not None:
raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive")
headers = self._custom_headers
if default_headers is not None:
headers = {**headers, **default_headers}
elif set_default_headers is not None:
headers = set_default_headers
params = self._custom_query
if default_query is not None:
params = {**params, **default_query}
elif set_default_query is not None:
params = set_default_query
http_client = http_client or self._client
return self.__class__(
api_key=api_key or self.api_key,
organization=organization or self.organization,
project=project or self.project,
base_url=base_url or self.base_url,
timeout=self.timeout if isinstance(timeout, NotGiven) else timeout,
http_client=http_client,
max_retries=max_retries if is_given(max_retries) else self.max_retries,
default_headers=headers,
default_query=params,
**_extra_kwargs,
)
|
(self, *, api_key: Optional[str] = None, organization: Optional[str] = None, project: Optional[str] = None, base_url: Union[str, httpx.URL, NoneType] = None, timeout: float | openai.Timeout | None | openai.NotGiven = NOT_GIVEN, http_client: Optional[httpx.Client] = None, max_retries: int | openai.NotGiven = NOT_GIVEN, default_headers: Optional[Mapping[str, str]] = None, set_default_headers: Optional[Mapping[str, str]] = None, default_query: Optional[Mapping[str, object]] = None, set_default_query: Optional[Mapping[str, object]] = None, _extra_kwargs: Mapping[str, Any] = {}) -> typing_extensions.Self
|
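A hedged usage sketch of `copy` (upstream also aliases it as `with_options`), showing the merge-vs-replace distinction between `default_headers` and `set_default_headers`. The project id and header name below are invented placeholders:
```python
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY in the environment

scoped = client.copy(
    project="proj_example",                  # placeholder project id
    default_headers={"x-team": "platform"},  # merged into existing custom headers
)

replaced = client.copy(
    set_default_headers={"x-team": "platform"},  # replaces custom headers wholesale
)
```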
63,667 |
openai._base_client
|
delete
| null |
def delete(
self,
path: str,
*,
cast_to: Type[ResponseT],
body: Body | None = None,
options: RequestOptions = {},
) -> ResponseT:
opts = FinalRequestOptions.construct(method="delete", url=path, json_data=body, **options)
return self.request(cast_to, opts)
|
(self, path: str, *, cast_to: Type[~ResponseT], body: Optional[object] = None, options: openai.RequestOptions = {}) -> ~ResponseT
|
63,668 |
openai._base_client
|
get
| null |
def get(
self,
path: str,
*,
cast_to: Type[ResponseT],
options: RequestOptions = {},
stream: bool = False,
stream_cls: type[_StreamT] | None = None,
) -> ResponseT | _StreamT:
opts = FinalRequestOptions.construct(method="get", url=path, **options)
# cast is required because mypy complains about returning Any even though
# it understands the type variables
return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))
|
(self, path: str, *, cast_to: Type[~ResponseT], options: openai.RequestOptions = {}, stream: bool = False, stream_cls: Optional[type[~_StreamT]] = None) -> Union[~ResponseT, ~_StreamT]
|
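All typed resources (`client.chat`, `client.models`, ...) funnel through these verb helpers. They are private, but calling one directly shows the plumbing: `cast_to=httpx.Response` takes the short-circuit branch in `_process_response` and hands back the raw response. A hedged sketch:
```python
import httpx
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY in the environment

# Unsupported direct use of the private helper, for illustration only.
raw = client.get("/models", cast_to=httpx.Response)
print(raw.status_code)  # 200 when the key is valid
```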
63,669 |
openai._base_client
|
get_api_list
| null |
def get_api_list(
self,
path: str,
*,
model: Type[object],
page: Type[SyncPageT],
body: Body | None = None,
options: RequestOptions = {},
method: str = "get",
) -> SyncPageT:
opts = FinalRequestOptions.construct(method=method, url=path, json_data=body, **options)
return self._request_api_list(model, page, opts)
|
(self, path: str, *, model: Type[object], page: Type[~SyncPageT], body: Optional[object] = None, options: openai.RequestOptions = {}, method: str = 'get') -> ~SyncPageT
|
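These list helpers return page objects that carry their client and request options (attached by the `_parser` post-parser in `_request_api_list`), which is what makes auto-pagination work. Typical usage (assumes `OPENAI_API_KEY`):
```python
from openai import OpenAI

client = OpenAI()

# Iterating a SyncCursorPage transparently requests the next page
# whenever the current one is exhausted.
for model in client.models.list():
    print(model.id)
```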
63,670 |
openai._base_client
|
is_closed
| null |
def is_closed(self) -> bool:
return self._client.is_closed
|
(self) -> bool
|
63,671 |
openai._base_client
|
patch
| null |
def patch(
self,
path: str,
*,
cast_to: Type[ResponseT],
body: Body | None = None,
options: RequestOptions = {},
) -> ResponseT:
opts = FinalRequestOptions.construct(method="patch", url=path, json_data=body, **options)
return self.request(cast_to, opts)
|
(self, path: str, *, cast_to: Type[~ResponseT], body: Optional[object] = None, options: openai.RequestOptions = {}) -> ~ResponseT
|
63,672 |
openai._base_client
|
platform_headers
| null |
def platform_headers(self) -> Dict[str, str]:
return platform_headers(self._version)
|
(self) -> Dict[str, str]
|
63,673 |
openai._base_client
|
post
| null |
def post(
self,
path: str,
*,
cast_to: Type[ResponseT],
body: Body | None = None,
options: RequestOptions = {},
files: RequestFiles | None = None,
stream: bool = False,
stream_cls: type[_StreamT] | None = None,
) -> ResponseT | _StreamT:
opts = FinalRequestOptions.construct(
method="post", url=path, json_data=body, files=to_httpx_files(files), **options
)
return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))
|
(self, path: str, *, cast_to: Type[~ResponseT], body: Optional[object] = None, options: openai.RequestOptions = {}, files: Union[Mapping[str, Union[IO[bytes], bytes, os.PathLike, Tuple[Optional[str], Union[IO[bytes], bytes, os.PathLike]], Tuple[Optional[str], Union[IO[bytes], bytes, os.PathLike], Optional[str]], Tuple[Optional[str], Union[IO[bytes], bytes, os.PathLike], Optional[str], Mapping[str, str]]]], Sequence[Tuple[str, Union[IO[bytes], bytes, os.PathLike, Tuple[Optional[str], Union[IO[bytes], bytes, os.PathLike]], Tuple[Optional[str], Union[IO[bytes], bytes, os.PathLike], Optional[str]], Tuple[Optional[str], Union[IO[bytes], bytes, os.PathLike], Optional[str], Mapping[str, str]]]]], NoneType] = None, stream: bool = False, stream_cls: Optional[type[~_StreamT]] = None) -> Union[~ResponseT, ~_StreamT]
|
63,674 |
openai._base_client
|
put
| null |
def put(
self,
path: str,
*,
cast_to: Type[ResponseT],
body: Body | None = None,
files: RequestFiles | None = None,
options: RequestOptions = {},
) -> ResponseT:
opts = FinalRequestOptions.construct(
method="put", url=path, json_data=body, files=to_httpx_files(files), **options
)
return self.request(cast_to, opts)
|
(self, path: str, *, cast_to: Type[~ResponseT], body: Optional[object] = None, files: Union[Mapping[str, Union[IO[bytes], bytes, os.PathLike, Tuple[Optional[str], Union[IO[bytes], bytes, os.PathLike]], Tuple[Optional[str], Union[IO[bytes], bytes, os.PathLike], Optional[str]], Tuple[Optional[str], Union[IO[bytes], bytes, os.PathLike], Optional[str], Mapping[str, str]]]], Sequence[Tuple[str, Union[IO[bytes], bytes, os.PathLike, Tuple[Optional[str], Union[IO[bytes], bytes, os.PathLike]], Tuple[Optional[str], Union[IO[bytes], bytes, os.PathLike], Optional[str]], Tuple[Optional[str], Union[IO[bytes], bytes, os.PathLike], Optional[str], Mapping[str, str]]]]], NoneType] = None, options: openai.RequestOptions = {}) -> ~ResponseT
|
63,675 |
openai._base_client
|
request
| null |
def request(
self,
cast_to: Type[ResponseT],
options: FinalRequestOptions,
remaining_retries: Optional[int] = None,
*,
stream: bool = False,
stream_cls: type[_StreamT] | None = None,
) -> ResponseT | _StreamT:
return self._request(
cast_to=cast_to,
options=options,
stream=stream,
stream_cls=stream_cls,
remaining_retries=remaining_retries,
)
|
(self, cast_to: Type[~ResponseT], options: openai._models.FinalRequestOptions, remaining_retries: Optional[int] = None, *, stream: bool = False, stream_cls: Optional[type[~_StreamT]] = None) -> Union[~ResponseT, ~_StreamT]
|
63,677 |
litellm.llms.openai
|
OpenAIAssistantsAPI
| null |
class OpenAIAssistantsAPI(BaseLLM):
def __init__(self) -> None:
super().__init__()
def get_openai_client(
self,
api_key: Optional[str],
api_base: Optional[str],
timeout: Union[float, httpx.Timeout],
max_retries: Optional[int],
organization: Optional[str],
client: Optional[OpenAI] = None,
) -> OpenAI:
received_args = locals()
if client is None:
data = {}
for k, v in received_args.items():
if k == "self" or k == "client":
pass
elif k == "api_base" and v is not None:
data["base_url"] = v
elif v is not None:
data[k] = v
openai_client = OpenAI(**data) # type: ignore
else:
openai_client = client
return openai_client
### ASSISTANTS ###
def get_assistants(
self,
api_key: Optional[str],
api_base: Optional[str],
timeout: Union[float, httpx.Timeout],
max_retries: Optional[int],
organization: Optional[str],
client: Optional[OpenAI],
) -> SyncCursorPage[Assistant]:
openai_client = self.get_openai_client(
api_key=api_key,
api_base=api_base,
timeout=timeout,
max_retries=max_retries,
organization=organization,
client=client,
)
response = openai_client.beta.assistants.list()
return response
### MESSAGES ###
def add_message(
self,
thread_id: str,
message_data: MessageData,
api_key: Optional[str],
api_base: Optional[str],
timeout: Union[float, httpx.Timeout],
max_retries: Optional[int],
organization: Optional[str],
client: Optional[OpenAI] = None,
) -> OpenAIMessage:
openai_client = self.get_openai_client(
api_key=api_key,
api_base=api_base,
timeout=timeout,
max_retries=max_retries,
organization=organization,
client=client,
)
thread_message: OpenAIMessage = openai_client.beta.threads.messages.create(
thread_id, **message_data
)
response_obj: Optional[OpenAIMessage] = None
if getattr(thread_message, "status", None) is None:
thread_message.status = "completed"
response_obj = OpenAIMessage(**thread_message.dict())
else:
response_obj = OpenAIMessage(**thread_message.dict())
return response_obj
def get_messages(
self,
thread_id: str,
api_key: Optional[str],
api_base: Optional[str],
timeout: Union[float, httpx.Timeout],
max_retries: Optional[int],
organization: Optional[str],
client: Optional[OpenAI] = None,
) -> SyncCursorPage[OpenAIMessage]:
openai_client = self.get_openai_client(
api_key=api_key,
api_base=api_base,
timeout=timeout,
max_retries=max_retries,
organization=organization,
client=client,
)
response = openai_client.beta.threads.messages.list(thread_id=thread_id)
return response
### THREADS ###
def create_thread(
self,
metadata: Optional[dict],
api_key: Optional[str],
api_base: Optional[str],
timeout: Union[float, httpx.Timeout],
max_retries: Optional[int],
organization: Optional[str],
client: Optional[OpenAI],
messages: Optional[Iterable[OpenAICreateThreadParamsMessage]],
) -> Thread:
"""
Here's an example:
```
from litellm.llms.openai import OpenAIAssistantsAPI, MessageData
# create thread
message: MessageData = {"role": "user", "content": "Hey, how's it going?"}
openai_api.create_thread(messages=[message])
```
"""
openai_client = self.get_openai_client(
api_key=api_key,
api_base=api_base,
timeout=timeout,
max_retries=max_retries,
organization=organization,
client=client,
)
data = {}
if messages is not None:
data["messages"] = messages # type: ignore
if metadata is not None:
data["metadata"] = metadata # type: ignore
message_thread = openai_client.beta.threads.create(**data) # type: ignore
return Thread(**message_thread.dict())
def get_thread(
self,
thread_id: str,
api_key: Optional[str],
api_base: Optional[str],
timeout: Union[float, httpx.Timeout],
max_retries: Optional[int],
organization: Optional[str],
client: Optional[OpenAI],
) -> Thread:
openai_client = self.get_openai_client(
api_key=api_key,
api_base=api_base,
timeout=timeout,
max_retries=max_retries,
organization=organization,
client=client,
)
response = openai_client.beta.threads.retrieve(thread_id=thread_id)
return Thread(**response.dict())
def delete_thread(self):
pass
### RUNS ###
def run_thread(
self,
thread_id: str,
assistant_id: str,
additional_instructions: Optional[str],
instructions: Optional[str],
metadata: Optional[object],
model: Optional[str],
stream: Optional[bool],
tools: Optional[Iterable[AssistantToolParam]],
api_key: Optional[str],
api_base: Optional[str],
timeout: Union[float, httpx.Timeout],
max_retries: Optional[int],
organization: Optional[str],
client: Optional[OpenAI],
) -> Run:
openai_client = self.get_openai_client(
api_key=api_key,
api_base=api_base,
timeout=timeout,
max_retries=max_retries,
organization=organization,
client=client,
)
response = openai_client.beta.threads.runs.create_and_poll(
thread_id=thread_id,
assistant_id=assistant_id,
additional_instructions=additional_instructions,
instructions=instructions,
metadata=metadata,
model=model,
tools=tools,
)
return response
|
() -> None
|
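An end-to-end sketch of the wrapper above: create a thread, add a message, and run it against an existing assistant. The assistant id is a placeholder, `api_key=None` defers to the `OPENAI_API_KEY` environment variable, and the whole flow is an assumed composition of these methods, not documented litellm usage:
```python
from litellm.llms.openai import OpenAIAssistantsAPI, MessageData

api = OpenAIAssistantsAPI()
common = dict(
    api_key=None,   # falls back to OPENAI_API_KEY
    api_base=None,
    timeout=600.0,
    max_retries=2,
    organization=None,
    client=None,
)

thread = api.create_thread(metadata=None, messages=None, **common)
message: MessageData = {"role": "user", "content": "Hey, how's it going?"}
api.add_message(thread_id=thread.id, message_data=message, **common)

run = api.run_thread(
    thread_id=thread.id,
    assistant_id="asst_placeholder",  # placeholder: supply a real assistant id
    additional_instructions=None,
    instructions=None,
    metadata=None,
    model=None,
    stream=None,
    tools=None,
    **common,
)
print(run.status)  # "completed" once create_and_poll finishes
```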
63,681 |
litellm.llms.openai
|
add_message
| null |
def add_message(
self,
thread_id: str,
message_data: MessageData,
api_key: Optional[str],
api_base: Optional[str],
timeout: Union[float, httpx.Timeout],
max_retries: Optional[int],
organization: Optional[str],
client: Optional[OpenAI] = None,
) -> OpenAIMessage:
openai_client = self.get_openai_client(
api_key=api_key,
api_base=api_base,
timeout=timeout,
max_retries=max_retries,
organization=organization,
client=client,
)
thread_message: OpenAIMessage = openai_client.beta.threads.messages.create(
thread_id, **message_data
)
response_obj: Optional[OpenAIMessage] = None
if getattr(thread_message, "status", None) is None:
thread_message.status = "completed"
response_obj = OpenAIMessage(**thread_message.dict())
else:
response_obj = OpenAIMessage(**thread_message.dict())
return response_obj
|
(self, thread_id: str, message_data: litellm.types.llms.openai.MessageData, api_key: Optional[str], api_base: Optional[str], timeout: Union[float, openai.Timeout], max_retries: Optional[int], organization: Optional[str], client: Optional[openai.OpenAI] = None) -> openai.types.beta.threads.message.Message
|
63,682 |
litellm.llms.base
|
completion
| null |
def completion(
self, *args, **kwargs
): # logic for parsing in - calling - parsing out model completion calls
pass
|
(self, *args, **kwargs)
|
63,685 |
litellm.llms.openai
|
create_thread
|
Here's an example:
```
from litellm.llms.openai import OpenAIAssistantsAPI, MessageData
# create thread
message: MessageData = {"role": "user", "content": "Hey, how's it going?"}
openai_api.create_thread(messages=[message])
```
|
def create_thread(
self,
metadata: Optional[dict],
api_key: Optional[str],
api_base: Optional[str],
timeout: Union[float, httpx.Timeout],
max_retries: Optional[int],
organization: Optional[str],
client: Optional[OpenAI],
messages: Optional[Iterable[OpenAICreateThreadParamsMessage]],
) -> Thread:
"""
Here's an example:
```
from litellm.llms.openai import OpenAIAssistantsAPI, MessageData
# create thread
message: MessageData = {"role": "user", "content": "Hey, how's it going?"}
openai_api.create_thread(messages=[message])
```
"""
openai_client = self.get_openai_client(
api_key=api_key,
api_base=api_base,
timeout=timeout,
max_retries=max_retries,
organization=organization,
client=client,
)
data = {}
if messages is not None:
data["messages"] = messages # type: ignore
if metadata is not None:
data["metadata"] = metadata # type: ignore
message_thread = openai_client.beta.threads.create(**data) # type: ignore
return Thread(**message_thread.dict())
|
(self, metadata: Optional[dict], api_key: Optional[str], api_base: Optional[str], timeout: Union[float, openai.Timeout], max_retries: Optional[int], organization: Optional[str], client: Optional[openai.OpenAI], messages: Optional[Iterable[openai.types.beta.thread_create_params.Message]]) -> litellm.types.llms.openai.Thread
|
63,686 |
litellm.llms.openai
|
delete_thread
| null |
def delete_thread(self):
pass
|
(self)
|
63,688 |
litellm.llms.openai
|
get_assistants
| null |
def get_assistants(
self,
api_key: Optional[str],
api_base: Optional[str],
timeout: Union[float, httpx.Timeout],
max_retries: Optional[int],
organization: Optional[str],
client: Optional[OpenAI],
) -> SyncCursorPage[Assistant]:
openai_client = self.get_openai_client(
api_key=api_key,
api_base=api_base,
timeout=timeout,
max_retries=max_retries,
organization=organization,
client=client,
)
response = openai_client.beta.assistants.list()
return response
|
(self, api_key: Optional[str], api_base: Optional[str], timeout: Union[float, openai.Timeout], max_retries: Optional[int], organization: Optional[str], client: Optional[openai.OpenAI]) -> openai.pagination.SyncCursorPage[Assistant]
|
63,689 |
litellm.llms.openai
|
get_messages
| null |
def get_messages(
self,
thread_id: str,
api_key: Optional[str],
api_base: Optional[str],
timeout: Union[float, httpx.Timeout],
max_retries: Optional[int],
organization: Optional[str],
client: Optional[OpenAI] = None,
) -> SyncCursorPage[OpenAIMessage]:
openai_client = self.get_openai_client(
api_key=api_key,
api_base=api_base,
timeout=timeout,
max_retries=max_retries,
organization=organization,
client=client,
)
response = openai_client.beta.threads.messages.list(thread_id=thread_id)
return response
|
(self, thread_id: str, api_key: Optional[str], api_base: Optional[str], timeout: Union[float, openai.Timeout], max_retries: Optional[int], organization: Optional[str], client: Optional[openai.OpenAI] = None) -> openai.pagination.SyncCursorPage[Message]
|
63,690 |
litellm.llms.openai
|
get_openai_client
| null |
def get_openai_client(
self,
api_key: Optional[str],
api_base: Optional[str],
timeout: Union[float, httpx.Timeout],
max_retries: Optional[int],
organization: Optional[str],
client: Optional[OpenAI] = None,
) -> OpenAI:
received_args = locals()
if client is None:
data = {}
for k, v in received_args.items():
if k == "self" or k == "client":
pass
elif k == "api_base" and v is not None:
data["base_url"] = v
elif v is not None:
data[k] = v
openai_client = OpenAI(**data) # type: ignore
else:
openai_client = client
return openai_client
|
(self, api_key: Optional[str], api_base: Optional[str], timeout: Union[float, openai.Timeout], max_retries: Optional[int], organization: Optional[str], client: Optional[openai.OpenAI] = None) -> openai.OpenAI
|
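A brief sketch of the two code paths above: when `client` is None, non-None settings are forwarded to the `OpenAI` constructor (with `api_base` renamed to `base_url`); a supplied client is returned unchanged.
```python
from litellm.llms.openai import OpenAIAssistantsAPI

api = OpenAIAssistantsAPI()

# Construction path: None values are skipped, so the SDK falls back to its
# own defaults / environment variables (e.g. OPENAI_API_KEY).
client = api.get_openai_client(
    api_key=None,
    api_base="https://api.openai.com/v1",  # forwarded as base_url
    timeout=600.0,
    max_retries=2,
    organization=None,
)

# Pass-through path: an existing client short-circuits construction.
assert api.get_openai_client(
    api_key="ignored", api_base=None, timeout=1.0,
    max_retries=0, organization=None, client=client,
) is client
```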