Dataset columns: index (int64, values 0–731k) · package (string, length 2–98) · name (string, length 1–76) · docstring (string, length 0–281k) · code (string, length 4–1.07M) · signature (string, length 2–42.8k)
63,045
litellm.budget_manager
projected_cost
null
def projected_cost(self, model: str, messages: list, user: str): text = "".join(message["content"] for message in messages) prompt_tokens = litellm.token_counter(model=model, text=text) prompt_cost, _ = litellm.cost_per_token( model=model, prompt_tokens=prompt_tokens, completion_tokens=0 ) current_cost = self.user_dict[user].get("current_cost", 0) projected_cost = prompt_cost + current_cost return projected_cost
(self, model: str, messages: list, user: str)
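A hedged usage sketch for `projected_cost`: pre-checking a user's budget before sending a request. `BudgetManager`, `create_budget`, `get_total_budget` and `update_cost` follow the litellm budget-manager docs linked in `update_cost` below; treat the exact argument names as assumptions, not something shown in this row.

# Sketch (assumed helpers: create_budget / get_total_budget, per
# https://docs.litellm.ai/docs/budget_manager)
from litellm import BudgetManager, completion

budget_manager = BudgetManager(project_name="demo_project")
user = "user@example.com"
budget_manager.create_budget(total_budget=10, user=user)  # assumed: 10 USD total budget

messages = [{"role": "user", "content": "Hey, how's it going?"}]
if budget_manager.projected_cost(model="gpt-3.5-turbo", messages=messages, user=user) <= budget_manager.get_total_budget(user):
    response = completion(model="gpt-3.5-turbo", messages=messages)
    budget_manager.update_cost(completion_obj=response, user=user)
else:
    response = "Sorry - no budget left!"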
63,046
litellm.budget_manager
reset_cost
null
def reset_cost(self, user): self.user_dict[user]["current_cost"] = 0 self.user_dict[user]["model_cost"] = {} return {"user": self.user_dict[user]}
(self, user)
63,047
litellm.budget_manager
reset_on_duration
null
def reset_on_duration(self, user: str): # Get current and creation time last_updated_at = self.user_dict[user]["last_updated_at"] current_time = time.time() # Convert duration from days to seconds duration_in_seconds = self.user_dict[user]["duration"] * 24 * 60 * 60 # Check if duration has elapsed if current_time - last_updated_at >= duration_in_seconds: # Reset cost if duration has elapsed and update the creation time self.reset_cost(user) self.user_dict[user]["last_updated_at"] = current_time self._save_data_thread() # Save the data
(self, user: str)
63,048
litellm.budget_manager
save_data
null
def save_data(self): if self.client_type == "local": import json # save the user dict with open("user_cost.json", "w") as json_file: json.dump( self.user_dict, json_file, indent=4 ) # Indent for pretty formatting return {"status": "success"} elif self.client_type == "hosted": url = self.api_base + "/set_budget" headers = {"Content-Type": "application/json"} data = {"project_name": self.project_name, "user_dict": self.user_dict} response = requests.post(url, headers=self.headers, json=data) response = response.json() return response
(self)
63,049
litellm.budget_manager
update_budget_all_users
null
def update_budget_all_users(self): for user in self.get_users(): if "duration" in self.user_dict[user]: self.reset_on_duration(user)
(self)
63,050
litellm.budget_manager
update_cost
null
def update_cost( self, user: str, completion_obj: Optional[ModelResponse] = None, model: Optional[str] = None, input_text: Optional[str] = None, output_text: Optional[str] = None, ): if model and input_text and output_text: prompt_tokens = litellm.token_counter( model=model, messages=[{"role": "user", "content": input_text}] ) completion_tokens = litellm.token_counter( model=model, messages=[{"role": "user", "content": output_text}] ) ( prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar, ) = litellm.cost_per_token( model=model, prompt_tokens=prompt_tokens, completion_tokens=completion_tokens, ) cost = prompt_tokens_cost_usd_dollar + completion_tokens_cost_usd_dollar elif completion_obj: cost = litellm.completion_cost(completion_response=completion_obj) model = completion_obj[ "model" ] # if this throws an error try, model = completion_obj['model'] else: raise ValueError( "Either a chat completion object or the text response needs to be passed in. Learn more - https://docs.litellm.ai/docs/budget_manager" ) self.user_dict[user]["current_cost"] = cost + self.user_dict[user].get( "current_cost", 0 ) if "model_cost" in self.user_dict[user]: self.user_dict[user]["model_cost"][model] = cost + self.user_dict[user][ "model_cost" ].get(model, 0) else: self.user_dict[user]["model_cost"] = {model: cost} self._save_data_thread() # [Non-Blocking] Update persistent storage without blocking execution return {"user": self.user_dict[user]}
(self, user: str, completion_obj: Optional[litellm.utils.ModelResponse] = None, model: Optional[str] = None, input_text: Optional[str] = None, output_text: Optional[str] = None)
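Per the two branches in `update_cost` above, cost can be recorded either from a completion response object or from raw input/output text. A minimal sketch; `create_budget` is an assumed helper taken from the docs link in the error message above.

from litellm import BudgetManager, completion

budget_manager = BudgetManager(project_name="demo_project")
user = "user@example.com"
budget_manager.create_budget(total_budget=10, user=user)  # assumed helper, see docs link above

# Path 1: pass the ModelResponse and let litellm.completion_cost() derive the cost
response = completion(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "hi"}])
budget_manager.update_cost(user=user, completion_obj=response)

# Path 2: pass model + raw text; cost is recomputed from token counts (first branch above)
budget_manager.update_cost(
    user=user,
    model="gpt-3.5-turbo",
    input_text="hi",
    output_text=response["choices"][0]["message"]["content"],
)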
63,051
litellm.caching
Cache
null
class Cache: def __init__( self, type: Optional[Literal["local", "redis", "redis-semantic", "s3"]] = "local", host: Optional[str] = None, port: Optional[str] = None, password: Optional[str] = None, namespace: Optional[str] = None, ttl: Optional[float] = None, default_in_memory_ttl: Optional[float] = None, default_in_redis_ttl: Optional[float] = None, similarity_threshold: Optional[float] = None, supported_call_types: Optional[ List[ Literal[ "completion", "acompletion", "embedding", "aembedding", "atranscription", "transcription", ] ] ] = [ "completion", "acompletion", "embedding", "aembedding", "atranscription", "transcription", ], # s3 Bucket, boto3 configuration s3_bucket_name: Optional[str] = None, s3_region_name: Optional[str] = None, s3_api_version: Optional[str] = None, s3_use_ssl: Optional[bool] = True, s3_verify: Optional[Union[bool, str]] = None, s3_endpoint_url: Optional[str] = None, s3_aws_access_key_id: Optional[str] = None, s3_aws_secret_access_key: Optional[str] = None, s3_aws_session_token: Optional[str] = None, s3_config: Optional[Any] = None, s3_path: Optional[str] = None, redis_semantic_cache_use_async=False, redis_semantic_cache_embedding_model="text-embedding-ada-002", redis_flush_size=None, **kwargs, ): """ Initializes the cache based on the given type. Args: type (str, optional): The type of cache to initialize. Can be "local", "redis", "redis-semantic", or "s3". Defaults to "local". host (str, optional): The host address for the Redis cache. Required if type is "redis". port (int, optional): The port number for the Redis cache. Required if type is "redis". password (str, optional): The password for the Redis cache. Required if type is "redis". similarity_threshold (float, optional): The similarity threshold for semantic-caching, Required if type is "redis-semantic" supported_call_types (list, optional): List of call types to cache for. Defaults to cache == on for all call types. **kwargs: Additional keyword arguments for redis.Redis() cache Raises: ValueError: If an invalid cache type is provided. Returns: None. 
Cache is set as a litellm param """ if type == "redis": self.cache: BaseCache = RedisCache( host, port, password, redis_flush_size, **kwargs ) elif type == "redis-semantic": self.cache = RedisSemanticCache( host, port, password, similarity_threshold=similarity_threshold, use_async=redis_semantic_cache_use_async, embedding_model=redis_semantic_cache_embedding_model, **kwargs, ) elif type == "local": self.cache = InMemoryCache() elif type == "s3": self.cache = S3Cache( s3_bucket_name=s3_bucket_name, s3_region_name=s3_region_name, s3_api_version=s3_api_version, s3_use_ssl=s3_use_ssl, s3_verify=s3_verify, s3_endpoint_url=s3_endpoint_url, s3_aws_access_key_id=s3_aws_access_key_id, s3_aws_secret_access_key=s3_aws_secret_access_key, s3_aws_session_token=s3_aws_session_token, s3_config=s3_config, s3_path=s3_path, **kwargs, ) if "cache" not in litellm.input_callback: litellm.input_callback.append("cache") if "cache" not in litellm.success_callback: litellm.success_callback.append("cache") if "cache" not in litellm._async_success_callback: litellm._async_success_callback.append("cache") self.supported_call_types = supported_call_types # default to ["completion", "acompletion", "embedding", "aembedding"] self.type = type self.namespace = namespace self.redis_flush_size = redis_flush_size self.ttl = ttl if self.type == "local" and default_in_memory_ttl is not None: self.ttl = default_in_memory_ttl if ( self.type == "redis" or self.type == "redis-semantic" ) and default_in_redis_ttl is not None: self.ttl = default_in_redis_ttl if self.namespace is not None and isinstance(self.cache, RedisCache): self.cache.namespace = self.namespace def get_cache_key(self, *args, **kwargs): """ Get the cache key for the given arguments. Args: *args: args to litellm.completion() or embedding() **kwargs: kwargs to litellm.completion() or embedding() Returns: str: The cache key generated from the arguments, or None if no cache key could be generated. """ cache_key = "" print_verbose(f"\nGetting Cache key. Kwargs: {kwargs}") # for streaming, we use preset_cache_key. It's created in wrapper(), we do this because optional params like max_tokens, get transformed for bedrock -> max_new_tokens if kwargs.get("litellm_params", {}).get("preset_cache_key", None) is not None: _preset_cache_key = kwargs.get("litellm_params", {}).get( "preset_cache_key", None ) print_verbose(f"\nReturning preset cache key: {_preset_cache_key}") return _preset_cache_key # sort kwargs by keys, since model: [gpt-4, temperature: 0.2, max_tokens: 200] == [temperature: 0.2, max_tokens: 200, model: gpt-4] completion_kwargs = [ "model", "messages", "temperature", "top_p", "n", "stop", "max_tokens", "presence_penalty", "frequency_penalty", "logit_bias", "user", "response_format", "seed", "tools", "tool_choice", ] embedding_only_kwargs = [ "input", "encoding_format", ] # embedding kwargs = model, input, user, encoding_format. Model, user are checked in completion_kwargs transcription_only_kwargs = [ "file", "language", ] # combined_kwargs - NEEDS to be ordered across get_cache_key(). 
Do not use a set() combined_kwargs = ( completion_kwargs + embedding_only_kwargs + transcription_only_kwargs ) for param in combined_kwargs: # ignore litellm params here if param in kwargs: # check if param == model and model_group is passed in, then override model with model_group if param == "model": model_group = None caching_group = None metadata = kwargs.get("metadata", None) litellm_params = kwargs.get("litellm_params", {}) if metadata is not None: model_group = metadata.get("model_group") model_group = metadata.get("model_group", None) caching_groups = metadata.get("caching_groups", None) if caching_groups: for group in caching_groups: if model_group in group: caching_group = group break if litellm_params is not None: metadata = litellm_params.get("metadata", None) if metadata is not None: model_group = metadata.get("model_group", None) caching_groups = metadata.get("caching_groups", None) if caching_groups: for group in caching_groups: if model_group in group: caching_group = group break param_value = ( caching_group or model_group or kwargs[param] ) # use caching_group, if set then model_group if it exists, else use kwargs["model"] elif param == "file": metadata_file_name = kwargs.get("metadata", {}).get( "file_name", None ) litellm_params_file_name = kwargs.get("litellm_params", {}).get( "file_name", None ) if metadata_file_name is not None: param_value = metadata_file_name elif litellm_params_file_name is not None: param_value = litellm_params_file_name else: if kwargs[param] is None: continue # ignore None params param_value = kwargs[param] cache_key += f"{str(param)}: {str(param_value)}" print_verbose(f"\nCreated cache key: {cache_key}") # Use hashlib to create a sha256 hash of the cache key hash_object = hashlib.sha256(cache_key.encode()) # Hexadecimal representation of the hash hash_hex = hash_object.hexdigest() print_verbose(f"Hashed cache key (SHA-256): {hash_hex}") if self.namespace is not None: hash_hex = f"{self.namespace}:{hash_hex}" print_verbose(f"Hashed Key with Namespace: {hash_hex}") elif kwargs.get("metadata", {}).get("redis_namespace", None) is not None: _namespace = kwargs.get("metadata", {}).get("redis_namespace", None) hash_hex = f"{_namespace}:{hash_hex}" print_verbose(f"Hashed Key with Namespace: {hash_hex}") return hash_hex def generate_streaming_content(self, content): chunk_size = 5 # Adjust the chunk size as needed for i in range(0, len(content), chunk_size): yield { "choices": [ { "delta": { "role": "assistant", "content": content[i : i + chunk_size], } } ] } time.sleep(0.02) def _get_cache_logic( self, cached_result: Optional[Any], max_age: Optional[float], ): """ Common get cache logic across sync + async implementations """ # Check if a timestamp was stored with the cached response if ( cached_result is not None and isinstance(cached_result, dict) and "timestamp" in cached_result ): timestamp = cached_result["timestamp"] current_time = time.time() # Calculate age of the cached response response_age = current_time - timestamp # Check if the cached response is older than the max-age if max_age is not None and response_age > max_age: return None # Cached response is too old # If the response is fresh, or there's no max-age requirement, return the cached response # cached_response is in `b{} convert it to ModelResponse cached_response = cached_result.get("response") try: if isinstance(cached_response, dict): pass else: cached_response = json.loads( cached_response # type: ignore ) # Convert string to dictionary except: cached_response = 
ast.literal_eval(cached_response) # type: ignore return cached_response return cached_result def get_cache(self, *args, **kwargs): """ Retrieves the cached result for the given arguments. Args: *args: args to litellm.completion() or embedding() **kwargs: kwargs to litellm.completion() or embedding() Returns: The cached result if it exists, otherwise None. """ try: # never block execution messages = kwargs.get("messages", []) if "cache_key" in kwargs: cache_key = kwargs["cache_key"] else: cache_key = self.get_cache_key(*args, **kwargs) if cache_key is not None: cache_control_args = kwargs.get("cache", {}) max_age = cache_control_args.get( "s-max-age", cache_control_args.get("s-maxage", float("inf")) ) cached_result = self.cache.get_cache(cache_key, messages=messages) return self._get_cache_logic( cached_result=cached_result, max_age=max_age ) except Exception as e: print_verbose(f"An exception occurred: {traceback.format_exc()}") return None async def async_get_cache(self, *args, **kwargs): """ Async get cache implementation. Used for embedding calls in async wrapper """ try: # never block execution messages = kwargs.get("messages", []) if "cache_key" in kwargs: cache_key = kwargs["cache_key"] else: cache_key = self.get_cache_key(*args, **kwargs) if cache_key is not None: cache_control_args = kwargs.get("cache", {}) max_age = cache_control_args.get( "s-max-age", cache_control_args.get("s-maxage", float("inf")) ) cached_result = await self.cache.async_get_cache( cache_key, *args, **kwargs ) return self._get_cache_logic( cached_result=cached_result, max_age=max_age ) except Exception as e: print_verbose(f"An exception occurred: {traceback.format_exc()}") return None def _add_cache_logic(self, result, *args, **kwargs): """ Common implementation across sync + async add_cache functions """ try: if "cache_key" in kwargs: cache_key = kwargs["cache_key"] else: cache_key = self.get_cache_key(*args, **kwargs) if cache_key is not None: if isinstance(result, OpenAIObject): result = result.model_dump_json() ## DEFAULT TTL ## if self.ttl is not None: kwargs["ttl"] = self.ttl ## Get Cache-Controls ## if kwargs.get("cache", None) is not None and isinstance( kwargs.get("cache"), dict ): for k, v in kwargs.get("cache").items(): if k == "ttl": kwargs["ttl"] = v cached_data = {"timestamp": time.time(), "response": result} return cache_key, cached_data, kwargs else: raise Exception("cache key is None") except Exception as e: raise e def add_cache(self, result, *args, **kwargs): """ Adds a result to the cache. 
Args: *args: args to litellm.completion() or embedding() **kwargs: kwargs to litellm.completion() or embedding() Returns: None """ try: cache_key, cached_data, kwargs = self._add_cache_logic( result=result, *args, **kwargs ) self.cache.set_cache(cache_key, cached_data, **kwargs) except Exception as e: print_verbose(f"LiteLLM Cache: Excepton add_cache: {str(e)}") traceback.print_exc() pass async def async_add_cache(self, result, *args, **kwargs): """ Async implementation of add_cache """ try: if self.type == "redis" and self.redis_flush_size is not None: # high traffic - fill in results in memory and then flush await self.batch_cache_write(result, *args, **kwargs) else: cache_key, cached_data, kwargs = self._add_cache_logic( result=result, *args, **kwargs ) await self.cache.async_set_cache(cache_key, cached_data, **kwargs) except Exception as e: print_verbose(f"LiteLLM Cache: Excepton add_cache: {str(e)}") traceback.print_exc() async def async_add_cache_pipeline(self, result, *args, **kwargs): """ Async implementation of add_cache for Embedding calls Does a bulk write, to prevent using too many clients """ try: cache_list = [] for idx, i in enumerate(kwargs["input"]): preset_cache_key = litellm.cache.get_cache_key( *args, **{**kwargs, "input": i} ) kwargs["cache_key"] = preset_cache_key embedding_response = result.data[idx] cache_key, cached_data, kwargs = self._add_cache_logic( result=embedding_response, *args, **kwargs, ) cache_list.append((cache_key, cached_data)) if hasattr(self.cache, "async_set_cache_pipeline"): await self.cache.async_set_cache_pipeline(cache_list=cache_list) else: tasks = [] for val in cache_list: tasks.append( self.cache.async_set_cache(cache_key, cached_data, **kwargs) ) await asyncio.gather(*tasks) except Exception as e: print_verbose(f"LiteLLM Cache: Excepton add_cache: {str(e)}") traceback.print_exc() async def batch_cache_write(self, result, *args, **kwargs): cache_key, cached_data, kwargs = self._add_cache_logic( result=result, *args, **kwargs ) await self.cache.batch_cache_write(cache_key, cached_data, **kwargs) async def ping(self): if hasattr(self.cache, "ping"): return await self.cache.ping() return None async def delete_cache_keys(self, keys): if hasattr(self.cache, "delete_cache_keys"): return await self.cache.delete_cache_keys(keys) return None async def disconnect(self): if hasattr(self.cache, "disconnect"): await self.cache.disconnect()
(type: Optional[Literal['local', 'redis', 'redis-semantic', 's3']] = 'local', host: Optional[str] = None, port: Optional[str] = None, password: Optional[str] = None, namespace: Optional[str] = None, ttl: Optional[float] = None, default_in_memory_ttl: Optional[float] = None, default_in_redis_ttl: Optional[float] = None, similarity_threshold: Optional[float] = None, supported_call_types: Optional[List[Literal['completion', 'acompletion', 'embedding', 'aembedding', 'atranscription', 'transcription']]] = ['completion', 'acompletion', 'embedding', 'aembedding', 'atranscription', 'transcription'], s3_bucket_name: Optional[str] = None, s3_region_name: Optional[str] = None, s3_api_version: Optional[str] = None, s3_use_ssl: Optional[bool] = True, s3_verify: Union[bool, str, NoneType] = None, s3_endpoint_url: Optional[str] = None, s3_aws_access_key_id: Optional[str] = None, s3_aws_secret_access_key: Optional[str] = None, s3_aws_session_token: Optional[str] = None, s3_config: Optional[Any] = None, s3_path: Optional[str] = None, redis_semantic_cache_use_async=False, redis_semantic_cache_embedding_model='text-embedding-ada-002', redis_flush_size=None, **kwargs)
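A minimal wiring sketch for the `Cache` class above. Assigning the instance to `litellm.cache` and passing `caching=True` on calls follows the litellm caching docs; both are assumptions here rather than something shown in this row.

import litellm
from litellm.caching import Cache

litellm.cache = Cache()  # type="local" -> InMemoryCache, per __init__ above

# Redis-backed variant (host/port/password are placeholders):
# litellm.cache = Cache(type="redis", host="localhost", port="6379", password="...")

response = litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
    caching=True,  # assumed flag; a repeat of this call should then be served from cache
)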
63,052
litellm.caching
__init__
Initializes the cache based on the given type. Args: type (str, optional): The type of cache to initialize. Can be "local", "redis", "redis-semantic", or "s3". Defaults to "local". host (str, optional): The host address for the Redis cache. Required if type is "redis". port (int, optional): The port number for the Redis cache. Required if type is "redis". password (str, optional): The password for the Redis cache. Required if type is "redis". similarity_threshold (float, optional): The similarity threshold for semantic-caching, Required if type is "redis-semantic" supported_call_types (list, optional): List of call types to cache for. Defaults to cache == on for all call types. **kwargs: Additional keyword arguments for redis.Redis() cache Raises: ValueError: If an invalid cache type is provided. Returns: None. Cache is set as a litellm param
def __init__( self, type: Optional[Literal["local", "redis", "redis-semantic", "s3"]] = "local", host: Optional[str] = None, port: Optional[str] = None, password: Optional[str] = None, namespace: Optional[str] = None, ttl: Optional[float] = None, default_in_memory_ttl: Optional[float] = None, default_in_redis_ttl: Optional[float] = None, similarity_threshold: Optional[float] = None, supported_call_types: Optional[ List[ Literal[ "completion", "acompletion", "embedding", "aembedding", "atranscription", "transcription", ] ] ] = [ "completion", "acompletion", "embedding", "aembedding", "atranscription", "transcription", ], # s3 Bucket, boto3 configuration s3_bucket_name: Optional[str] = None, s3_region_name: Optional[str] = None, s3_api_version: Optional[str] = None, s3_use_ssl: Optional[bool] = True, s3_verify: Optional[Union[bool, str]] = None, s3_endpoint_url: Optional[str] = None, s3_aws_access_key_id: Optional[str] = None, s3_aws_secret_access_key: Optional[str] = None, s3_aws_session_token: Optional[str] = None, s3_config: Optional[Any] = None, s3_path: Optional[str] = None, redis_semantic_cache_use_async=False, redis_semantic_cache_embedding_model="text-embedding-ada-002", redis_flush_size=None, **kwargs, ): """ Initializes the cache based on the given type. Args: type (str, optional): The type of cache to initialize. Can be "local", "redis", "redis-semantic", or "s3". Defaults to "local". host (str, optional): The host address for the Redis cache. Required if type is "redis". port (int, optional): The port number for the Redis cache. Required if type is "redis". password (str, optional): The password for the Redis cache. Required if type is "redis". similarity_threshold (float, optional): The similarity threshold for semantic-caching, Required if type is "redis-semantic" supported_call_types (list, optional): List of call types to cache for. Defaults to cache == on for all call types. **kwargs: Additional keyword arguments for redis.Redis() cache Raises: ValueError: If an invalid cache type is provided. Returns: None. 
Cache is set as a litellm param """ if type == "redis": self.cache: BaseCache = RedisCache( host, port, password, redis_flush_size, **kwargs ) elif type == "redis-semantic": self.cache = RedisSemanticCache( host, port, password, similarity_threshold=similarity_threshold, use_async=redis_semantic_cache_use_async, embedding_model=redis_semantic_cache_embedding_model, **kwargs, ) elif type == "local": self.cache = InMemoryCache() elif type == "s3": self.cache = S3Cache( s3_bucket_name=s3_bucket_name, s3_region_name=s3_region_name, s3_api_version=s3_api_version, s3_use_ssl=s3_use_ssl, s3_verify=s3_verify, s3_endpoint_url=s3_endpoint_url, s3_aws_access_key_id=s3_aws_access_key_id, s3_aws_secret_access_key=s3_aws_secret_access_key, s3_aws_session_token=s3_aws_session_token, s3_config=s3_config, s3_path=s3_path, **kwargs, ) if "cache" not in litellm.input_callback: litellm.input_callback.append("cache") if "cache" not in litellm.success_callback: litellm.success_callback.append("cache") if "cache" not in litellm._async_success_callback: litellm._async_success_callback.append("cache") self.supported_call_types = supported_call_types # default to ["completion", "acompletion", "embedding", "aembedding"] self.type = type self.namespace = namespace self.redis_flush_size = redis_flush_size self.ttl = ttl if self.type == "local" and default_in_memory_ttl is not None: self.ttl = default_in_memory_ttl if ( self.type == "redis" or self.type == "redis-semantic" ) and default_in_redis_ttl is not None: self.ttl = default_in_redis_ttl if self.namespace is not None and isinstance(self.cache, RedisCache): self.cache.namespace = self.namespace
(self, type: Optional[Literal['local', 'redis', 'redis-semantic', 's3']] = 'local', host: Optional[str] = None, port: Optional[str] = None, password: Optional[str] = None, namespace: Optional[str] = None, ttl: Optional[float] = None, default_in_memory_ttl: Optional[float] = None, default_in_redis_ttl: Optional[float] = None, similarity_threshold: Optional[float] = None, supported_call_types: Optional[List[Literal['completion', 'acompletion', 'embedding', 'aembedding', 'atranscription', 'transcription']]] = ['completion', 'acompletion', 'embedding', 'aembedding', 'atranscription', 'transcription'], s3_bucket_name: Optional[str] = None, s3_region_name: Optional[str] = None, s3_api_version: Optional[str] = None, s3_use_ssl: Optional[bool] = True, s3_verify: Union[bool, str, NoneType] = None, s3_endpoint_url: Optional[str] = None, s3_aws_access_key_id: Optional[str] = None, s3_aws_secret_access_key: Optional[str] = None, s3_aws_session_token: Optional[str] = None, s3_config: Optional[Any] = None, s3_path: Optional[str] = None, redis_semantic_cache_use_async=False, redis_semantic_cache_embedding_model='text-embedding-ada-002', redis_flush_size=None, **kwargs)
63,053
litellm.caching
_add_cache_logic
Common implementation across sync + async add_cache functions
def _add_cache_logic(self, result, *args, **kwargs): """ Common implementation across sync + async add_cache functions """ try: if "cache_key" in kwargs: cache_key = kwargs["cache_key"] else: cache_key = self.get_cache_key(*args, **kwargs) if cache_key is not None: if isinstance(result, OpenAIObject): result = result.model_dump_json() ## DEFAULT TTL ## if self.ttl is not None: kwargs["ttl"] = self.ttl ## Get Cache-Controls ## if kwargs.get("cache", None) is not None and isinstance( kwargs.get("cache"), dict ): for k, v in kwargs.get("cache").items(): if k == "ttl": kwargs["ttl"] = v cached_data = {"timestamp": time.time(), "response": result} return cache_key, cached_data, kwargs else: raise Exception("cache key is None") except Exception as e: raise e
(self, result, *args, **kwargs)
63,054
litellm.caching
_get_cache_logic
Common get cache logic across sync + async implementations
def _get_cache_logic( self, cached_result: Optional[Any], max_age: Optional[float], ): """ Common get cache logic across sync + async implementations """ # Check if a timestamp was stored with the cached response if ( cached_result is not None and isinstance(cached_result, dict) and "timestamp" in cached_result ): timestamp = cached_result["timestamp"] current_time = time.time() # Calculate age of the cached response response_age = current_time - timestamp # Check if the cached response is older than the max-age if max_age is not None and response_age > max_age: return None # Cached response is too old # If the response is fresh, or there's no max-age requirement, return the cached response # cached_response is in `b{} convert it to ModelResponse cached_response = cached_result.get("response") try: if isinstance(cached_response, dict): pass else: cached_response = json.loads( cached_response # type: ignore ) # Convert string to dictionary except: cached_response = ast.literal_eval(cached_response) # type: ignore return cached_response return cached_result
(self, cached_result: Optional[Any], max_age: Optional[float])
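A standalone sketch of the freshness rule `_get_cache_logic` applies: entries are stored as {"timestamp": ..., "response": ...} (see `_add_cache_logic` above) and are treated as misses once their age exceeds max_age. The names below are illustrative only.

import json
import time
from typing import Optional

def is_fresh(cached_result: dict, max_age: Optional[float]) -> bool:
    # mirrors the check above: entries older than max_age are discarded
    if max_age is None:
        return True
    return (time.time() - cached_result["timestamp"]) <= max_age

entry = {"timestamp": time.time() - 120, "response": json.dumps({"id": "chatcmpl-123"})}
print(is_fresh(entry, max_age=60))   # False - entry is ~120s old
print(is_fresh(entry, max_age=600))  # True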
63,055
litellm.caching
add_cache
Adds a result to the cache. Args: *args: args to litellm.completion() or embedding() **kwargs: kwargs to litellm.completion() or embedding() Returns: None
def add_cache(self, result, *args, **kwargs): """ Adds a result to the cache. Args: *args: args to litellm.completion() or embedding() **kwargs: kwargs to litellm.completion() or embedding() Returns: None """ try: cache_key, cached_data, kwargs = self._add_cache_logic( result=result, *args, **kwargs ) self.cache.set_cache(cache_key, cached_data, **kwargs) except Exception as e: print_verbose(f"LiteLLM Cache: Excepton add_cache: {str(e)}") traceback.print_exc() pass
(self, result, *args, **kwargs)
63,056
litellm.caching
async_add_cache
Async implementation of add_cache
async def async_add_cache(self, result, *args, **kwargs): """ Async implementation of add_cache """ try: if self.type == "redis" and self.redis_flush_size is not None: # high traffic - fill in results in memory and then flush await self.batch_cache_write(result, *args, **kwargs) else: cache_key, cached_data, kwargs = self._add_cache_logic( result=result, *args, **kwargs ) await self.cache.async_set_cache(cache_key, cached_data, **kwargs) except Exception as e: print_verbose(f"LiteLLM Cache: Excepton add_cache: {str(e)}") traceback.print_exc()
(self, result, *args, **kwargs)
63,057
litellm.caching
async_add_cache_pipeline
Async implementation of add_cache for Embedding calls Does a bulk write, to prevent using too many clients
async def async_add_cache_pipeline(self, result, *args, **kwargs): """ Async implementation of add_cache for Embedding calls Does a bulk write, to prevent using too many clients """ try: cache_list = [] for idx, i in enumerate(kwargs["input"]): preset_cache_key = litellm.cache.get_cache_key( *args, **{**kwargs, "input": i} ) kwargs["cache_key"] = preset_cache_key embedding_response = result.data[idx] cache_key, cached_data, kwargs = self._add_cache_logic( result=embedding_response, *args, **kwargs, ) cache_list.append((cache_key, cached_data)) if hasattr(self.cache, "async_set_cache_pipeline"): await self.cache.async_set_cache_pipeline(cache_list=cache_list) else: tasks = [] for val in cache_list: tasks.append( self.cache.async_set_cache(cache_key, cached_data, **kwargs) ) await asyncio.gather(*tasks) except Exception as e: print_verbose(f"LiteLLM Cache: Excepton add_cache: {str(e)}") traceback.print_exc()
(self, result, *args, **kwargs)
63,058
litellm.caching
async_get_cache
Async get cache implementation. Used for embedding calls in async wrapper
async def async_get_cache(self, *args, **kwargs): """ Async get cache implementation. Used for embedding calls in async wrapper """ try: # never block execution messages = kwargs.get("messages", []) if "cache_key" in kwargs: cache_key = kwargs["cache_key"] else: cache_key = self.get_cache_key(*args, **kwargs) if cache_key is not None: cache_control_args = kwargs.get("cache", {}) max_age = cache_control_args.get( "s-max-age", cache_control_args.get("s-maxage", float("inf")) ) cached_result = await self.cache.async_get_cache( cache_key, *args, **kwargs ) return self._get_cache_logic( cached_result=cached_result, max_age=max_age ) except Exception as e: print_verbose(f"An exception occurred: {traceback.format_exc()}") return None
(self, *args, **kwargs)
63,059
litellm.caching
batch_cache_write
null
async def batch_cache_write(self, result, *args, **kwargs): cache_key, cached_data, kwargs = self._add_cache_logic( result=result, *args, **kwargs ) await self.cache.batch_cache_write(cache_key, cached_data, **kwargs)
(self, result, *args, **kwargs)
63,062
litellm.caching
generate_streaming_content
null
def generate_streaming_content(self, content): chunk_size = 5 # Adjust the chunk size as needed for i in range(0, len(content), chunk_size): yield { "choices": [ { "delta": { "role": "assistant", "content": content[i : i + chunk_size], } } ] } time.sleep(0.02)
(self, content)
63,063
litellm.caching
get_cache
Retrieves the cached result for the given arguments. Args: *args: args to litellm.completion() or embedding() **kwargs: kwargs to litellm.completion() or embedding() Returns: The cached result if it exists, otherwise None.
def get_cache(self, *args, **kwargs): """ Retrieves the cached result for the given arguments. Args: *args: args to litellm.completion() or embedding() **kwargs: kwargs to litellm.completion() or embedding() Returns: The cached result if it exists, otherwise None. """ try: # never block execution messages = kwargs.get("messages", []) if "cache_key" in kwargs: cache_key = kwargs["cache_key"] else: cache_key = self.get_cache_key(*args, **kwargs) if cache_key is not None: cache_control_args = kwargs.get("cache", {}) max_age = cache_control_args.get( "s-max-age", cache_control_args.get("s-maxage", float("inf")) ) cached_result = self.cache.get_cache(cache_key, messages=messages) return self._get_cache_logic( cached_result=cached_result, max_age=max_age ) except Exception as e: print_verbose(f"An exception occurred: {traceback.format_exc()}") return None
(self, *args, **kwargs)
63,064
litellm.caching
get_cache_key
Get the cache key for the given arguments. Args: *args: args to litellm.completion() or embedding() **kwargs: kwargs to litellm.completion() or embedding() Returns: str: The cache key generated from the arguments, or None if no cache key could be generated.
def get_cache_key(self, *args, **kwargs): """ Get the cache key for the given arguments. Args: *args: args to litellm.completion() or embedding() **kwargs: kwargs to litellm.completion() or embedding() Returns: str: The cache key generated from the arguments, or None if no cache key could be generated. """ cache_key = "" print_verbose(f"\nGetting Cache key. Kwargs: {kwargs}") # for streaming, we use preset_cache_key. It's created in wrapper(), we do this because optional params like max_tokens, get transformed for bedrock -> max_new_tokens if kwargs.get("litellm_params", {}).get("preset_cache_key", None) is not None: _preset_cache_key = kwargs.get("litellm_params", {}).get( "preset_cache_key", None ) print_verbose(f"\nReturning preset cache key: {_preset_cache_key}") return _preset_cache_key # sort kwargs by keys, since model: [gpt-4, temperature: 0.2, max_tokens: 200] == [temperature: 0.2, max_tokens: 200, model: gpt-4] completion_kwargs = [ "model", "messages", "temperature", "top_p", "n", "stop", "max_tokens", "presence_penalty", "frequency_penalty", "logit_bias", "user", "response_format", "seed", "tools", "tool_choice", ] embedding_only_kwargs = [ "input", "encoding_format", ] # embedding kwargs = model, input, user, encoding_format. Model, user are checked in completion_kwargs transcription_only_kwargs = [ "file", "language", ] # combined_kwargs - NEEDS to be ordered across get_cache_key(). Do not use a set() combined_kwargs = ( completion_kwargs + embedding_only_kwargs + transcription_only_kwargs ) for param in combined_kwargs: # ignore litellm params here if param in kwargs: # check if param == model and model_group is passed in, then override model with model_group if param == "model": model_group = None caching_group = None metadata = kwargs.get("metadata", None) litellm_params = kwargs.get("litellm_params", {}) if metadata is not None: model_group = metadata.get("model_group") model_group = metadata.get("model_group", None) caching_groups = metadata.get("caching_groups", None) if caching_groups: for group in caching_groups: if model_group in group: caching_group = group break if litellm_params is not None: metadata = litellm_params.get("metadata", None) if metadata is not None: model_group = metadata.get("model_group", None) caching_groups = metadata.get("caching_groups", None) if caching_groups: for group in caching_groups: if model_group in group: caching_group = group break param_value = ( caching_group or model_group or kwargs[param] ) # use caching_group, if set then model_group if it exists, else use kwargs["model"] elif param == "file": metadata_file_name = kwargs.get("metadata", {}).get( "file_name", None ) litellm_params_file_name = kwargs.get("litellm_params", {}).get( "file_name", None ) if metadata_file_name is not None: param_value = metadata_file_name elif litellm_params_file_name is not None: param_value = litellm_params_file_name else: if kwargs[param] is None: continue # ignore None params param_value = kwargs[param] cache_key += f"{str(param)}: {str(param_value)}" print_verbose(f"\nCreated cache key: {cache_key}") # Use hashlib to create a sha256 hash of the cache key hash_object = hashlib.sha256(cache_key.encode()) # Hexadecimal representation of the hash hash_hex = hash_object.hexdigest() print_verbose(f"Hashed cache key (SHA-256): {hash_hex}") if self.namespace is not None: hash_hex = f"{self.namespace}:{hash_hex}" print_verbose(f"Hashed Key with Namespace: {hash_hex}") elif kwargs.get("metadata", {}).get("redis_namespace", None) is not None: _namespace = 
kwargs.get("metadata", {}).get("redis_namespace", None) hash_hex = f"{_namespace}:{hash_hex}" print_verbose(f"Hashed Key with Namespace: {hash_hex}") return hash_hex
(self, *args, **kwargs)
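A standalone sketch of the key-building scheme in `get_cache_key` above: selected kwargs are concatenated as "param: value" in a fixed order, SHA-256 hashed, and optionally prefixed with a namespace. The parameter list below is a subset used for illustration.

import hashlib
from typing import Optional

def sketch_cache_key(kwargs: dict, namespace: Optional[str] = None) -> str:
    ordered_params = ["model", "messages", "temperature", "max_tokens"]  # illustrative subset
    cache_key = ""
    for param in ordered_params:
        if param in kwargs and kwargs[param] is not None:
            cache_key += f"{param}: {kwargs[param]}"
    hash_hex = hashlib.sha256(cache_key.encode()).hexdigest()  # SHA-256 of the joined string
    return f"{namespace}:{hash_hex}" if namespace else hash_hex

print(sketch_cache_key({"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "hi"}]}))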
63,066
litellm.main
Chat
null
class Chat: def __init__(self, params, router_obj: Optional[Any]): self.params = params if self.params.get("acompletion", False) == True: self.params.pop("acompletion") self.completions: Union[AsyncCompletions, Completions] = AsyncCompletions( self.params, router_obj=router_obj ) else: self.completions = Completions(self.params, router_obj=router_obj)
(params, router_obj: Optional[Any])
63,067
litellm.main
__init__
null
def __init__(self, params, router_obj: Optional[Any]): self.params = params if self.params.get("acompletion", False) == True: self.params.pop("acompletion") self.completions: Union[AsyncCompletions, Completions] = AsyncCompletions( self.params, router_obj=router_obj ) else: self.completions = Completions(self.params, router_obj=router_obj)
(self, params, router_obj: Optional[Any])
63,068
litellm.utils
Choices
null
class Choices(OpenAIObject): def __init__( self, finish_reason=None, index=0, message=None, logprobs=None, enhancements=None, **params, ): super(Choices, self).__init__(**params) self.finish_reason = ( map_finish_reason(finish_reason) or "stop" ) # set finish_reason for all responses self.index = index if message is None: self.message = Message() else: if isinstance(message, Message): self.message = message elif isinstance(message, dict): self.message = Message(**message) if logprobs is not None: self.logprobs = logprobs if enhancements is not None: self.enhancements = enhancements def __contains__(self, key): # Define custom behavior for the 'in' operator return hasattr(self, key) def get(self, key, default=None): # Custom .get() method to access attributes with a default value if the attribute doesn't exist return getattr(self, key, default) def __getitem__(self, key): # Allow dictionary-style access to attributes return getattr(self, key) def __setitem__(self, key, value): # Allow dictionary-style assignment of attributes setattr(self, key, value)
(finish_reason=None, index=0, message=None, logprobs=None, enhancements=None, **params)
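The dunder methods listed after this row give `Choices` dict-like ergonomics on top of attribute access. A short sketch, assuming `litellm.utils.Choices` is importable as listed here:

from litellm.utils import Choices

choice = Choices(finish_reason="length", index=0, message={"role": "assistant", "content": "Hi"})
print("finish_reason" in choice)          # True, via the custom __contains__
print(choice["finish_reason"])            # "length", via __getitem__
print(choice.get("logprobs", "missing"))  # "missing" - .get() default for an absent attribute
choice["index"] = 1                        # dict-style assignment via __setitem__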
63,069
litellm.utils
__contains__
null
def __contains__(self, key): # Define custom behavior for the 'in' operator return hasattr(self, key)
(self, key)
63,075
litellm.utils
__getitem__
null
def __getitem__(self, key): # Allow dictionary-style access to attributes return getattr(self, key)
(self, key)
63,077
litellm.utils
__init__
null
def __init__( self, finish_reason=None, index=0, message=None, logprobs=None, enhancements=None, **params, ): super(Choices, self).__init__(**params) self.finish_reason = ( map_finish_reason(finish_reason) or "stop" ) # set finish_reason for all responses self.index = index if message is None: self.message = Message() else: if isinstance(message, Message): self.message = message elif isinstance(message, dict): self.message = Message(**message) if logprobs is not None: self.logprobs = logprobs if enhancements is not None: self.enhancements = enhancements
(self, finish_reason=None, index=0, message=None, logprobs=None, enhancements=None, **params)
63,086
litellm.utils
__setitem__
null
def __setitem__(self, key, value): # Allow dictionary-style assignment of attributes setattr(self, key, value)
(self, key, value)
63,095
litellm.utils
get
null
def get(self, key, default=None): # Custom .get() method to access attributes with a default value if the attribute doesn't exist return getattr(self, key, default)
(self, key, default=None)
63,103
litellm.llms.clarifai
ClarifaiConfig
Reference: https://clarifai.com/meta/Llama-2/models/llama2-70b-chat TODO fill in the details
class ClarifaiConfig: """ Reference: https://clarifai.com/meta/Llama-2/models/llama2-70b-chat TODO fill in the details """ max_tokens: Optional[int] = None temperature: Optional[int] = None top_k: Optional[int] = None def __init__( self, max_tokens: Optional[int] = None, temperature: Optional[int] = None, top_k: Optional[int] = None, ) -> None: locals_ = locals() for key, value in locals_.items(): if key != "self" and value is not None: setattr(self.__class__, key, value) @classmethod def get_config(cls): return { k: v for k, v in cls.__dict__.items() if not k.startswith("__") and not isinstance( v, ( types.FunctionType, types.BuiltinFunctionType, classmethod, staticmethod, ), ) and v is not None }
(max_tokens: Optional[int] = None, temperature: Optional[int] = None, top_k: Optional[int] = None) -> None
63,104
litellm.llms.clarifai
__init__
null
def __init__( self, max_tokens: Optional[int] = None, temperature: Optional[int] = None, top_k: Optional[int] = None, ) -> None: locals_ = locals() for key, value in locals_.items(): if key != "self" and value is not None: setattr(self.__class__, key, value)
(self, max_tokens: Optional[int] = None, temperature: Optional[int] = None, top_k: Optional[int] = None) -> NoneType
63,105
litellm.llms.cloudflare
CloudflareConfig
null
class CloudflareConfig: max_tokens: Optional[int] = None stream: Optional[bool] = None def __init__( self, max_tokens: Optional[int] = None, stream: Optional[bool] = None, ) -> None: locals_ = locals() for key, value in locals_.items(): if key != "self" and value is not None: setattr(self.__class__, key, value) @classmethod def get_config(cls): return { k: v for k, v in cls.__dict__.items() if not k.startswith("__") and not isinstance( v, ( types.FunctionType, types.BuiltinFunctionType, classmethod, staticmethod, ), ) and v is not None }
(max_tokens: Optional[int] = None, stream: Optional[bool] = None) -> None
63,106
litellm.llms.cloudflare
__init__
null
def __init__( self, max_tokens: Optional[int] = None, stream: Optional[bool] = None, ) -> None: locals_ = locals() for key, value in locals_.items(): if key != "self" and value is not None: setattr(self.__class__, key, value)
(self, max_tokens: Optional[int] = None, stream: Optional[bool] = None) -> NoneType
63,107
litellm.types.llms.openai
CodeInterpreterToolParam
null
class CodeInterpreterToolParam(TypedDict, total=False): type: Required[Literal["code_interpreter"]] """The type of tool being defined: `code_interpreter`"""
null
63,108
litellm.llms.cohere
CohereConfig
Reference: https://docs.cohere.com/reference/generate The class `CohereConfig` provides configuration for the Cohere's API interface. Below are the parameters: - `num_generations` (integer): Maximum number of generations returned. Default is 1, with a minimum value of 1 and a maximum value of 5. - `max_tokens` (integer): Maximum number of tokens the model will generate as part of the response. Default value is 20. - `truncate` (string): Specifies how the API handles inputs longer than maximum token length. Options include NONE, START, END. Default is END. - `temperature` (number): A non-negative float controlling the randomness in generation. Lower temperatures result in less random generations. Default is 0.75. - `preset` (string): Identifier of a custom preset, a combination of parameters such as prompt, temperature etc. - `end_sequences` (array of strings): The generated text gets cut at the beginning of the earliest occurrence of an end sequence, which will be excluded from the text. - `stop_sequences` (array of strings): The generated text gets cut at the end of the earliest occurrence of a stop sequence, which will be included in the text. - `k` (integer): Limits generation at each step to top `k` most likely tokens. Default is 0. - `p` (number): Limits generation at each step to most likely tokens with total probability mass of `p`. Default is 0. - `frequency_penalty` (number): Reduces repetitiveness of generated tokens. Higher values apply stronger penalties to previously occurred tokens. - `presence_penalty` (number): Reduces repetitiveness of generated tokens. Similar to frequency_penalty, but this penalty applies equally to all tokens that have already appeared. - `return_likelihoods` (string): Specifies how and if token likelihoods are returned with the response. Options include GENERATION, ALL and NONE. - `logit_bias` (object): Used to prevent the model from generating unwanted tokens or to incentivize it to include desired tokens. e.g. {"hello_world": 1233}
class CohereConfig: """ Reference: https://docs.cohere.com/reference/generate The class `CohereConfig` provides configuration for the Cohere's API interface. Below are the parameters: - `num_generations` (integer): Maximum number of generations returned. Default is 1, with a minimum value of 1 and a maximum value of 5. - `max_tokens` (integer): Maximum number of tokens the model will generate as part of the response. Default value is 20. - `truncate` (string): Specifies how the API handles inputs longer than maximum token length. Options include NONE, START, END. Default is END. - `temperature` (number): A non-negative float controlling the randomness in generation. Lower temperatures result in less random generations. Default is 0.75. - `preset` (string): Identifier of a custom preset, a combination of parameters such as prompt, temperature etc. - `end_sequences` (array of strings): The generated text gets cut at the beginning of the earliest occurrence of an end sequence, which will be excluded from the text. - `stop_sequences` (array of strings): The generated text gets cut at the end of the earliest occurrence of a stop sequence, which will be included in the text. - `k` (integer): Limits generation at each step to top `k` most likely tokens. Default is 0. - `p` (number): Limits generation at each step to most likely tokens with total probability mass of `p`. Default is 0. - `frequency_penalty` (number): Reduces repetitiveness of generated tokens. Higher values apply stronger penalties to previously occurred tokens. - `presence_penalty` (number): Reduces repetitiveness of generated tokens. Similar to frequency_penalty, but this penalty applies equally to all tokens that have already appeared. - `return_likelihoods` (string): Specifies how and if token likelihoods are returned with the response. Options include GENERATION, ALL and NONE. - `logit_bias` (object): Used to prevent the model from generating unwanted tokens or to incentivize it to include desired tokens. e.g. {"hello_world": 1233} """ num_generations: Optional[int] = None max_tokens: Optional[int] = None truncate: Optional[str] = None temperature: Optional[int] = None preset: Optional[str] = None end_sequences: Optional[list] = None stop_sequences: Optional[list] = None k: Optional[int] = None p: Optional[int] = None frequency_penalty: Optional[int] = None presence_penalty: Optional[int] = None return_likelihoods: Optional[str] = None logit_bias: Optional[dict] = None def __init__( self, num_generations: Optional[int] = None, max_tokens: Optional[int] = None, truncate: Optional[str] = None, temperature: Optional[int] = None, preset: Optional[str] = None, end_sequences: Optional[list] = None, stop_sequences: Optional[list] = None, k: Optional[int] = None, p: Optional[int] = None, frequency_penalty: Optional[int] = None, presence_penalty: Optional[int] = None, return_likelihoods: Optional[str] = None, logit_bias: Optional[dict] = None, ) -> None: locals_ = locals() for key, value in locals_.items(): if key != "self" and value is not None: setattr(self.__class__, key, value) @classmethod def get_config(cls): return { k: v for k, v in cls.__dict__.items() if not k.startswith("__") and not isinstance( v, ( types.FunctionType, types.BuiltinFunctionType, classmethod, staticmethod, ), ) and v is not None }
(num_generations: Optional[int] = None, max_tokens: Optional[int] = None, truncate: Optional[str] = None, temperature: Optional[int] = None, preset: Optional[str] = None, end_sequences: Optional[list] = None, stop_sequences: Optional[list] = None, k: Optional[int] = None, p: Optional[int] = None, frequency_penalty: Optional[int] = None, presence_penalty: Optional[int] = None, return_likelihoods: Optional[str] = None, logit_bias: Optional[dict] = None) -> None
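A sketch of the shared provider-config pattern used by `CohereConfig` (and the other *Config classes in this section): `__init__` writes every non-None argument onto the class itself, and `get_config()` returns the non-dunder, non-callable attributes currently set.

from litellm.llms.cohere import CohereConfig

CohereConfig(max_tokens=256, k=5)   # note: values land on the class, not the instance
print(CohereConfig.get_config())    # expected: {"max_tokens": 256, "k": 5}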
63,109
litellm.llms.cohere
__init__
null
def __init__( self, num_generations: Optional[int] = None, max_tokens: Optional[int] = None, truncate: Optional[str] = None, temperature: Optional[int] = None, preset: Optional[str] = None, end_sequences: Optional[list] = None, stop_sequences: Optional[list] = None, k: Optional[int] = None, p: Optional[int] = None, frequency_penalty: Optional[int] = None, presence_penalty: Optional[int] = None, return_likelihoods: Optional[str] = None, logit_bias: Optional[dict] = None, ) -> None: locals_ = locals() for key, value in locals_.items(): if key != "self" and value is not None: setattr(self.__class__, key, value)
(self, num_generations: Optional[int] = None, max_tokens: Optional[int] = None, truncate: Optional[str] = None, temperature: Optional[int] = None, preset: Optional[str] = None, end_sequences: Optional[list] = None, stop_sequences: Optional[list] = None, k: Optional[int] = None, p: Optional[int] = None, frequency_penalty: Optional[int] = None, presence_penalty: Optional[int] = None, return_likelihoods: Optional[str] = None, logit_bias: Optional[dict] = None) -> NoneType
63,110
litellm.types.completion
CompletionRequest
null
class CompletionRequest(BaseModel): model: str messages: List[str] = [] timeout: Optional[Union[float, int]] = None temperature: Optional[float] = None top_p: Optional[float] = None n: Optional[int] = None stream: Optional[bool] = None stop: Optional[dict] = None max_tokens: Optional[int] = None presence_penalty: Optional[float] = None frequency_penalty: Optional[float] = None logit_bias: Optional[dict] = None user: Optional[str] = None response_format: Optional[dict] = None seed: Optional[int] = None tools: Optional[List[str]] = None tool_choice: Optional[str] = None logprobs: Optional[bool] = None top_logprobs: Optional[int] = None deployment_id: Optional[str] = None functions: Optional[List[str]] = None function_call: Optional[str] = None base_url: Optional[str] = None api_version: Optional[str] = None api_key: Optional[str] = None model_list: Optional[List[str]] = None class Config: extra = "allow" protected_namespaces = ()
(*, model: str, messages: List[str] = [], timeout: Union[float, int, NoneType] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, n: Optional[int] = None, stream: Optional[bool] = None, stop: Optional[dict] = None, max_tokens: Optional[int] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, logit_bias: Optional[dict] = None, user: Optional[str] = None, response_format: Optional[dict] = None, seed: Optional[int] = None, tools: Optional[List[str]] = None, tool_choice: Optional[str] = None, logprobs: Optional[bool] = None, top_logprobs: Optional[int] = None, deployment_id: Optional[str] = None, functions: Optional[List[str]] = None, function_call: Optional[str] = None, base_url: Optional[str] = None, api_version: Optional[str] = None, api_key: Optional[str] = None, model_list: Optional[List[str]] = None, **extra_data: Any) -> None
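A short construction sketch for the pydantic `CompletionRequest` model above; `extra = "allow"` in its Config means unrecognized fields (e.g. provider-specific params) are kept rather than rejected. The `custom_param` name is purely illustrative.

from litellm.types.completion import CompletionRequest

req = CompletionRequest(
    model="gpt-3.5-turbo",
    messages=["Hello"],        # note: typed List[str] on this model
    max_tokens=100,
    custom_param="kept",       # accepted because Config.extra = "allow"
)
print(req.model)         # "gpt-3.5-turbo"
print(req.custom_param)  # "kept"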
63,139
litellm.main
Completions
null
class Completions: def __init__(self, params, router_obj: Optional[Any]): self.params = params self.router_obj = router_obj def create(self, messages, model=None, **kwargs): for k, v in kwargs.items(): self.params[k] = v model = model or self.params.get("model") if self.router_obj is not None: response = self.router_obj.completion( model=model, messages=messages, **self.params ) else: response = completion(model=model, messages=messages, **self.params) return response
(params, router_obj: Optional[Any])
63,141
litellm.main
create
null
def create(self, messages, model=None, **kwargs): for k, v in kwargs.items(): self.params[k] = v model = model or self.params.get("model") if self.router_obj is not None: response = self.router_obj.completion( model=model, messages=messages, **self.params ) else: response = completion(model=model, messages=messages, **self.params) return response
(self, messages, model=None, **kwargs)
63,142
litellm.exceptions
ContentPolicyViolationError
null
class ContentPolicyViolationError(BadRequestError): # type: ignore # Error code: 400 - {'error': {'code': 'content_policy_violation', 'message': 'Your request was rejected as a result of our safety system. Image descriptions generated from your prompt may contain text that is not allowed by our safety system. If you believe this was done in error, your request may succeed if retried, or by adjusting your prompt.', 'param': None, 'type': 'invalid_request_error'}} def __init__(self, message, model, llm_provider, response: httpx.Response): self.status_code = 400 self.message = message self.model = model self.llm_provider = llm_provider super().__init__( message=self.message, model=self.model, # type: ignore llm_provider=self.llm_provider, # type: ignore response=response, ) # Call the base class constructor with the parameters it needs
(message, model, llm_provider, response: httpx.Response)
63,143
litellm.exceptions
__init__
null
def __init__(self, message, model, llm_provider, response: httpx.Response): self.status_code = 400 self.message = message self.model = model self.llm_provider = llm_provider super().__init__( message=self.message, model=self.model, # type: ignore llm_provider=self.llm_provider, # type: ignore response=response, ) # Call the base class constructor with the parameters it needs
(self, message, model, llm_provider, response: httpx.Response)
63,144
litellm.exceptions
ContextWindowExceededError
null
class ContextWindowExceededError(BadRequestError): # type: ignore def __init__(self, message, model, llm_provider, response: httpx.Response): self.status_code = 400 self.message = message self.model = model self.llm_provider = llm_provider super().__init__( message=self.message, model=self.model, # type: ignore llm_provider=self.llm_provider, # type: ignore response=response, ) # Call the base class constructor with the parameters it needs
(message, model, llm_provider, response: httpx.Response)
63,146
litellm.utils
CustomStreamWrapper
null
class CustomStreamWrapper: def __init__( self, completion_stream, model, custom_llm_provider=None, logging_obj=None, stream_options=None, ): self.model = model self.custom_llm_provider = custom_llm_provider self.logging_obj = logging_obj self.completion_stream = completion_stream self.sent_first_chunk = False self.sent_last_chunk = False self.system_fingerprint: Optional[str] = None self.received_finish_reason: Optional[str] = None self.special_tokens = ["<|assistant|>", "<|system|>", "<|user|>", "<s>", "</s>"] self.holding_chunk = "" self.complete_response = "" self.response_uptil_now = "" _model_info = ( self.logging_obj.model_call_details.get("litellm_params", {}).get( "model_info", {} ) or {} ) self._hidden_params = { "model_id": (_model_info.get("id", None)) } # returned as x-litellm-model-id response header in proxy self.response_id = None self.logging_loop = None self.rules = Rules() self.stream_options = stream_options def __iter__(self): return self def __aiter__(self): return self def process_chunk(self, chunk: str): """ NLP Cloud streaming returns the entire response, for each chunk. Process this, to only return the delta. """ try: chunk = chunk.strip() self.complete_response = self.complete_response.strip() if chunk.startswith(self.complete_response): # Remove last_sent_chunk only if it appears at the start of the new chunk chunk = chunk[len(self.complete_response) :] self.complete_response += chunk return chunk except Exception as e: raise e def check_special_tokens(self, chunk: str, finish_reason: Optional[str]): """ Output parse <s> / </s> special tokens for sagemaker + hf streaming. """ hold = False if ( self.custom_llm_provider != "huggingface" and self.custom_llm_provider != "sagemaker" ): return hold, chunk if finish_reason: for token in self.special_tokens: if token in chunk: chunk = chunk.replace(token, "") return hold, chunk if self.sent_first_chunk is True: return hold, chunk curr_chunk = self.holding_chunk + chunk curr_chunk = curr_chunk.strip() for token in self.special_tokens: if len(curr_chunk) < len(token) and curr_chunk in token: hold = True self.holding_chunk = curr_chunk elif len(curr_chunk) >= len(token): if token in curr_chunk: self.holding_chunk = curr_chunk.replace(token, "") hold = True else: pass if hold is False: # reset self.holding_chunk = "" return hold, curr_chunk def handle_anthropic_text_chunk(self, chunk): str_line = chunk if isinstance(chunk, bytes): # Handle binary data str_line = chunk.decode("utf-8") # Convert bytes to string text = "" is_finished = False finish_reason = None if str_line.startswith("data:"): data_json = json.loads(str_line[5:]) type_chunk = data_json.get("type", None) if type_chunk == "completion": text = data_json.get("completion") finish_reason = data_json.get("stop_reason") if finish_reason is not None: is_finished = True return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } elif "error" in str_line: raise ValueError(f"Unable to parse response. 
Original response: {str_line}") else: return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } def handle_anthropic_chunk(self, chunk): str_line = chunk if isinstance(chunk, bytes): # Handle binary data str_line = chunk.decode("utf-8") # Convert bytes to string text = "" is_finished = False finish_reason = None if str_line.startswith("data:"): data_json = json.loads(str_line[5:]) type_chunk = data_json.get("type", None) if type_chunk == "content_block_delta": """ Anthropic content chunk chunk = {'type': 'content_block_delta', 'index': 0, 'delta': {'type': 'text_delta', 'text': 'Hello'}} """ text = data_json.get("delta", {}).get("text", "") elif type_chunk == "message_delta": """ Anthropic chunk = {'type': 'message_delta', 'delta': {'stop_reason': 'max_tokens', 'stop_sequence': None}, 'usage': {'output_tokens': 10}} """ # TODO - get usage from this chunk, set in response finish_reason = data_json.get("delta", {}).get("stop_reason", None) is_finished = True return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } elif "error" in str_line: raise ValueError(f"Unable to parse response. Original response: {str_line}") else: return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } def handle_vertexai_anthropic_chunk(self, chunk): """ - MessageStartEvent(message=Message(id='msg_01LeRRgvX4gwkX3ryBVgtuYZ', content=[], model='claude-3-sonnet-20240229', role='assistant', stop_reason=None, stop_sequence=None, type='message', usage=Usage(input_tokens=8, output_tokens=1)), type='message_start'); custom_llm_provider: vertex_ai - ContentBlockStartEvent(content_block=ContentBlock(text='', type='text'), index=0, type='content_block_start'); custom_llm_provider: vertex_ai - ContentBlockDeltaEvent(delta=TextDelta(text='Hello', type='text_delta'), index=0, type='content_block_delta'); custom_llm_provider: vertex_ai """ text = "" prompt_tokens = None completion_tokens = None is_finished = False finish_reason = None type_chunk = getattr(chunk, "type", None) if type_chunk == "message_start": message = getattr(chunk, "message", None) text = "" # lets us return a chunk with usage to user _usage = getattr(message, "usage", None) if _usage is not None: prompt_tokens = getattr(_usage, "input_tokens", None) completion_tokens = getattr(_usage, "output_tokens", None) elif type_chunk == "content_block_delta": """ Anthropic content chunk chunk = {'type': 'content_block_delta', 'index': 0, 'delta': {'type': 'text_delta', 'text': 'Hello'}} """ delta = getattr(chunk, "delta", None) if delta is not None: text = getattr(delta, "text", "") else: text = "" elif type_chunk == "message_delta": """ Anthropic chunk = {'type': 'message_delta', 'delta': {'stop_reason': 'max_tokens', 'stop_sequence': None}, 'usage': {'output_tokens': 10}} """ # TODO - get usage from this chunk, set in response delta = getattr(chunk, "delta", None) if delta is not None: finish_reason = getattr(delta, "stop_reason", "stop") is_finished = True _usage = getattr(chunk, "usage", None) if _usage is not None: prompt_tokens = getattr(_usage, "input_tokens", None) completion_tokens = getattr(_usage, "output_tokens", None) return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, "prompt_tokens": prompt_tokens, "completion_tokens": completion_tokens, } def handle_together_ai_chunk(self, chunk): chunk = chunk.decode("utf-8") text = "" is_finished = False finish_reason = None if "text" in chunk: text_index = chunk.find('"text":"') # this checks if text: exists 
text_start = text_index + len('"text":"') text_end = chunk.find('"}', text_start) if text_index != -1 and text_end != -1: extracted_text = chunk[text_start:text_end] text = extracted_text return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } elif "[DONE]" in chunk: return {"text": text, "is_finished": True, "finish_reason": "stop"} elif "error" in chunk: raise litellm.together_ai.TogetherAIError( status_code=422, message=f"{str(chunk)}" ) else: return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } def handle_predibase_chunk(self, chunk): try: if type(chunk) != str: chunk = chunk.decode( "utf-8" ) # DO NOT REMOVE this: This is required for HF inference API + Streaming text = "" is_finished = False finish_reason = "" print_verbose(f"chunk: {chunk}") if chunk.startswith("data:"): data_json = json.loads(chunk[5:]) print_verbose(f"data json: {data_json}") if "token" in data_json and "text" in data_json["token"]: text = data_json["token"]["text"] if data_json.get("details", False) and data_json["details"].get( "finish_reason", False ): is_finished = True finish_reason = data_json["details"]["finish_reason"] elif data_json.get( "generated_text", False ): # if full generated text exists, then stream is complete text = "" # don't return the final bos token is_finished = True finish_reason = "stop" elif data_json.get("error", False): raise Exception(data_json.get("error")) return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } elif "error" in chunk: raise ValueError(chunk) return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } except Exception as e: traceback.print_exc() raise e def handle_huggingface_chunk(self, chunk): try: if type(chunk) != str: chunk = chunk.decode( "utf-8" ) # DO NOT REMOVE this: This is required for HF inference API + Streaming text = "" is_finished = False finish_reason = "" print_verbose(f"chunk: {chunk}") if chunk.startswith("data:"): data_json = json.loads(chunk[5:]) print_verbose(f"data json: {data_json}") if "token" in data_json and "text" in data_json["token"]: text = data_json["token"]["text"] if data_json.get("details", False) and data_json["details"].get( "finish_reason", False ): is_finished = True finish_reason = data_json["details"]["finish_reason"] elif data_json.get( "generated_text", False ): # if full generated text exists, then stream is complete text = "" # don't return the final bos token is_finished = True finish_reason = "stop" elif data_json.get("error", False): raise Exception(data_json.get("error")) return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } elif "error" in chunk: raise ValueError(chunk) return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } except Exception as e: traceback.print_exc() raise e def handle_ai21_chunk(self, chunk): # fake streaming chunk = chunk.decode("utf-8") data_json = json.loads(chunk) try: text = data_json["completions"][0]["data"]["text"] is_finished = True finish_reason = "stop" return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } except: raise ValueError(f"Unable to parse response. 
Original response: {chunk}") def handle_maritalk_chunk(self, chunk): # fake streaming chunk = chunk.decode("utf-8") data_json = json.loads(chunk) try: text = data_json["answer"] is_finished = True finish_reason = "stop" return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } except: raise ValueError(f"Unable to parse response. Original response: {chunk}") def handle_nlp_cloud_chunk(self, chunk): text = "" is_finished = False finish_reason = "" try: if "dolphin" in self.model: chunk = self.process_chunk(chunk=chunk) else: data_json = json.loads(chunk) chunk = data_json["generated_text"] text = chunk if "[DONE]" in text: text = text.replace("[DONE]", "") is_finished = True finish_reason = "stop" return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } except Exception as e: raise ValueError(f"Unable to parse response. Original response: {chunk}") def handle_aleph_alpha_chunk(self, chunk): chunk = chunk.decode("utf-8") data_json = json.loads(chunk) try: text = data_json["completions"][0]["completion"] is_finished = True finish_reason = "stop" return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } except: raise ValueError(f"Unable to parse response. Original response: {chunk}") def handle_cohere_chunk(self, chunk): chunk = chunk.decode("utf-8") data_json = json.loads(chunk) try: text = "" is_finished = False finish_reason = "" if "text" in data_json: text = data_json["text"] elif "is_finished" in data_json: is_finished = data_json["is_finished"] finish_reason = data_json["finish_reason"] else: raise Exception(data_json) return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } except: raise ValueError(f"Unable to parse response. Original response: {chunk}") def handle_cohere_chat_chunk(self, chunk): chunk = chunk.decode("utf-8") data_json = json.loads(chunk) print_verbose(f"chunk: {chunk}") try: text = "" is_finished = False finish_reason = "" if "text" in data_json: text = data_json["text"] elif "is_finished" in data_json and data_json["is_finished"] == True: is_finished = data_json["is_finished"] finish_reason = data_json["finish_reason"] else: return return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } except: raise ValueError(f"Unable to parse response. Original response: {chunk}") def handle_azure_chunk(self, chunk): is_finished = False finish_reason = "" text = "" print_verbose(f"chunk: {chunk}") if "data: [DONE]" in chunk: text = "" is_finished = True finish_reason = "stop" return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } elif chunk.startswith("data:"): data_json = json.loads(chunk[5:]) # chunk.startswith("data:"): try: if len(data_json["choices"]) > 0: text = data_json["choices"][0]["delta"].get("content", "") if data_json["choices"][0].get("finish_reason", None): is_finished = True finish_reason = data_json["choices"][0]["finish_reason"] print_verbose( f"text: {text}; is_finished: {is_finished}; finish_reason: {finish_reason}" ) return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } except: raise ValueError( f"Unable to parse response. Original response: {chunk}" ) elif "error" in chunk: raise ValueError(f"Unable to parse response. 
Original response: {chunk}") else: return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } def handle_replicate_chunk(self, chunk): try: text = "" is_finished = False finish_reason = "" if "output" in chunk: text = chunk["output"] if "status" in chunk: if chunk["status"] == "succeeded": is_finished = True finish_reason = "stop" elif chunk.get("error", None): raise Exception(chunk["error"]) return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } except: raise ValueError(f"Unable to parse response. Original response: {chunk}") def handle_openai_chat_completion_chunk(self, chunk): try: print_verbose(f"\nRaw OpenAI Chunk\n{chunk}\n") str_line = chunk text = "" is_finished = False finish_reason = None logprobs = None usage = None original_chunk = None # this is used for function/tool calling if len(str_line.choices) > 0: if ( str_line.choices[0].delta is not None and str_line.choices[0].delta.content is not None ): text = str_line.choices[0].delta.content else: # function/tool calling chunk - when content is None. in this case we just return the original chunk from openai original_chunk = str_line if str_line.choices[0].finish_reason: is_finished = True finish_reason = str_line.choices[0].finish_reason if finish_reason == "content_filter": if hasattr(str_line.choices[0], "content_filter_result"): error_message = json.dumps( str_line.choices[0].content_filter_result ) else: error_message = "Azure Response={}".format( str(dict(str_line)) ) raise litellm.AzureOpenAIError( status_code=400, message=error_message ) # checking for logprobs if ( hasattr(str_line.choices[0], "logprobs") and str_line.choices[0].logprobs is not None ): logprobs = str_line.choices[0].logprobs else: logprobs = None usage = getattr(str_line, "usage", None) return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, "logprobs": logprobs, "original_chunk": str_line, "usage": usage, } except Exception as e: traceback.print_exc() raise e def handle_azure_text_completion_chunk(self, chunk): try: print_verbose(f"\nRaw OpenAI Chunk\n{chunk}\n") text = "" is_finished = False finish_reason = None choices = getattr(chunk, "choices", []) if len(choices) > 0: text = choices[0].text if choices[0].finish_reason is not None: is_finished = True finish_reason = choices[0].finish_reason return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } except Exception as e: raise e def handle_openai_text_completion_chunk(self, chunk): try: print_verbose(f"\nRaw OpenAI Chunk\n{chunk}\n") text = "" is_finished = False finish_reason = None usage = None choices = getattr(chunk, "choices", []) if len(choices) > 0: text = choices[0].text if choices[0].finish_reason is not None: is_finished = True finish_reason = choices[0].finish_reason usage = getattr(chunk, "usage", None) return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, "usage": usage, } except Exception as e: raise e def handle_baseten_chunk(self, chunk): try: chunk = chunk.decode("utf-8") if len(chunk) > 0: if chunk.startswith("data:"): data_json = json.loads(chunk[5:]) if "token" in data_json and "text" in data_json["token"]: return data_json["token"]["text"] else: return "" data_json = json.loads(chunk) if "model_output" in data_json: if ( isinstance(data_json["model_output"], dict) and "data" in data_json["model_output"] and isinstance(data_json["model_output"]["data"], list) ): return data_json["model_output"]["data"][0] elif 
isinstance(data_json["model_output"], str): return data_json["model_output"] elif "completion" in data_json and isinstance( data_json["completion"], str ): return data_json["completion"] else: raise ValueError( f"Unable to parse response. Original response: {chunk}" ) else: return "" else: return "" except: traceback.print_exc() return "" def handle_cloudlfare_stream(self, chunk): try: print_verbose(f"\nRaw OpenAI Chunk\n{chunk}\n") chunk = chunk.decode("utf-8") str_line = chunk text = "" is_finished = False finish_reason = None if "[DONE]" in chunk: return {"text": text, "is_finished": True, "finish_reason": "stop"} elif str_line.startswith("data:"): data_json = json.loads(str_line[5:]) print_verbose(f"delta content: {data_json}") text = data_json["response"] return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } else: return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } except Exception as e: raise e def handle_ollama_stream(self, chunk): try: if isinstance(chunk, dict): json_chunk = chunk else: json_chunk = json.loads(chunk) if "error" in json_chunk: raise Exception(f"Ollama Error - {json_chunk}") text = "" is_finished = False finish_reason = None if json_chunk["done"] == True: text = "" is_finished = True finish_reason = "stop" return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } elif json_chunk["response"]: print_verbose(f"delta content: {json_chunk}") text = json_chunk["response"] return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } else: raise Exception(f"Ollama Error - {json_chunk}") except Exception as e: raise e def handle_ollama_chat_stream(self, chunk): # for ollama_chat/ provider try: if isinstance(chunk, dict): json_chunk = chunk else: json_chunk = json.loads(chunk) if "error" in json_chunk: raise Exception(f"Ollama Error - {json_chunk}") text = "" is_finished = False finish_reason = None if json_chunk["done"] == True: text = "" is_finished = True finish_reason = "stop" return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } elif "message" in json_chunk: print_verbose(f"delta content: {json_chunk}") text = json_chunk["message"]["content"] return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } else: raise Exception(f"Ollama Error - {json_chunk}") except Exception as e: raise e def handle_bedrock_stream(self, chunk): if "cohere" in self.model: return { "text": chunk["text"], "is_finished": chunk["is_finished"], "finish_reason": chunk["finish_reason"], } if hasattr(chunk, "get"): chunk = chunk.get("chunk") chunk_data = json.loads(chunk.get("bytes").decode()) else: chunk_data = json.loads(chunk.decode()) if chunk_data: text = "" is_finished = False finish_reason = "" if "outputText" in chunk_data: text = chunk_data["outputText"] # ai21 mapping if "ai21" in self.model: # fake ai21 streaming text = chunk_data.get("completions")[0].get("data").get("text") is_finished = True finish_reason = "stop" ######## bedrock.anthropic mappings ############### elif "completion" in chunk_data: # not claude-3 text = chunk_data["completion"] # bedrock.anthropic stop_reason = chunk_data.get("stop_reason", None) if stop_reason != None: is_finished = True finish_reason = stop_reason elif "delta" in chunk_data: if chunk_data["delta"].get("text", None) is not None: text = chunk_data["delta"]["text"] stop_reason = chunk_data["delta"].get("stop_reason", None) if stop_reason != None: is_finished = True finish_reason = 
stop_reason ######## bedrock.mistral mappings ############### elif "outputs" in chunk_data: if ( len(chunk_data["outputs"]) == 1 and chunk_data["outputs"][0].get("text", None) is not None ): text = chunk_data["outputs"][0]["text"] stop_reason = chunk_data.get("stop_reason", None) if stop_reason != None: is_finished = True finish_reason = stop_reason ######## bedrock.cohere mappings ############### # meta mapping elif "generation" in chunk_data: text = chunk_data["generation"] # bedrock.meta # cohere mapping elif "text" in chunk_data: text = chunk_data["text"] # bedrock.cohere # cohere mapping for finish reason elif "finish_reason" in chunk_data: finish_reason = chunk_data["finish_reason"] is_finished = True elif chunk_data.get("completionReason", None): is_finished = True finish_reason = chunk_data["completionReason"] elif chunk.get("error", None): raise Exception(chunk["error"]) return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } return "" def handle_sagemaker_stream(self, chunk): if "data: [DONE]" in chunk: text = "" is_finished = True finish_reason = "stop" return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } elif isinstance(chunk, dict): if chunk["is_finished"] == True: finish_reason = "stop" else: finish_reason = "" return { "text": chunk["text"], "is_finished": chunk["is_finished"], "finish_reason": finish_reason, } def handle_watsonx_stream(self, chunk): try: if isinstance(chunk, dict): parsed_response = chunk elif isinstance(chunk, (str, bytes)): if isinstance(chunk, bytes): chunk = chunk.decode("utf-8") if "generated_text" in chunk: response = chunk.replace("data: ", "").strip() parsed_response = json.loads(response) else: return { "text": "", "is_finished": False, "prompt_tokens": 0, "completion_tokens": 0, } else: print_verbose(f"chunk: {chunk} (Type: {type(chunk)})") raise ValueError( f"Unable to parse response. 
Original response: {chunk}" ) results = parsed_response.get("results", []) if len(results) > 0: text = results[0].get("generated_text", "") finish_reason = results[0].get("stop_reason") is_finished = finish_reason != "not_finished" return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, "prompt_tokens": results[0].get("input_token_count", 0), "completion_tokens": results[0].get("generated_token_count", 0), } return {"text": "", "is_finished": False} except Exception as e: raise e def handle_clarifai_completion_chunk(self, chunk): try: if isinstance(chunk, dict): parsed_response = chunk if isinstance(chunk, (str, bytes)): if isinstance(chunk, bytes): parsed_response = chunk.decode("utf-8") else: parsed_response = chunk data_json = json.loads(parsed_response) text = ( data_json.get("outputs", "")[0] .get("data", "") .get("text", "") .get("raw", "") ) prompt_tokens = len( encoding.encode( data_json.get("outputs", "")[0] .get("input", "") .get("data", "") .get("text", "") .get("raw", "") ) ) completion_tokens = len(encoding.encode(text)) return { "text": text, "is_finished": True, } except: traceback.print_exc() return "" def model_response_creator(self): model_response = ModelResponse( stream=True, model=self.model, stream_options=self.stream_options ) if self.response_id is not None: model_response.id = self.response_id else: self.response_id = model_response.id if self.system_fingerprint is not None: model_response.system_fingerprint = self.system_fingerprint model_response._hidden_params["custom_llm_provider"] = self.custom_llm_provider model_response._hidden_params["created_at"] = time.time() model_response.choices = [StreamingChoices()] model_response.choices[0].finish_reason = None return model_response def is_delta_empty(self, delta: Delta) -> bool: is_empty = True if delta.content is not None: is_empty = False elif delta.tool_calls is not None: is_empty = False elif delta.function_call is not None: is_empty = False return is_empty def chunk_creator(self, chunk): model_response = self.model_response_creator() response_obj = {} try: # return this for all models completion_obj = {"content": ""} if self.custom_llm_provider and self.custom_llm_provider == "anthropic": response_obj = self.handle_anthropic_chunk(chunk) completion_obj["content"] = response_obj["text"] if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif ( self.custom_llm_provider and self.custom_llm_provider == "anthropic_text" ): response_obj = self.handle_anthropic_text_chunk(chunk) completion_obj["content"] = response_obj["text"] if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif self.custom_llm_provider and self.custom_llm_provider == "clarifai": response_obj = self.handle_clarifai_completion_chunk(chunk) completion_obj["content"] = response_obj["text"] elif self.model == "replicate" or self.custom_llm_provider == "replicate": response_obj = self.handle_replicate_chunk(chunk) completion_obj["content"] = response_obj["text"] if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif self.custom_llm_provider and self.custom_llm_provider == "together_ai": response_obj = self.handle_together_ai_chunk(chunk) completion_obj["content"] = response_obj["text"] if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif self.custom_llm_provider and self.custom_llm_provider == "huggingface": response_obj = self.handle_huggingface_chunk(chunk) 
completion_obj["content"] = response_obj["text"] if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif self.custom_llm_provider and self.custom_llm_provider == "predibase": response_obj = self.handle_predibase_chunk(chunk) completion_obj["content"] = response_obj["text"] if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif ( self.custom_llm_provider and self.custom_llm_provider == "baseten" ): # baseten doesn't provide streaming completion_obj["content"] = self.handle_baseten_chunk(chunk) elif ( self.custom_llm_provider and self.custom_llm_provider == "ai21" ): # ai21 doesn't provide streaming response_obj = self.handle_ai21_chunk(chunk) completion_obj["content"] = response_obj["text"] if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif self.custom_llm_provider and self.custom_llm_provider == "maritalk": response_obj = self.handle_maritalk_chunk(chunk) completion_obj["content"] = response_obj["text"] if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif self.custom_llm_provider and self.custom_llm_provider == "vllm": completion_obj["content"] = chunk[0].outputs[0].text elif ( self.custom_llm_provider and self.custom_llm_provider == "aleph_alpha" ): # aleph alpha doesn't provide streaming response_obj = self.handle_aleph_alpha_chunk(chunk) completion_obj["content"] = response_obj["text"] if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif self.custom_llm_provider == "nlp_cloud": try: response_obj = self.handle_nlp_cloud_chunk(chunk) completion_obj["content"] = response_obj["text"] if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] except Exception as e: if self.received_finish_reason: raise e else: if self.sent_first_chunk is False: raise Exception("An unknown error occurred with the stream") self.received_finish_reason = "stop" elif self.custom_llm_provider == "gemini": if hasattr(chunk, "parts") == True: try: if len(chunk.parts) > 0: completion_obj["content"] = chunk.parts[0].text if len(chunk.parts) > 0 and hasattr( chunk.parts[0], "finish_reason" ): self.received_finish_reason = chunk.parts[ 0 ].finish_reason.name except: if chunk.parts[0].finish_reason.name == "SAFETY": raise Exception( f"The response was blocked by VertexAI. 
{str(chunk)}" ) else: completion_obj["content"] = str(chunk) elif self.custom_llm_provider and (self.custom_llm_provider == "vertex_ai"): if self.model.startswith("claude-3"): response_obj = self.handle_vertexai_anthropic_chunk(chunk=chunk) if response_obj is None: return completion_obj["content"] = response_obj["text"] setattr(model_response, "usage", Usage()) if response_obj.get("prompt_tokens", None) is not None: model_response.usage.prompt_tokens = response_obj[ "prompt_tokens" ] if response_obj.get("completion_tokens", None) is not None: model_response.usage.completion_tokens = response_obj[ "completion_tokens" ] if hasattr(model_response.usage, "prompt_tokens"): model_response.usage.total_tokens = ( getattr(model_response.usage, "total_tokens", 0) + model_response.usage.prompt_tokens ) if hasattr(model_response.usage, "completion_tokens"): model_response.usage.total_tokens = ( getattr(model_response.usage, "total_tokens", 0) + model_response.usage.completion_tokens ) if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif hasattr(chunk, "candidates") == True: try: try: completion_obj["content"] = chunk.text except Exception as e: if "Part has no text." in str(e): ## check for function calling function_call = ( chunk.candidates[0].content.parts[0].function_call ) args_dict = {} for k, v in function_call.args.items(): args_dict[k] = v args_str = json.dumps(args_dict) _delta_obj = litellm.utils.Delta( content=None, tool_calls=[ { "id": f"call_{str(uuid.uuid4())}", "function": { "arguments": args_str, "name": function_call.name, }, "type": "function", } ], ) _streaming_response = StreamingChoices(delta=_delta_obj) _model_response = ModelResponse(stream=True) _model_response.choices = [_streaming_response] response_obj = {"original_chunk": _model_response} else: raise e if ( hasattr(chunk.candidates[0], "finish_reason") and chunk.candidates[0].finish_reason.name != "FINISH_REASON_UNSPECIFIED" ): # every non-final chunk in vertex ai has this self.received_finish_reason = chunk.candidates[ 0 ].finish_reason.name except Exception as e: if chunk.candidates[0].finish_reason.name == "SAFETY": raise Exception( f"The response was blocked by VertexAI. 
{str(chunk)}" ) else: completion_obj["content"] = str(chunk) elif self.custom_llm_provider == "cohere": response_obj = self.handle_cohere_chunk(chunk) completion_obj["content"] = response_obj["text"] if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif self.custom_llm_provider == "cohere_chat": response_obj = self.handle_cohere_chat_chunk(chunk) if response_obj is None: return completion_obj["content"] = response_obj["text"] if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif self.custom_llm_provider == "bedrock": if self.received_finish_reason is not None: raise StopIteration response_obj = self.handle_bedrock_stream(chunk) completion_obj["content"] = response_obj["text"] if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif self.custom_llm_provider == "sagemaker": print_verbose(f"ENTERS SAGEMAKER STREAMING for chunk {chunk}") response_obj = self.handle_sagemaker_stream(chunk) completion_obj["content"] = response_obj["text"] if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif self.custom_llm_provider == "petals": if len(self.completion_stream) == 0: if self.received_finish_reason is not None: raise StopIteration else: self.received_finish_reason = "stop" chunk_size = 30 new_chunk = self.completion_stream[:chunk_size] completion_obj["content"] = new_chunk self.completion_stream = self.completion_stream[chunk_size:] time.sleep(0.05) elif self.custom_llm_provider == "palm": # fake streaming response_obj = {} if len(self.completion_stream) == 0: if self.received_finish_reason is not None: raise StopIteration else: self.received_finish_reason = "stop" chunk_size = 30 new_chunk = self.completion_stream[:chunk_size] completion_obj["content"] = new_chunk self.completion_stream = self.completion_stream[chunk_size:] time.sleep(0.05) elif self.custom_llm_provider == "ollama": response_obj = self.handle_ollama_stream(chunk) completion_obj["content"] = response_obj["text"] print_verbose(f"completion obj content: {completion_obj['content']}") if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif self.custom_llm_provider == "ollama_chat": response_obj = self.handle_ollama_chat_stream(chunk) completion_obj["content"] = response_obj["text"] print_verbose(f"completion obj content: {completion_obj['content']}") if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif self.custom_llm_provider == "cloudflare": response_obj = self.handle_cloudlfare_stream(chunk) completion_obj["content"] = response_obj["text"] print_verbose(f"completion obj content: {completion_obj['content']}") if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif self.custom_llm_provider == "watsonx": response_obj = self.handle_watsonx_stream(chunk) completion_obj["content"] = response_obj["text"] if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif self.custom_llm_provider == "text-completion-openai": response_obj = self.handle_openai_text_completion_chunk(chunk) completion_obj["content"] = response_obj["text"] print_verbose(f"completion obj content: {completion_obj['content']}") if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] if ( self.stream_options and self.stream_options.get("include_usage", False) == True ): model_response.usage = response_obj["usage"] 
elif self.custom_llm_provider == "azure_text": response_obj = self.handle_azure_text_completion_chunk(chunk) completion_obj["content"] = response_obj["text"] print_verbose(f"completion obj content: {completion_obj['content']}") if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif self.custom_llm_provider == "cached_response": response_obj = { "text": chunk.choices[0].delta.content, "is_finished": True, "finish_reason": chunk.choices[0].finish_reason, "original_chunk": chunk, } completion_obj["content"] = response_obj["text"] print_verbose(f"completion obj content: {completion_obj['content']}") if hasattr(chunk, "id"): model_response.id = chunk.id self.response_id = chunk.id if hasattr(chunk, "system_fingerprint"): self.system_fingerprint = chunk.system_fingerprint if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] else: # openai / azure chat model if self.custom_llm_provider == "azure": if hasattr(chunk, "model"): # for azure, we need to pass the model from the orignal chunk self.model = chunk.model response_obj = self.handle_openai_chat_completion_chunk(chunk) if response_obj == None: return completion_obj["content"] = response_obj["text"] print_verbose(f"completion obj content: {completion_obj['content']}") if response_obj["is_finished"]: if response_obj["finish_reason"] == "error": raise Exception( "Mistral API raised a streaming error - finish_reason: error, no content string given." ) self.received_finish_reason = response_obj["finish_reason"] if response_obj.get("original_chunk", None) is not None: if hasattr(response_obj["original_chunk"], "id"): model_response.id = response_obj["original_chunk"].id self.response_id = model_response.id if hasattr(response_obj["original_chunk"], "system_fingerprint"): model_response.system_fingerprint = response_obj[ "original_chunk" ].system_fingerprint self.system_fingerprint = response_obj[ "original_chunk" ].system_fingerprint if response_obj["logprobs"] is not None: model_response.choices[0].logprobs = response_obj["logprobs"] if ( self.stream_options is not None and self.stream_options["include_usage"] == True ): model_response.usage = response_obj["usage"] model_response.model = self.model print_verbose( f"model_response finish reason 3: {self.received_finish_reason}; response_obj={response_obj}" ) ## FUNCTION CALL PARSING if ( response_obj is not None and response_obj.get("original_chunk", None) is not None ): # function / tool calling branch - only set for openai/azure compatible endpoints # enter this branch when no content has been passed in response original_chunk = response_obj.get("original_chunk", None) model_response.id = original_chunk.id self.response_id = original_chunk.id if len(original_chunk.choices) > 0: if ( original_chunk.choices[0].delta.function_call is not None or original_chunk.choices[0].delta.tool_calls is not None ): try: delta = original_chunk.choices[0].delta model_response.system_fingerprint = ( original_chunk.system_fingerprint ) ## AZURE - check if arguments is not None if ( original_chunk.choices[0].delta.function_call is not None ): if ( getattr( original_chunk.choices[0].delta.function_call, "arguments", ) is None ): original_chunk.choices[ 0 ].delta.function_call.arguments = "" elif original_chunk.choices[0].delta.tool_calls is not None: if isinstance( original_chunk.choices[0].delta.tool_calls, list ): for t in original_chunk.choices[0].delta.tool_calls: if hasattr(t, "functions") and hasattr( t.functions, "arguments" ): if ( 
getattr( t.function, "arguments", ) is None ): t.function.arguments = "" _json_delta = delta.model_dump() print_verbose(f"_json_delta: {_json_delta}") if "role" not in _json_delta or _json_delta["role"] is None: _json_delta["role"] = ( "assistant" # mistral's api returns role as None ) if "tool_calls" in _json_delta and isinstance( _json_delta["tool_calls"], list ): for tool in _json_delta["tool_calls"]: if ( isinstance(tool, dict) and "function" in tool and isinstance(tool["function"], dict) and ("type" not in tool or tool["type"] is None) ): # if function returned but type set to None - mistral's api returns type: None tool["type"] = "function" model_response.choices[0].delta = Delta(**_json_delta) except Exception as e: traceback.print_exc() model_response.choices[0].delta = Delta() else: try: delta = dict(original_chunk.choices[0].delta) print_verbose(f"original delta: {delta}") model_response.choices[0].delta = Delta(**delta) print_verbose( f"new delta: {model_response.choices[0].delta}" ) except Exception as e: model_response.choices[0].delta = Delta() else: if ( self.stream_options is not None and self.stream_options["include_usage"] == True ): return model_response return print_verbose( f"model_response.choices[0].delta: {model_response.choices[0].delta}; completion_obj: {completion_obj}" ) print_verbose(f"self.sent_first_chunk: {self.sent_first_chunk}") ## RETURN ARG if ( "content" in completion_obj and isinstance(completion_obj["content"], str) and len(completion_obj["content"]) == 0 and hasattr(model_response, "usage") and hasattr(model_response.usage, "prompt_tokens") ): if self.sent_first_chunk == False: completion_obj["role"] = "assistant" self.sent_first_chunk = True model_response.choices[0].delta = Delta(**completion_obj) print_verbose(f"returning model_response: {model_response}") return model_response elif ( "content" in completion_obj and isinstance(completion_obj["content"], str) and len(completion_obj["content"]) > 0 ): # cannot set content of an OpenAI Object to be an empty string hold, model_response_str = self.check_special_tokens( chunk=completion_obj["content"], finish_reason=model_response.choices[0].finish_reason, ) # filter out bos/eos tokens from openai-compatible hf endpoints print_verbose( f"hold - {hold}, model_response_str - {model_response_str}" ) if hold is False: ## check if openai/azure chunk original_chunk = response_obj.get("original_chunk", None) if original_chunk: model_response.id = original_chunk.id self.response_id = original_chunk.id if len(original_chunk.choices) > 0: choices = [] for idx, choice in enumerate(original_chunk.choices): try: if isinstance(choice, BaseModel): try: choice_json = choice.model_dump() except Exception as e: choice_json = choice.dict() choice_json.pop( "finish_reason", None ) # for mistral etc. which return a value in their last chunk (not-openai compatible). 
print_verbose(f"choice_json: {choice_json}") choices.append(StreamingChoices(**choice_json)) except Exception as e: choices.append(StreamingChoices()) print_verbose(f"choices in streaming: {choices}") model_response.choices = choices else: return model_response.system_fingerprint = ( original_chunk.system_fingerprint ) print_verbose(f"self.sent_first_chunk: {self.sent_first_chunk}") if self.sent_first_chunk == False: model_response.choices[0].delta["role"] = "assistant" self.sent_first_chunk = True elif self.sent_first_chunk == True and hasattr( model_response.choices[0].delta, "role" ): _initial_delta = model_response.choices[ 0 ].delta.model_dump() _initial_delta.pop("role", None) model_response.choices[0].delta = Delta(**_initial_delta) print_verbose( f"model_response.choices[0].delta: {model_response.choices[0].delta}" ) else: ## else completion_obj["content"] = model_response_str if self.sent_first_chunk == False: completion_obj["role"] = "assistant" self.sent_first_chunk = True model_response.choices[0].delta = Delta(**completion_obj) print_verbose(f"returning model_response: {model_response}") return model_response else: return elif self.received_finish_reason is not None: if self.sent_last_chunk == True: raise StopIteration # flush any remaining holding chunk if len(self.holding_chunk) > 0: if model_response.choices[0].delta.content is None: model_response.choices[0].delta.content = self.holding_chunk else: model_response.choices[0].delta.content = ( self.holding_chunk + model_response.choices[0].delta.content ) self.holding_chunk = "" # if delta is None _is_delta_empty = self.is_delta_empty( delta=model_response.choices[0].delta ) if _is_delta_empty: # get any function call arguments model_response.choices[0].finish_reason = map_finish_reason( finish_reason=self.received_finish_reason ) # ensure consistent output to openai self.sent_last_chunk = True return model_response elif ( model_response.choices[0].delta.tool_calls is not None or model_response.choices[0].delta.function_call is not None ): if self.sent_first_chunk == False: model_response.choices[0].delta["role"] = "assistant" self.sent_first_chunk = True return model_response else: return except StopIteration: raise StopIteration except Exception as e: traceback_exception = traceback.format_exc() e.message = str(e) raise exception_type( model=self.model, custom_llm_provider=self.custom_llm_provider, original_exception=e, ) def set_logging_event_loop(self, loop): """ import litellm, asyncio loop = asyncio.get_event_loop() # 👈 gets the current event loop response = litellm.completion(.., stream=True) response.set_logging_event_loop(loop=loop) # 👈 enables async_success callbacks for sync logging for chunk in response: ... """ self.logging_loop = loop def run_success_logging_in_thread(self, processed_chunk): if litellm.disable_streaming_logging == True: """ [NOT RECOMMENDED] Set this via `litellm.disable_streaming_logging = True`. Disables streaming logging. 
""" return ## ASYNC LOGGING # Create an event loop for the new thread if self.logging_loop is not None: future = asyncio.run_coroutine_threadsafe( self.logging_obj.async_success_handler(processed_chunk), loop=self.logging_loop, ) result = future.result() else: asyncio.run(self.logging_obj.async_success_handler(processed_chunk)) ## SYNC LOGGING self.logging_obj.success_handler(processed_chunk) def finish_reason_handler(self): model_response = self.model_response_creator() if self.received_finish_reason is not None: model_response.choices[0].finish_reason = map_finish_reason( finish_reason=self.received_finish_reason ) else: model_response.choices[0].finish_reason = "stop" return model_response ## needs to handle the empty string case (even starting chunk can be an empty string) def __next__(self): try: while True: if ( isinstance(self.completion_stream, str) or isinstance(self.completion_stream, bytes) or isinstance(self.completion_stream, ModelResponse) ): chunk = self.completion_stream else: chunk = next(self.completion_stream) if chunk is not None and chunk != b"": print_verbose( f"PROCESSED CHUNK PRE CHUNK CREATOR: {chunk}; custom_llm_provider: {self.custom_llm_provider}" ) response: Optional[ModelResponse] = self.chunk_creator(chunk=chunk) print_verbose(f"PROCESSED CHUNK POST CHUNK CREATOR: {response}") if response is None: continue ## LOGGING threading.Thread( target=self.run_success_logging_in_thread, args=(response,) ).start() # log response self.response_uptil_now += ( response.choices[0].delta.get("content", "") or "" ) self.rules.post_call_rules( input=self.response_uptil_now, model=self.model ) # RETURN RESULT return response except StopIteration: if self.sent_last_chunk == True: raise # Re-raise StopIteration else: self.sent_last_chunk = True processed_chunk = self.finish_reason_handler() ## LOGGING threading.Thread( target=self.logging_obj.success_handler, args=(processed_chunk,) ).start() # log response return processed_chunk except Exception as e: traceback_exception = traceback.format_exc() # LOG FAILURE - handle streaming failure logging in the _next_ object, remove `handle_failure` once it's deprecated threading.Thread( target=self.logging_obj.failure_handler, args=(e, traceback_exception) ).start() if isinstance(e, OpenAIError): raise e else: raise exception_type( model=self.model, original_exception=e, custom_llm_provider=self.custom_llm_provider, ) async def __anext__(self): try: if ( self.custom_llm_provider == "openai" or self.custom_llm_provider == "azure" or self.custom_llm_provider == "custom_openai" or self.custom_llm_provider == "text-completion-openai" or self.custom_llm_provider == "azure_text" or self.custom_llm_provider == "anthropic" or self.custom_llm_provider == "anthropic_text" or self.custom_llm_provider == "huggingface" or self.custom_llm_provider == "ollama" or self.custom_llm_provider == "ollama_chat" or self.custom_llm_provider == "vertex_ai" or self.custom_llm_provider == "sagemaker" or self.custom_llm_provider == "gemini" or self.custom_llm_provider == "cached_response" or self.custom_llm_provider == "predibase" or (self.custom_llm_provider == "bedrock" and "cohere" in self.model) or self.custom_llm_provider in litellm.openai_compatible_endpoints ): async for chunk in self.completion_stream: print_verbose(f"value of async chunk: {chunk}") if chunk == "None" or chunk is None: raise Exception elif ( self.custom_llm_provider == "gemini" and hasattr(chunk, "parts") and len(chunk.parts) == 0 ): continue # chunk_creator() does logging/stream chunk 
building. We need to let it know its being called in_async_func, so we don't double add chunks. # __anext__ also calls async_success_handler, which does logging print_verbose(f"PROCESSED ASYNC CHUNK PRE CHUNK CREATOR: {chunk}") processed_chunk: Optional[ModelResponse] = self.chunk_creator( chunk=chunk ) print_verbose( f"PROCESSED ASYNC CHUNK POST CHUNK CREATOR: {processed_chunk}" ) if processed_chunk is None: continue ## LOGGING threading.Thread( target=self.logging_obj.success_handler, args=(processed_chunk,) ).start() # log response asyncio.create_task( self.logging_obj.async_success_handler( processed_chunk, ) ) self.response_uptil_now += ( processed_chunk.choices[0].delta.get("content", "") or "" ) self.rules.post_call_rules( input=self.response_uptil_now, model=self.model ) print_verbose(f"final returned processed chunk: {processed_chunk}") return processed_chunk raise StopAsyncIteration else: # temporary patch for non-aiohttp async calls # example - boto3 bedrock llms while True: if isinstance(self.completion_stream, str) or isinstance( self.completion_stream, bytes ): chunk = self.completion_stream else: chunk = next(self.completion_stream) if chunk is not None and chunk != b"": print_verbose(f"PROCESSED CHUNK PRE CHUNK CREATOR: {chunk}") processed_chunk: Optional[ModelResponse] = self.chunk_creator( chunk=chunk ) print_verbose( f"PROCESSED CHUNK POST CHUNK CREATOR: {processed_chunk}" ) if processed_chunk is None: continue ## LOGGING threading.Thread( target=self.logging_obj.success_handler, args=(processed_chunk,), ).start() # log processed_chunk asyncio.create_task( self.logging_obj.async_success_handler( processed_chunk, ) ) self.response_uptil_now += ( processed_chunk.choices[0].delta.get("content", "") or "" ) self.rules.post_call_rules( input=self.response_uptil_now, model=self.model ) # RETURN RESULT return processed_chunk except StopAsyncIteration: if self.sent_last_chunk == True: raise # Re-raise StopIteration else: self.sent_last_chunk = True processed_chunk = self.finish_reason_handler() ## LOGGING threading.Thread( target=self.logging_obj.success_handler, args=(processed_chunk,) ).start() # log response asyncio.create_task( self.logging_obj.async_success_handler( processed_chunk, ) ) return processed_chunk except StopIteration: if self.sent_last_chunk == True: raise StopAsyncIteration else: self.sent_last_chunk = True processed_chunk = self.finish_reason_handler() ## LOGGING threading.Thread( target=self.logging_obj.success_handler, args=(processed_chunk,) ).start() # log response asyncio.create_task( self.logging_obj.async_success_handler( processed_chunk, ) ) return processed_chunk except Exception as e: traceback_exception = traceback.format_exc() # Handle any exceptions that might occur during streaming asyncio.create_task( self.logging_obj.async_failure_handler(e, traceback_exception) ) raise e
(completion_stream, model, custom_llm_provider=None, logging_obj=None, stream_options=None)
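The provider-specific handle_*_chunk helpers above all normalize a raw provider payload into the same small dict of text / is_finished / finish_reason, which chunk_creator then maps onto an OpenAI-style streaming choice. A minimal standalone sketch of that contract for an Anthropic-style SSE line; the function name and sample payloads are illustrative only, not litellm APIs:

import json

def parse_anthropic_sse_line(str_line: str) -> dict:
    # Toy parser mirroring the return shape used by handle_anthropic_chunk (illustrative helper).
    text, is_finished, finish_reason = "", False, None
    if str_line.startswith("data:"):
        data_json = json.loads(str_line[5:])
        if data_json.get("type") == "content_block_delta":
            text = data_json.get("delta", {}).get("text", "")
        elif data_json.get("type") == "message_delta":
            finish_reason = data_json.get("delta", {}).get("stop_reason")
            is_finished = True
    return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}

# A content delta followed by the terminating message_delta
print(parse_anthropic_sse_line('data: {"type": "content_block_delta", "index": 0, "delta": {"type": "text_delta", "text": "Hello"}}'))
print(parse_anthropic_sse_line('data: {"type": "message_delta", "delta": {"stop_reason": "end_turn"}, "usage": {"output_tokens": 10}}'))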
63,147
litellm.utils
__aiter__
null
def __aiter__(self):
    return self
(self)
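Because __aiter__ returns self, the wrapper is its own async iterator and can be consumed directly with async for. A hedged usage sketch, assuming the wrapper is obtained through litellm.acompletion(..., stream=True); the model name and prompt are placeholders:

import asyncio
import litellm

async def main():
    response = await litellm.acompletion(
        model="gpt-3.5-turbo",  # placeholder model name
        messages=[{"role": "user", "content": "Say hi"}],
        stream=True,
    )
    async for chunk in response:  # drives __aiter__/__anext__ on the wrapper
        print(chunk.choices[0].delta.content or "", end="")

asyncio.run(main())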
63,148
litellm.utils
__anext__
null
def __next__(self):
    try:
        while True:
            if (
                isinstance(self.completion_stream, str)
                or isinstance(self.completion_stream, bytes)
                or isinstance(self.completion_stream, ModelResponse)
            ):
                chunk = self.completion_stream
            else:
                chunk = next(self.completion_stream)
            if chunk is not None and chunk != b"":
                print_verbose(
                    f"PROCESSED CHUNK PRE CHUNK CREATOR: {chunk}; custom_llm_provider: {self.custom_llm_provider}"
                )
                response: Optional[ModelResponse] = self.chunk_creator(chunk=chunk)
                print_verbose(f"PROCESSED CHUNK POST CHUNK CREATOR: {response}")
                if response is None:
                    continue
                ## LOGGING
                threading.Thread(
                    target=self.run_success_logging_in_thread, args=(response,)
                ).start()  # log response
                self.response_uptil_now += (
                    response.choices[0].delta.get("content", "") or ""
                )
                self.rules.post_call_rules(
                    input=self.response_uptil_now, model=self.model
                )
                # RETURN RESULT
                return response
    except StopIteration:
        if self.sent_last_chunk == True:
            raise  # Re-raise StopIteration
        else:
            self.sent_last_chunk = True
            processed_chunk = self.finish_reason_handler()
            ## LOGGING
            threading.Thread(
                target=self.logging_obj.success_handler, args=(processed_chunk,)
            ).start()  # log response
            return processed_chunk
    except Exception as e:
        traceback_exception = traceback.format_exc()
        # LOG FAILURE - handle streaming failure logging in the _next_ object, remove `handle_failure` once it's deprecated
        threading.Thread(
            target=self.logging_obj.failure_handler, args=(e, traceback_exception)
        ).start()
        if isinstance(e, OpenAIError):
            raise e
        else:
            raise exception_type(
                model=self.model,
                original_exception=e,
                custom_llm_provider=self.custom_llm_provider,
            )
(self)
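The synchronous counterpart works the same way: litellm.completion(..., stream=True) returns this wrapper, and a plain for loop drives the __next__ implementation shown above, each yielded chunk being a ModelResponse built by chunk_creator. A small usage sketch (model and prompt are placeholders):

import litellm

response = litellm.completion(
    model="gpt-3.5-turbo",  # placeholder model name
    messages=[{"role": "user", "content": "Write one sentence about streaming."}],
    stream=True,
)

collected = []
for chunk in response:  # each chunk is a ModelResponse with an OpenAI-style delta
    piece = chunk.choices[0].delta.content or ""
    collected.append(piece)
    print(piece, end="", flush=True)

print("\nfull text:", "".join(collected))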
63,149
litellm.utils
__init__
null
def __init__(
    self,
    completion_stream,
    model,
    custom_llm_provider=None,
    logging_obj=None,
    stream_options=None,
):
    self.model = model
    self.custom_llm_provider = custom_llm_provider
    self.logging_obj = logging_obj
    self.completion_stream = completion_stream
    self.sent_first_chunk = False
    self.sent_last_chunk = False
    self.system_fingerprint: Optional[str] = None
    self.received_finish_reason: Optional[str] = None
    self.special_tokens = ["<|assistant|>", "<|system|>", "<|user|>", "<s>", "</s>"]
    self.holding_chunk = ""
    self.complete_response = ""
    self.response_uptil_now = ""
    _model_info = (
        self.logging_obj.model_call_details.get("litellm_params", {}).get(
            "model_info", {}
        )
        or {}
    )
    self._hidden_params = {
        "model_id": (_model_info.get("id", None))
    }  # returned as x-litellm-model-id response header in proxy
    self.response_id = None
    self.logging_loop = None
    self.rules = Rules()
    self.stream_options = stream_options
(self, completion_stream, model, custom_llm_provider=None, logging_obj=None, stream_options=None)
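The stream_options argument stored here is what chunk_creator later inspects for include_usage, attaching token usage to the final streamed chunk where the provider reports it. A hedged sketch of requesting that through the public API; provider support varies, and the model name is a placeholder:

import litellm

response = litellm.completion(
    model="gpt-3.5-turbo",  # placeholder model name
    messages=[{"role": "user", "content": "ping"}],
    stream=True,
    stream_options={"include_usage": True},  # stored on the wrapper in __init__
)

for chunk in response:
    usage = getattr(chunk, "usage", None)
    if usage is not None:  # typically only present on the final chunk
        print("prompt tokens:", usage.prompt_tokens, "completion tokens:", usage.completion_tokens)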
63,152
litellm.utils
check_special_tokens
Output parse <s> / </s> special tokens for sagemaker + hf streaming.
def check_special_tokens(self, chunk: str, finish_reason: Optional[str]):
    """
    Output parse <s> / </s> special tokens for sagemaker + hf streaming.
    """
    hold = False
    if (
        self.custom_llm_provider != "huggingface"
        and self.custom_llm_provider != "sagemaker"
    ):
        return hold, chunk

    if finish_reason:
        for token in self.special_tokens:
            if token in chunk:
                chunk = chunk.replace(token, "")
        return hold, chunk

    if self.sent_first_chunk is True:
        return hold, chunk

    curr_chunk = self.holding_chunk + chunk
    curr_chunk = curr_chunk.strip()
    for token in self.special_tokens:
        if len(curr_chunk) < len(token) and curr_chunk in token:
            hold = True
            self.holding_chunk = curr_chunk
        elif len(curr_chunk) >= len(token):
            if token in curr_chunk:
                self.holding_chunk = curr_chunk.replace(token, "")
                hold = True
        else:
            pass

    if hold is False:  # reset
        self.holding_chunk = ""

    return hold, curr_chunk
(self, chunk: str, finish_reason: Optional[str])
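The hold / holding_chunk mechanism above buffers text that might still turn out to be the prefix of a special token (for example "<s" before the ">" arrives), so BOS/EOS markers from HuggingFace or SageMaker endpoints are stripped instead of leaking to the caller. A self-contained, simplified version of that buffering idea; the function and variable names are illustrative and this is not the exact litellm logic:

SPECIAL_TOKENS = ["<|assistant|>", "<|system|>", "<|user|>", "<s>", "</s>"]

def filter_stream(chunks):
    # Yield streamed text with special tokens removed, holding back possible token prefixes.
    holding = ""
    for chunk in chunks:
        curr = holding + chunk
        holding = ""
        hold = False
        for token in SPECIAL_TOKENS:
            if len(curr) < len(token) and curr in token:
                hold = True       # could still become a special token - keep buffering
                holding = curr
            elif token in curr:
                curr = curr.replace(token, "")  # strip a completed special token
        if not hold and curr:
            yield curr
    if holding:  # flush whatever is left when the stream ends
        yield holding

print("".join(filter_stream(["<s", ">Hello", " world"])))  # -> "Hello world"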
63,153
litellm.utils
chunk_creator
null
def chunk_creator(self, chunk): model_response = self.model_response_creator() response_obj = {} try: # return this for all models completion_obj = {"content": ""} if self.custom_llm_provider and self.custom_llm_provider == "anthropic": response_obj = self.handle_anthropic_chunk(chunk) completion_obj["content"] = response_obj["text"] if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif ( self.custom_llm_provider and self.custom_llm_provider == "anthropic_text" ): response_obj = self.handle_anthropic_text_chunk(chunk) completion_obj["content"] = response_obj["text"] if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif self.custom_llm_provider and self.custom_llm_provider == "clarifai": response_obj = self.handle_clarifai_completion_chunk(chunk) completion_obj["content"] = response_obj["text"] elif self.model == "replicate" or self.custom_llm_provider == "replicate": response_obj = self.handle_replicate_chunk(chunk) completion_obj["content"] = response_obj["text"] if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif self.custom_llm_provider and self.custom_llm_provider == "together_ai": response_obj = self.handle_together_ai_chunk(chunk) completion_obj["content"] = response_obj["text"] if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif self.custom_llm_provider and self.custom_llm_provider == "huggingface": response_obj = self.handle_huggingface_chunk(chunk) completion_obj["content"] = response_obj["text"] if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif self.custom_llm_provider and self.custom_llm_provider == "predibase": response_obj = self.handle_predibase_chunk(chunk) completion_obj["content"] = response_obj["text"] if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif ( self.custom_llm_provider and self.custom_llm_provider == "baseten" ): # baseten doesn't provide streaming completion_obj["content"] = self.handle_baseten_chunk(chunk) elif ( self.custom_llm_provider and self.custom_llm_provider == "ai21" ): # ai21 doesn't provide streaming response_obj = self.handle_ai21_chunk(chunk) completion_obj["content"] = response_obj["text"] if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif self.custom_llm_provider and self.custom_llm_provider == "maritalk": response_obj = self.handle_maritalk_chunk(chunk) completion_obj["content"] = response_obj["text"] if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif self.custom_llm_provider and self.custom_llm_provider == "vllm": completion_obj["content"] = chunk[0].outputs[0].text elif ( self.custom_llm_provider and self.custom_llm_provider == "aleph_alpha" ): # aleph alpha doesn't provide streaming response_obj = self.handle_aleph_alpha_chunk(chunk) completion_obj["content"] = response_obj["text"] if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif self.custom_llm_provider == "nlp_cloud": try: response_obj = self.handle_nlp_cloud_chunk(chunk) completion_obj["content"] = response_obj["text"] if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] except Exception as e: if self.received_finish_reason: raise e else: if self.sent_first_chunk is False: raise Exception("An unknown error occurred with the stream") 
self.received_finish_reason = "stop" elif self.custom_llm_provider == "gemini": if hasattr(chunk, "parts") == True: try: if len(chunk.parts) > 0: completion_obj["content"] = chunk.parts[0].text if len(chunk.parts) > 0 and hasattr( chunk.parts[0], "finish_reason" ): self.received_finish_reason = chunk.parts[ 0 ].finish_reason.name except: if chunk.parts[0].finish_reason.name == "SAFETY": raise Exception( f"The response was blocked by VertexAI. {str(chunk)}" ) else: completion_obj["content"] = str(chunk) elif self.custom_llm_provider and (self.custom_llm_provider == "vertex_ai"): if self.model.startswith("claude-3"): response_obj = self.handle_vertexai_anthropic_chunk(chunk=chunk) if response_obj is None: return completion_obj["content"] = response_obj["text"] setattr(model_response, "usage", Usage()) if response_obj.get("prompt_tokens", None) is not None: model_response.usage.prompt_tokens = response_obj[ "prompt_tokens" ] if response_obj.get("completion_tokens", None) is not None: model_response.usage.completion_tokens = response_obj[ "completion_tokens" ] if hasattr(model_response.usage, "prompt_tokens"): model_response.usage.total_tokens = ( getattr(model_response.usage, "total_tokens", 0) + model_response.usage.prompt_tokens ) if hasattr(model_response.usage, "completion_tokens"): model_response.usage.total_tokens = ( getattr(model_response.usage, "total_tokens", 0) + model_response.usage.completion_tokens ) if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif hasattr(chunk, "candidates") == True: try: try: completion_obj["content"] = chunk.text except Exception as e: if "Part has no text." in str(e): ## check for function calling function_call = ( chunk.candidates[0].content.parts[0].function_call ) args_dict = {} for k, v in function_call.args.items(): args_dict[k] = v args_str = json.dumps(args_dict) _delta_obj = litellm.utils.Delta( content=None, tool_calls=[ { "id": f"call_{str(uuid.uuid4())}", "function": { "arguments": args_str, "name": function_call.name, }, "type": "function", } ], ) _streaming_response = StreamingChoices(delta=_delta_obj) _model_response = ModelResponse(stream=True) _model_response.choices = [_streaming_response] response_obj = {"original_chunk": _model_response} else: raise e if ( hasattr(chunk.candidates[0], "finish_reason") and chunk.candidates[0].finish_reason.name != "FINISH_REASON_UNSPECIFIED" ): # every non-final chunk in vertex ai has this self.received_finish_reason = chunk.candidates[ 0 ].finish_reason.name except Exception as e: if chunk.candidates[0].finish_reason.name == "SAFETY": raise Exception( f"The response was blocked by VertexAI. 
{str(chunk)}" ) else: completion_obj["content"] = str(chunk) elif self.custom_llm_provider == "cohere": response_obj = self.handle_cohere_chunk(chunk) completion_obj["content"] = response_obj["text"] if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif self.custom_llm_provider == "cohere_chat": response_obj = self.handle_cohere_chat_chunk(chunk) if response_obj is None: return completion_obj["content"] = response_obj["text"] if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif self.custom_llm_provider == "bedrock": if self.received_finish_reason is not None: raise StopIteration response_obj = self.handle_bedrock_stream(chunk) completion_obj["content"] = response_obj["text"] if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif self.custom_llm_provider == "sagemaker": print_verbose(f"ENTERS SAGEMAKER STREAMING for chunk {chunk}") response_obj = self.handle_sagemaker_stream(chunk) completion_obj["content"] = response_obj["text"] if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif self.custom_llm_provider == "petals": if len(self.completion_stream) == 0: if self.received_finish_reason is not None: raise StopIteration else: self.received_finish_reason = "stop" chunk_size = 30 new_chunk = self.completion_stream[:chunk_size] completion_obj["content"] = new_chunk self.completion_stream = self.completion_stream[chunk_size:] time.sleep(0.05) elif self.custom_llm_provider == "palm": # fake streaming response_obj = {} if len(self.completion_stream) == 0: if self.received_finish_reason is not None: raise StopIteration else: self.received_finish_reason = "stop" chunk_size = 30 new_chunk = self.completion_stream[:chunk_size] completion_obj["content"] = new_chunk self.completion_stream = self.completion_stream[chunk_size:] time.sleep(0.05) elif self.custom_llm_provider == "ollama": response_obj = self.handle_ollama_stream(chunk) completion_obj["content"] = response_obj["text"] print_verbose(f"completion obj content: {completion_obj['content']}") if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif self.custom_llm_provider == "ollama_chat": response_obj = self.handle_ollama_chat_stream(chunk) completion_obj["content"] = response_obj["text"] print_verbose(f"completion obj content: {completion_obj['content']}") if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif self.custom_llm_provider == "cloudflare": response_obj = self.handle_cloudlfare_stream(chunk) completion_obj["content"] = response_obj["text"] print_verbose(f"completion obj content: {completion_obj['content']}") if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif self.custom_llm_provider == "watsonx": response_obj = self.handle_watsonx_stream(chunk) completion_obj["content"] = response_obj["text"] if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif self.custom_llm_provider == "text-completion-openai": response_obj = self.handle_openai_text_completion_chunk(chunk) completion_obj["content"] = response_obj["text"] print_verbose(f"completion obj content: {completion_obj['content']}") if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] if ( self.stream_options and self.stream_options.get("include_usage", False) == True ): model_response.usage = response_obj["usage"] 
elif self.custom_llm_provider == "azure_text": response_obj = self.handle_azure_text_completion_chunk(chunk) completion_obj["content"] = response_obj["text"] print_verbose(f"completion obj content: {completion_obj['content']}") if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] elif self.custom_llm_provider == "cached_response": response_obj = { "text": chunk.choices[0].delta.content, "is_finished": True, "finish_reason": chunk.choices[0].finish_reason, "original_chunk": chunk, } completion_obj["content"] = response_obj["text"] print_verbose(f"completion obj content: {completion_obj['content']}") if hasattr(chunk, "id"): model_response.id = chunk.id self.response_id = chunk.id if hasattr(chunk, "system_fingerprint"): self.system_fingerprint = chunk.system_fingerprint if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] else: # openai / azure chat model if self.custom_llm_provider == "azure": if hasattr(chunk, "model"): # for azure, we need to pass the model from the orignal chunk self.model = chunk.model response_obj = self.handle_openai_chat_completion_chunk(chunk) if response_obj == None: return completion_obj["content"] = response_obj["text"] print_verbose(f"completion obj content: {completion_obj['content']}") if response_obj["is_finished"]: if response_obj["finish_reason"] == "error": raise Exception( "Mistral API raised a streaming error - finish_reason: error, no content string given." ) self.received_finish_reason = response_obj["finish_reason"] if response_obj.get("original_chunk", None) is not None: if hasattr(response_obj["original_chunk"], "id"): model_response.id = response_obj["original_chunk"].id self.response_id = model_response.id if hasattr(response_obj["original_chunk"], "system_fingerprint"): model_response.system_fingerprint = response_obj[ "original_chunk" ].system_fingerprint self.system_fingerprint = response_obj[ "original_chunk" ].system_fingerprint if response_obj["logprobs"] is not None: model_response.choices[0].logprobs = response_obj["logprobs"] if ( self.stream_options is not None and self.stream_options["include_usage"] == True ): model_response.usage = response_obj["usage"] model_response.model = self.model print_verbose( f"model_response finish reason 3: {self.received_finish_reason}; response_obj={response_obj}" ) ## FUNCTION CALL PARSING if ( response_obj is not None and response_obj.get("original_chunk", None) is not None ): # function / tool calling branch - only set for openai/azure compatible endpoints # enter this branch when no content has been passed in response original_chunk = response_obj.get("original_chunk", None) model_response.id = original_chunk.id self.response_id = original_chunk.id if len(original_chunk.choices) > 0: if ( original_chunk.choices[0].delta.function_call is not None or original_chunk.choices[0].delta.tool_calls is not None ): try: delta = original_chunk.choices[0].delta model_response.system_fingerprint = ( original_chunk.system_fingerprint ) ## AZURE - check if arguments is not None if ( original_chunk.choices[0].delta.function_call is not None ): if ( getattr( original_chunk.choices[0].delta.function_call, "arguments", ) is None ): original_chunk.choices[ 0 ].delta.function_call.arguments = "" elif original_chunk.choices[0].delta.tool_calls is not None: if isinstance( original_chunk.choices[0].delta.tool_calls, list ): for t in original_chunk.choices[0].delta.tool_calls: if hasattr(t, "functions") and hasattr( t.functions, "arguments" ): if ( 
getattr( t.function, "arguments", ) is None ): t.function.arguments = "" _json_delta = delta.model_dump() print_verbose(f"_json_delta: {_json_delta}") if "role" not in _json_delta or _json_delta["role"] is None: _json_delta["role"] = ( "assistant" # mistral's api returns role as None ) if "tool_calls" in _json_delta and isinstance( _json_delta["tool_calls"], list ): for tool in _json_delta["tool_calls"]: if ( isinstance(tool, dict) and "function" in tool and isinstance(tool["function"], dict) and ("type" not in tool or tool["type"] is None) ): # if function returned but type set to None - mistral's api returns type: None tool["type"] = "function" model_response.choices[0].delta = Delta(**_json_delta) except Exception as e: traceback.print_exc() model_response.choices[0].delta = Delta() else: try: delta = dict(original_chunk.choices[0].delta) print_verbose(f"original delta: {delta}") model_response.choices[0].delta = Delta(**delta) print_verbose( f"new delta: {model_response.choices[0].delta}" ) except Exception as e: model_response.choices[0].delta = Delta() else: if ( self.stream_options is not None and self.stream_options["include_usage"] == True ): return model_response return print_verbose( f"model_response.choices[0].delta: {model_response.choices[0].delta}; completion_obj: {completion_obj}" ) print_verbose(f"self.sent_first_chunk: {self.sent_first_chunk}") ## RETURN ARG if ( "content" in completion_obj and isinstance(completion_obj["content"], str) and len(completion_obj["content"]) == 0 and hasattr(model_response, "usage") and hasattr(model_response.usage, "prompt_tokens") ): if self.sent_first_chunk == False: completion_obj["role"] = "assistant" self.sent_first_chunk = True model_response.choices[0].delta = Delta(**completion_obj) print_verbose(f"returning model_response: {model_response}") return model_response elif ( "content" in completion_obj and isinstance(completion_obj["content"], str) and len(completion_obj["content"]) > 0 ): # cannot set content of an OpenAI Object to be an empty string hold, model_response_str = self.check_special_tokens( chunk=completion_obj["content"], finish_reason=model_response.choices[0].finish_reason, ) # filter out bos/eos tokens from openai-compatible hf endpoints print_verbose( f"hold - {hold}, model_response_str - {model_response_str}" ) if hold is False: ## check if openai/azure chunk original_chunk = response_obj.get("original_chunk", None) if original_chunk: model_response.id = original_chunk.id self.response_id = original_chunk.id if len(original_chunk.choices) > 0: choices = [] for idx, choice in enumerate(original_chunk.choices): try: if isinstance(choice, BaseModel): try: choice_json = choice.model_dump() except Exception as e: choice_json = choice.dict() choice_json.pop( "finish_reason", None ) # for mistral etc. which return a value in their last chunk (not-openai compatible). 
print_verbose(f"choice_json: {choice_json}") choices.append(StreamingChoices(**choice_json)) except Exception as e: choices.append(StreamingChoices()) print_verbose(f"choices in streaming: {choices}") model_response.choices = choices else: return model_response.system_fingerprint = ( original_chunk.system_fingerprint ) print_verbose(f"self.sent_first_chunk: {self.sent_first_chunk}") if self.sent_first_chunk == False: model_response.choices[0].delta["role"] = "assistant" self.sent_first_chunk = True elif self.sent_first_chunk == True and hasattr( model_response.choices[0].delta, "role" ): _initial_delta = model_response.choices[ 0 ].delta.model_dump() _initial_delta.pop("role", None) model_response.choices[0].delta = Delta(**_initial_delta) print_verbose( f"model_response.choices[0].delta: {model_response.choices[0].delta}" ) else: ## else completion_obj["content"] = model_response_str if self.sent_first_chunk == False: completion_obj["role"] = "assistant" self.sent_first_chunk = True model_response.choices[0].delta = Delta(**completion_obj) print_verbose(f"returning model_response: {model_response}") return model_response else: return elif self.received_finish_reason is not None: if self.sent_last_chunk == True: raise StopIteration # flush any remaining holding chunk if len(self.holding_chunk) > 0: if model_response.choices[0].delta.content is None: model_response.choices[0].delta.content = self.holding_chunk else: model_response.choices[0].delta.content = ( self.holding_chunk + model_response.choices[0].delta.content ) self.holding_chunk = "" # if delta is None _is_delta_empty = self.is_delta_empty( delta=model_response.choices[0].delta ) if _is_delta_empty: # get any function call arguments model_response.choices[0].finish_reason = map_finish_reason( finish_reason=self.received_finish_reason ) # ensure consistent output to openai self.sent_last_chunk = True return model_response elif ( model_response.choices[0].delta.tool_calls is not None or model_response.choices[0].delta.function_call is not None ): if self.sent_first_chunk == False: model_response.choices[0].delta["role"] = "assistant" self.sent_first_chunk = True return model_response else: return except StopIteration: raise StopIteration except Exception as e: traceback_exception = traceback.format_exc() e.message = str(e) raise exception_type( model=self.model, custom_llm_provider=self.custom_llm_provider, original_exception=e, )
(self, chunk)
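The tool-call branch in the code above patches two provider quirks before building a Delta: a streamed role of None is defaulted to "assistant", and a tool call whose type comes back as None is labeled "function". A minimal standalone sketch of that normalization, using a hypothetical delta dict rather than a real provider chunk:

# Hypothetical streamed delta, shaped like the dicts the branch above normalizes.
_json_delta = {
    "role": None,  # some providers (e.g. Mistral) stream role as None
    "tool_calls": [
        {
            "index": 0,
            "id": "call_1",
            "type": None,  # type may also come back as None
            "function": {"name": "get_weather", "arguments": '{"city": "Paris"}'},
        }
    ],
}

if "role" not in _json_delta or _json_delta["role"] is None:
    _json_delta["role"] = "assistant"

for tool in _json_delta.get("tool_calls") or []:
    if (
        isinstance(tool, dict)
        and "function" in tool
        and isinstance(tool["function"], dict)
        and ("type" not in tool or tool["type"] is None)
    ):
        tool["type"] = "function"

print(_json_delta["role"], _json_delta["tool_calls"][0]["type"])  # assistant function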
63,154
litellm.utils
finish_reason_handler
null
def finish_reason_handler(self):
    model_response = self.model_response_creator()
    if self.received_finish_reason is not None:
        model_response.choices[0].finish_reason = map_finish_reason(
            finish_reason=self.received_finish_reason
        )
    else:
        model_response.choices[0].finish_reason = "stop"
    return model_response
(self)
63,155
litellm.utils
handle_ai21_chunk
null
def handle_ai21_chunk(self, chunk):  # fake streaming
    chunk = chunk.decode("utf-8")
    data_json = json.loads(chunk)
    try:
        text = data_json["completions"][0]["data"]["text"]
        is_finished = True
        finish_reason = "stop"
        return {
            "text": text,
            "is_finished": is_finished,
            "finish_reason": finish_reason,
        }
    except:
        raise ValueError(f"Unable to parse response. Original response: {chunk}")
(self, chunk)
63,156
litellm.utils
handle_aleph_alpha_chunk
null
def handle_aleph_alpha_chunk(self, chunk):
    chunk = chunk.decode("utf-8")
    data_json = json.loads(chunk)
    try:
        text = data_json["completions"][0]["completion"]
        is_finished = True
        finish_reason = "stop"
        return {
            "text": text,
            "is_finished": is_finished,
            "finish_reason": finish_reason,
        }
    except:
        raise ValueError(f"Unable to parse response. Original response: {chunk}")
(self, chunk)
63,157
litellm.utils
handle_anthropic_chunk
null
def handle_anthropic_chunk(self, chunk):
    str_line = chunk
    if isinstance(chunk, bytes):  # Handle binary data
        str_line = chunk.decode("utf-8")  # Convert bytes to string
    text = ""
    is_finished = False
    finish_reason = None
    if str_line.startswith("data:"):
        data_json = json.loads(str_line[5:])
        type_chunk = data_json.get("type", None)
        if type_chunk == "content_block_delta":
            """
            Anthropic content chunk
            chunk = {'type': 'content_block_delta', 'index': 0, 'delta': {'type': 'text_delta', 'text': 'Hello'}}
            """
            text = data_json.get("delta", {}).get("text", "")
        elif type_chunk == "message_delta":
            """
            Anthropic
            chunk = {'type': 'message_delta', 'delta': {'stop_reason': 'max_tokens', 'stop_sequence': None}, 'usage': {'output_tokens': 10}}
            """
            # TODO - get usage from this chunk, set in response
            finish_reason = data_json.get("delta", {}).get("stop_reason", None)
            is_finished = True
        return {
            "text": text,
            "is_finished": is_finished,
            "finish_reason": finish_reason,
        }
    elif "error" in str_line:
        raise ValueError(f"Unable to parse response. Original response: {str_line}")
    else:
        return {
            "text": text,
            "is_finished": is_finished,
            "finish_reason": finish_reason,
        }
(self, chunk)
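handle_anthropic_chunk strips the leading `data:` prefix and branches on the event `type`. A small standalone sketch of that parse, using hypothetical sample lines modeled on the payload shapes quoted in the code above:

import json

# Illustrative Anthropic SSE lines (hypothetical sample data).
sample_lines = [
    'data: {"type": "content_block_delta", "index": 0, "delta": {"type": "text_delta", "text": "Hello"}}',
    'data: {"type": "message_delta", "delta": {"stop_reason": "max_tokens", "stop_sequence": null}, "usage": {"output_tokens": 10}}',
]

for str_line in sample_lines:
    data_json = json.loads(str_line[5:])  # drop the "data:" prefix
    if data_json.get("type") == "content_block_delta":
        print("text delta:", data_json.get("delta", {}).get("text", ""))
    elif data_json.get("type") == "message_delta":
        print("finished, stop_reason:", data_json.get("delta", {}).get("stop_reason"))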
63,158
litellm.utils
handle_anthropic_text_chunk
null
def handle_anthropic_text_chunk(self, chunk): str_line = chunk if isinstance(chunk, bytes): # Handle binary data str_line = chunk.decode("utf-8") # Convert bytes to string text = "" is_finished = False finish_reason = None if str_line.startswith("data:"): data_json = json.loads(str_line[5:]) type_chunk = data_json.get("type", None) if type_chunk == "completion": text = data_json.get("completion") finish_reason = data_json.get("stop_reason") if finish_reason is not None: is_finished = True return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } elif "error" in str_line: raise ValueError(f"Unable to parse response. Original response: {str_line}") else: return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, }
(self, chunk)
63,159
litellm.utils
handle_azure_chunk
null
def handle_azure_chunk(self, chunk): is_finished = False finish_reason = "" text = "" print_verbose(f"chunk: {chunk}") if "data: [DONE]" in chunk: text = "" is_finished = True finish_reason = "stop" return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } elif chunk.startswith("data:"): data_json = json.loads(chunk[5:]) # chunk.startswith("data:"): try: if len(data_json["choices"]) > 0: text = data_json["choices"][0]["delta"].get("content", "") if data_json["choices"][0].get("finish_reason", None): is_finished = True finish_reason = data_json["choices"][0]["finish_reason"] print_verbose( f"text: {text}; is_finished: {is_finished}; finish_reason: {finish_reason}" ) return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } except: raise ValueError( f"Unable to parse response. Original response: {chunk}" ) elif "error" in chunk: raise ValueError(f"Unable to parse response. Original response: {chunk}") else: return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, }
(self, chunk)
63,160
litellm.utils
handle_azure_text_completion_chunk
null
def handle_azure_text_completion_chunk(self, chunk):
    try:
        print_verbose(f"\nRaw OpenAI Chunk\n{chunk}\n")
        text = ""
        is_finished = False
        finish_reason = None
        choices = getattr(chunk, "choices", [])
        if len(choices) > 0:
            text = choices[0].text
            if choices[0].finish_reason is not None:
                is_finished = True
                finish_reason = choices[0].finish_reason
        return {
            "text": text,
            "is_finished": is_finished,
            "finish_reason": finish_reason,
        }
    except Exception as e:
        raise e
(self, chunk)
63,161
litellm.utils
handle_baseten_chunk
null
def handle_baseten_chunk(self, chunk): try: chunk = chunk.decode("utf-8") if len(chunk) > 0: if chunk.startswith("data:"): data_json = json.loads(chunk[5:]) if "token" in data_json and "text" in data_json["token"]: return data_json["token"]["text"] else: return "" data_json = json.loads(chunk) if "model_output" in data_json: if ( isinstance(data_json["model_output"], dict) and "data" in data_json["model_output"] and isinstance(data_json["model_output"]["data"], list) ): return data_json["model_output"]["data"][0] elif isinstance(data_json["model_output"], str): return data_json["model_output"] elif "completion" in data_json and isinstance( data_json["completion"], str ): return data_json["completion"] else: raise ValueError( f"Unable to parse response. Original response: {chunk}" ) else: return "" else: return "" except: traceback.print_exc() return ""
(self, chunk)
63,162
litellm.utils
handle_bedrock_stream
null
def handle_bedrock_stream(self, chunk): if "cohere" in self.model: return { "text": chunk["text"], "is_finished": chunk["is_finished"], "finish_reason": chunk["finish_reason"], } if hasattr(chunk, "get"): chunk = chunk.get("chunk") chunk_data = json.loads(chunk.get("bytes").decode()) else: chunk_data = json.loads(chunk.decode()) if chunk_data: text = "" is_finished = False finish_reason = "" if "outputText" in chunk_data: text = chunk_data["outputText"] # ai21 mapping if "ai21" in self.model: # fake ai21 streaming text = chunk_data.get("completions")[0].get("data").get("text") is_finished = True finish_reason = "stop" ######## bedrock.anthropic mappings ############### elif "completion" in chunk_data: # not claude-3 text = chunk_data["completion"] # bedrock.anthropic stop_reason = chunk_data.get("stop_reason", None) if stop_reason != None: is_finished = True finish_reason = stop_reason elif "delta" in chunk_data: if chunk_data["delta"].get("text", None) is not None: text = chunk_data["delta"]["text"] stop_reason = chunk_data["delta"].get("stop_reason", None) if stop_reason != None: is_finished = True finish_reason = stop_reason ######## bedrock.mistral mappings ############### elif "outputs" in chunk_data: if ( len(chunk_data["outputs"]) == 1 and chunk_data["outputs"][0].get("text", None) is not None ): text = chunk_data["outputs"][0]["text"] stop_reason = chunk_data.get("stop_reason", None) if stop_reason != None: is_finished = True finish_reason = stop_reason ######## bedrock.cohere mappings ############### # meta mapping elif "generation" in chunk_data: text = chunk_data["generation"] # bedrock.meta # cohere mapping elif "text" in chunk_data: text = chunk_data["text"] # bedrock.cohere # cohere mapping for finish reason elif "finish_reason" in chunk_data: finish_reason = chunk_data["finish_reason"] is_finished = True elif chunk_data.get("completionReason", None): is_finished = True finish_reason = chunk_data["completionReason"] elif chunk.get("error", None): raise Exception(chunk["error"]) return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } return ""
(self, chunk)
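handle_bedrock_stream first decodes the event's `bytes` payload, then branches on provider-specific keys (`completion`, `outputs`, `generation`, `text`, ...). A minimal sketch of the decode step, with a hypothetical Anthropic-on-Bedrock event:

import json

# Hypothetical Bedrock stream event, shaped like {"chunk": {"bytes": b"..."}}.
event = {"chunk": {"bytes": json.dumps({"completion": " Hello", "stop_reason": None}).encode()}}

chunk_data = json.loads(event.get("chunk").get("bytes").decode())
text = ""
is_finished = False
finish_reason = ""
if "completion" in chunk_data:  # bedrock.anthropic (non claude-3) mapping
    text = chunk_data["completion"]
    stop_reason = chunk_data.get("stop_reason", None)
    if stop_reason is not None:
        is_finished = True
        finish_reason = stop_reason
print({"text": text, "is_finished": is_finished, "finish_reason": finish_reason})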
63,163
litellm.utils
handle_clarifai_completion_chunk
null
def handle_clarifai_completion_chunk(self, chunk): try: if isinstance(chunk, dict): parsed_response = chunk if isinstance(chunk, (str, bytes)): if isinstance(chunk, bytes): parsed_response = chunk.decode("utf-8") else: parsed_response = chunk data_json = json.loads(parsed_response) text = ( data_json.get("outputs", "")[0] .get("data", "") .get("text", "") .get("raw", "") ) prompt_tokens = len( encoding.encode( data_json.get("outputs", "")[0] .get("input", "") .get("data", "") .get("text", "") .get("raw", "") ) ) completion_tokens = len(encoding.encode(text)) return { "text": text, "is_finished": True, } except: traceback.print_exc() return ""
(self, chunk)
63,164
litellm.utils
handle_cloudlfare_stream
null
def handle_cloudlfare_stream(self, chunk): try: print_verbose(f"\nRaw OpenAI Chunk\n{chunk}\n") chunk = chunk.decode("utf-8") str_line = chunk text = "" is_finished = False finish_reason = None if "[DONE]" in chunk: return {"text": text, "is_finished": True, "finish_reason": "stop"} elif str_line.startswith("data:"): data_json = json.loads(str_line[5:]) print_verbose(f"delta content: {data_json}") text = data_json["response"] return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } else: return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } except Exception as e: raise e
(self, chunk)
63,165
litellm.utils
handle_cohere_chat_chunk
null
def handle_cohere_chat_chunk(self, chunk):
    chunk = chunk.decode("utf-8")
    data_json = json.loads(chunk)
    print_verbose(f"chunk: {chunk}")
    try:
        text = ""
        is_finished = False
        finish_reason = ""
        if "text" in data_json:
            text = data_json["text"]
        elif "is_finished" in data_json and data_json["is_finished"] == True:
            is_finished = data_json["is_finished"]
            finish_reason = data_json["finish_reason"]
        else:
            return
        return {
            "text": text,
            "is_finished": is_finished,
            "finish_reason": finish_reason,
        }
    except:
        raise ValueError(f"Unable to parse response. Original response: {chunk}")
(self, chunk)
63,166
litellm.utils
handle_cohere_chunk
null
def handle_cohere_chunk(self, chunk):
    chunk = chunk.decode("utf-8")
    data_json = json.loads(chunk)
    try:
        text = ""
        is_finished = False
        finish_reason = ""
        if "text" in data_json:
            text = data_json["text"]
        elif "is_finished" in data_json:
            is_finished = data_json["is_finished"]
            finish_reason = data_json["finish_reason"]
        else:
            raise Exception(data_json)
        return {
            "text": text,
            "is_finished": is_finished,
            "finish_reason": finish_reason,
        }
    except:
        raise ValueError(f"Unable to parse response. Original response: {chunk}")
(self, chunk)
63,167
litellm.utils
handle_huggingface_chunk
null
def handle_huggingface_chunk(self, chunk): try: if type(chunk) != str: chunk = chunk.decode( "utf-8" ) # DO NOT REMOVE this: This is required for HF inference API + Streaming text = "" is_finished = False finish_reason = "" print_verbose(f"chunk: {chunk}") if chunk.startswith("data:"): data_json = json.loads(chunk[5:]) print_verbose(f"data json: {data_json}") if "token" in data_json and "text" in data_json["token"]: text = data_json["token"]["text"] if data_json.get("details", False) and data_json["details"].get( "finish_reason", False ): is_finished = True finish_reason = data_json["details"]["finish_reason"] elif data_json.get( "generated_text", False ): # if full generated text exists, then stream is complete text = "" # don't return the final bos token is_finished = True finish_reason = "stop" elif data_json.get("error", False): raise Exception(data_json.get("error")) return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } elif "error" in chunk: raise ValueError(chunk) return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } except Exception as e: traceback.print_exc() raise e
(self, chunk)
63,168
litellm.utils
handle_maritalk_chunk
null
def handle_maritalk_chunk(self, chunk):  # fake streaming
    chunk = chunk.decode("utf-8")
    data_json = json.loads(chunk)
    try:
        text = data_json["answer"]
        is_finished = True
        finish_reason = "stop"
        return {
            "text": text,
            "is_finished": is_finished,
            "finish_reason": finish_reason,
        }
    except:
        raise ValueError(f"Unable to parse response. Original response: {chunk}")
(self, chunk)
63,169
litellm.utils
handle_nlp_cloud_chunk
null
def handle_nlp_cloud_chunk(self, chunk):
    text = ""
    is_finished = False
    finish_reason = ""
    try:
        if "dolphin" in self.model:
            chunk = self.process_chunk(chunk=chunk)
        else:
            data_json = json.loads(chunk)
            chunk = data_json["generated_text"]
        text = chunk
        if "[DONE]" in text:
            text = text.replace("[DONE]", "")
            is_finished = True
            finish_reason = "stop"
        return {
            "text": text,
            "is_finished": is_finished,
            "finish_reason": finish_reason,
        }
    except Exception as e:
        raise ValueError(f"Unable to parse response. Original response: {chunk}")
(self, chunk)
63,170
litellm.utils
handle_ollama_chat_stream
null
def handle_ollama_chat_stream(self, chunk): # for ollama_chat/ provider try: if isinstance(chunk, dict): json_chunk = chunk else: json_chunk = json.loads(chunk) if "error" in json_chunk: raise Exception(f"Ollama Error - {json_chunk}") text = "" is_finished = False finish_reason = None if json_chunk["done"] == True: text = "" is_finished = True finish_reason = "stop" return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } elif "message" in json_chunk: print_verbose(f"delta content: {json_chunk}") text = json_chunk["message"]["content"] return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } else: raise Exception(f"Ollama Error - {json_chunk}") except Exception as e: raise e
(self, chunk)
63,171
litellm.utils
handle_ollama_stream
null
def handle_ollama_stream(self, chunk): try: if isinstance(chunk, dict): json_chunk = chunk else: json_chunk = json.loads(chunk) if "error" in json_chunk: raise Exception(f"Ollama Error - {json_chunk}") text = "" is_finished = False finish_reason = None if json_chunk["done"] == True: text = "" is_finished = True finish_reason = "stop" return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } elif json_chunk["response"]: print_verbose(f"delta content: {json_chunk}") text = json_chunk["response"] return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } else: raise Exception(f"Ollama Error - {json_chunk}") except Exception as e: raise e
(self, chunk)
63,172
litellm.utils
handle_openai_chat_completion_chunk
null
def handle_openai_chat_completion_chunk(self, chunk): try: print_verbose(f"\nRaw OpenAI Chunk\n{chunk}\n") str_line = chunk text = "" is_finished = False finish_reason = None logprobs = None usage = None original_chunk = None # this is used for function/tool calling if len(str_line.choices) > 0: if ( str_line.choices[0].delta is not None and str_line.choices[0].delta.content is not None ): text = str_line.choices[0].delta.content else: # function/tool calling chunk - when content is None. in this case we just return the original chunk from openai original_chunk = str_line if str_line.choices[0].finish_reason: is_finished = True finish_reason = str_line.choices[0].finish_reason if finish_reason == "content_filter": if hasattr(str_line.choices[0], "content_filter_result"): error_message = json.dumps( str_line.choices[0].content_filter_result ) else: error_message = "Azure Response={}".format( str(dict(str_line)) ) raise litellm.AzureOpenAIError( status_code=400, message=error_message ) # checking for logprobs if ( hasattr(str_line.choices[0], "logprobs") and str_line.choices[0].logprobs is not None ): logprobs = str_line.choices[0].logprobs else: logprobs = None usage = getattr(str_line, "usage", None) return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, "logprobs": logprobs, "original_chunk": str_line, "usage": usage, } except Exception as e: traceback.print_exc() raise e
(self, chunk)
63,173
litellm.utils
handle_openai_text_completion_chunk
null
def handle_openai_text_completion_chunk(self, chunk): try: print_verbose(f"\nRaw OpenAI Chunk\n{chunk}\n") text = "" is_finished = False finish_reason = None usage = None choices = getattr(chunk, "choices", []) if len(choices) > 0: text = choices[0].text if choices[0].finish_reason is not None: is_finished = True finish_reason = choices[0].finish_reason usage = getattr(chunk, "usage", None) return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, "usage": usage, } except Exception as e: raise e
(self, chunk)
63,174
litellm.utils
handle_predibase_chunk
null
def handle_predibase_chunk(self, chunk): try: if type(chunk) != str: chunk = chunk.decode( "utf-8" ) # DO NOT REMOVE this: This is required for HF inference API + Streaming text = "" is_finished = False finish_reason = "" print_verbose(f"chunk: {chunk}") if chunk.startswith("data:"): data_json = json.loads(chunk[5:]) print_verbose(f"data json: {data_json}") if "token" in data_json and "text" in data_json["token"]: text = data_json["token"]["text"] if data_json.get("details", False) and data_json["details"].get( "finish_reason", False ): is_finished = True finish_reason = data_json["details"]["finish_reason"] elif data_json.get( "generated_text", False ): # if full generated text exists, then stream is complete text = "" # don't return the final bos token is_finished = True finish_reason = "stop" elif data_json.get("error", False): raise Exception(data_json.get("error")) return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } elif "error" in chunk: raise ValueError(chunk) return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } except Exception as e: traceback.print_exc() raise e
(self, chunk)
63,175
litellm.utils
handle_replicate_chunk
null
def handle_replicate_chunk(self, chunk):
    try:
        text = ""
        is_finished = False
        finish_reason = ""
        if "output" in chunk:
            text = chunk["output"]
        if "status" in chunk:
            if chunk["status"] == "succeeded":
                is_finished = True
                finish_reason = "stop"
        elif chunk.get("error", None):
            raise Exception(chunk["error"])
        return {
            "text": text,
            "is_finished": is_finished,
            "finish_reason": finish_reason,
        }
    except:
        raise ValueError(f"Unable to parse response. Original response: {chunk}")
(self, chunk)
63,176
litellm.utils
handle_sagemaker_stream
null
def handle_sagemaker_stream(self, chunk):
    if "data: [DONE]" in chunk:
        text = ""
        is_finished = True
        finish_reason = "stop"
        return {
            "text": text,
            "is_finished": is_finished,
            "finish_reason": finish_reason,
        }
    elif isinstance(chunk, dict):
        if chunk["is_finished"] == True:
            finish_reason = "stop"
        else:
            finish_reason = ""
        return {
            "text": chunk["text"],
            "is_finished": chunk["is_finished"],
            "finish_reason": finish_reason,
        }
(self, chunk)
63,177
litellm.utils
handle_together_ai_chunk
null
def handle_together_ai_chunk(self, chunk): chunk = chunk.decode("utf-8") text = "" is_finished = False finish_reason = None if "text" in chunk: text_index = chunk.find('"text":"') # this checks if text: exists text_start = text_index + len('"text":"') text_end = chunk.find('"}', text_start) if text_index != -1 and text_end != -1: extracted_text = chunk[text_start:text_end] text = extracted_text return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, } elif "[DONE]" in chunk: return {"text": text, "is_finished": True, "finish_reason": "stop"} elif "error" in chunk: raise litellm.together_ai.TogetherAIError( status_code=422, message=f"{str(chunk)}" ) else: return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, }
(self, chunk)
63,178
litellm.utils
handle_vertexai_anthropic_chunk
- MessageStartEvent(message=Message(id='msg_01LeRRgvX4gwkX3ryBVgtuYZ', content=[], model='claude-3-sonnet-20240229', role='assistant', stop_reason=None, stop_sequence=None, type='message', usage=Usage(input_tokens=8, output_tokens=1)), type='message_start'); custom_llm_provider: vertex_ai - ContentBlockStartEvent(content_block=ContentBlock(text='', type='text'), index=0, type='content_block_start'); custom_llm_provider: vertex_ai - ContentBlockDeltaEvent(delta=TextDelta(text='Hello', type='text_delta'), index=0, type='content_block_delta'); custom_llm_provider: vertex_ai
def handle_vertexai_anthropic_chunk(self, chunk): """ - MessageStartEvent(message=Message(id='msg_01LeRRgvX4gwkX3ryBVgtuYZ', content=[], model='claude-3-sonnet-20240229', role='assistant', stop_reason=None, stop_sequence=None, type='message', usage=Usage(input_tokens=8, output_tokens=1)), type='message_start'); custom_llm_provider: vertex_ai - ContentBlockStartEvent(content_block=ContentBlock(text='', type='text'), index=0, type='content_block_start'); custom_llm_provider: vertex_ai - ContentBlockDeltaEvent(delta=TextDelta(text='Hello', type='text_delta'), index=0, type='content_block_delta'); custom_llm_provider: vertex_ai """ text = "" prompt_tokens = None completion_tokens = None is_finished = False finish_reason = None type_chunk = getattr(chunk, "type", None) if type_chunk == "message_start": message = getattr(chunk, "message", None) text = "" # lets us return a chunk with usage to user _usage = getattr(message, "usage", None) if _usage is not None: prompt_tokens = getattr(_usage, "input_tokens", None) completion_tokens = getattr(_usage, "output_tokens", None) elif type_chunk == "content_block_delta": """ Anthropic content chunk chunk = {'type': 'content_block_delta', 'index': 0, 'delta': {'type': 'text_delta', 'text': 'Hello'}} """ delta = getattr(chunk, "delta", None) if delta is not None: text = getattr(delta, "text", "") else: text = "" elif type_chunk == "message_delta": """ Anthropic chunk = {'type': 'message_delta', 'delta': {'stop_reason': 'max_tokens', 'stop_sequence': None}, 'usage': {'output_tokens': 10}} """ # TODO - get usage from this chunk, set in response delta = getattr(chunk, "delta", None) if delta is not None: finish_reason = getattr(delta, "stop_reason", "stop") is_finished = True _usage = getattr(chunk, "usage", None) if _usage is not None: prompt_tokens = getattr(_usage, "input_tokens", None) completion_tokens = getattr(_usage, "output_tokens", None) return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, "prompt_tokens": prompt_tokens, "completion_tokens": completion_tokens, }
(self, chunk)
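The event objects listed in the docstring are attribute-based, so handle_vertexai_anthropic_chunk reads them with getattr. A standalone sketch of the same mapping, using types.SimpleNamespace stand-ins for the SDK's event classes (field names follow the docstring above; the values are illustrative):

from types import SimpleNamespace

chunks = [
    SimpleNamespace(type="message_start",
                    message=SimpleNamespace(usage=SimpleNamespace(input_tokens=8, output_tokens=1))),
    SimpleNamespace(type="content_block_delta",
                    delta=SimpleNamespace(text="Hello")),
    SimpleNamespace(type="message_delta",
                    delta=SimpleNamespace(stop_reason="max_tokens"),
                    usage=SimpleNamespace(input_tokens=8, output_tokens=10)),
]

for chunk in chunks:
    if chunk.type == "message_start":
        print("prompt_tokens:", chunk.message.usage.input_tokens)
    elif chunk.type == "content_block_delta":
        print("text:", getattr(chunk.delta, "text", ""))
    elif chunk.type == "message_delta":
        print("finished, stop_reason:", getattr(chunk.delta, "stop_reason", "stop"))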
63,179
litellm.utils
handle_watsonx_stream
null
def handle_watsonx_stream(self, chunk): try: if isinstance(chunk, dict): parsed_response = chunk elif isinstance(chunk, (str, bytes)): if isinstance(chunk, bytes): chunk = chunk.decode("utf-8") if "generated_text" in chunk: response = chunk.replace("data: ", "").strip() parsed_response = json.loads(response) else: return { "text": "", "is_finished": False, "prompt_tokens": 0, "completion_tokens": 0, } else: print_verbose(f"chunk: {chunk} (Type: {type(chunk)})") raise ValueError( f"Unable to parse response. Original response: {chunk}" ) results = parsed_response.get("results", []) if len(results) > 0: text = results[0].get("generated_text", "") finish_reason = results[0].get("stop_reason") is_finished = finish_reason != "not_finished" return { "text": text, "is_finished": is_finished, "finish_reason": finish_reason, "prompt_tokens": results[0].get("input_token_count", 0), "completion_tokens": results[0].get("generated_token_count", 0), } return {"text": "", "is_finished": False} except Exception as e: raise e
(self, chunk)
63,180
litellm.utils
is_delta_empty
null
def is_delta_empty(self, delta: Delta) -> bool:
    is_empty = True
    if delta.content is not None:
        is_empty = False
    elif delta.tool_calls is not None:
        is_empty = False
    elif delta.function_call is not None:
        is_empty = False
    return is_empty
(self, delta: litellm.utils.Delta) -> bool
63,181
litellm.utils
model_response_creator
null
def model_response_creator(self):
    model_response = ModelResponse(
        stream=True, model=self.model, stream_options=self.stream_options
    )
    if self.response_id is not None:
        model_response.id = self.response_id
    else:
        self.response_id = model_response.id
    if self.system_fingerprint is not None:
        model_response.system_fingerprint = self.system_fingerprint
    model_response._hidden_params["custom_llm_provider"] = self.custom_llm_provider
    model_response._hidden_params["created_at"] = time.time()
    model_response.choices = [StreamingChoices()]
    model_response.choices[0].finish_reason = None
    return model_response
(self)
63,182
litellm.utils
process_chunk
NLP Cloud streaming returns the entire response, for each chunk. Process this, to only return the delta.
def process_chunk(self, chunk: str):
    """
    NLP Cloud streaming returns the entire response, for each chunk. Process this, to only return the delta.
    """
    try:
        chunk = chunk.strip()
        self.complete_response = self.complete_response.strip()
        if chunk.startswith(self.complete_response):
            # Remove last_sent_chunk only if it appears at the start of the new chunk
            chunk = chunk[len(self.complete_response) :]
        self.complete_response += chunk
        return chunk
    except Exception as e:
        raise e
(self, chunk: str)
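A minimal sketch of the delta logic process_chunk implements, outside the streaming wrapper: each streamed payload carries the full text generated so far, and only the new suffix should be surfaced. The cumulative responses below are hypothetical.

complete_response = ""
for chunk in ["Hello", "Hello, wor", "Hello, world!"]:
    chunk = chunk.strip()
    if chunk.startswith(complete_response):
        # keep only the part not already sent
        delta = chunk[len(complete_response):]
    else:
        delta = chunk
    complete_response += delta
    print(repr(delta))  # 'Hello', ', wor', 'ld!'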
63,183
litellm.utils
run_success_logging_in_thread
null
def run_success_logging_in_thread(self, processed_chunk):
    if litellm.disable_streaming_logging == True:
        """
        [NOT RECOMMENDED]
        Set this via `litellm.disable_streaming_logging = True`.
        Disables streaming logging.
        """
        return
    ## ASYNC LOGGING
    # Create an event loop for the new thread
    if self.logging_loop is not None:
        future = asyncio.run_coroutine_threadsafe(
            self.logging_obj.async_success_handler(processed_chunk),
            loop=self.logging_loop,
        )
        result = future.result()
    else:
        asyncio.run(self.logging_obj.async_success_handler(processed_chunk))
    ## SYNC LOGGING
    self.logging_obj.success_handler(processed_chunk)
(self, processed_chunk)
63,184
litellm.utils
set_logging_event_loop
import litellm, asyncio loop = asyncio.get_event_loop() # 👈 gets the current event loop response = litellm.completion(.., stream=True) response.set_logging_event_loop(loop=loop) # 👈 enables async_success callbacks for sync logging for chunk in response: ...
def set_logging_event_loop(self, loop):
    """
    import litellm, asyncio

    loop = asyncio.get_event_loop() # 👈 gets the current event loop

    response = litellm.completion(.., stream=True)

    response.set_logging_event_loop(loop=loop) # 👈 enables async_success callbacks for sync logging

    for chunk in response:
        ...
    """
    self.logging_loop = loop
(self, loop)
63,185
litellm.types.router
Deployment
null
class Deployment(BaseModel): model_name: str litellm_params: LiteLLM_Params model_info: ModelInfo def __init__( self, model_name: str, litellm_params: LiteLLM_Params, model_info: Optional[Union[ModelInfo, dict]] = None, **params ): if model_info is None: model_info = ModelInfo() elif isinstance(model_info, dict): model_info = ModelInfo(**model_info) super().__init__( model_info=model_info, model_name=model_name, litellm_params=litellm_params, **params ) def to_json(self, **kwargs): try: return self.model_dump(**kwargs) # noqa except Exception as e: # if using pydantic v1 return self.dict(**kwargs) class Config: extra = "allow" protected_namespaces = () def __contains__(self, key): # Define custom behavior for the 'in' operator return hasattr(self, key) def get(self, key, default=None): # Custom .get() method to access attributes with a default value if the attribute doesn't exist return getattr(self, key, default) def __getitem__(self, key): # Allow dictionary-style access to attributes return getattr(self, key) def __setitem__(self, key, value): # Allow dictionary-style assignment of attributes setattr(self, key, value)
(model_name: str, litellm_params: litellm.types.router.LiteLLM_Params, model_info: litellm.types.router.ModelInfo = None, **params) -> None
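Deployment layers dictionary-style access (`in`, `.get()`, `[...]`) on top of a pydantic model via the dunder methods shown above. The same pattern, reproduced in isolation on a toy model of our own rather than litellm's classes, so it runs without router configuration:

from pydantic import BaseModel

class DictStyle(BaseModel):
    """Toy model reusing Deployment's __contains__/get/__getitem__ pattern."""
    model_name: str

    def __contains__(self, key):
        # lets callers write: "model_name" in instance
        return hasattr(self, key)

    def get(self, key, default=None):
        # dict-like .get() with a fallback default
        return getattr(self, key, default)

    def __getitem__(self, key):
        # dict-like instance["model_name"] access
        return getattr(self, key)

d = DictStyle(model_name="gpt-3.5-turbo")
print("model_name" in d, d["model_name"], d.get("missing", "n/a"))  # True gpt-3.5-turbo n/a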
63,194
litellm.types.router
__init__
null
def __init__(
    self,
    model_name: str,
    litellm_params: LiteLLM_Params,
    model_info: Optional[Union[ModelInfo, dict]] = None,
    **params
):
    if model_info is None:
        model_info = ModelInfo()
    elif isinstance(model_info, dict):
        model_info = ModelInfo(**model_info)
    super().__init__(
        model_info=model_info,
        model_name=model_name,
        litellm_params=litellm_params,
        **params
    )
(self, model_name: str, litellm_params: litellm.types.router.LiteLLM_Params, model_info: Union[litellm.types.router.ModelInfo, dict, NoneType] = None, **params)
63,218
litellm.types.router
to_json
null
def to_json(self, **kwargs):
    try:
        return self.model_dump(**kwargs)  # noqa
    except Exception as e:
        # if using pydantic v1
        return self.dict(**kwargs)
(self, **kwargs)
63,219
litellm.types.embedding
EmbeddingRequest
null
class EmbeddingRequest(BaseModel): model: str input: List[str] = [] timeout: int = 600 api_base: Optional[str] = None api_version: Optional[str] = None api_key: Optional[str] = None api_type: Optional[str] = None caching: bool = False user: Optional[str] = None custom_llm_provider: Optional[Union[str, dict]] = None litellm_call_id: Optional[str] = None litellm_logging_obj: Optional[dict] = None logger_fn: Optional[str] = None class Config: # allow kwargs extra = "allow"
(*, model: str, input: List[str] = [], timeout: int = 600, api_base: Optional[str] = None, api_version: Optional[str] = None, api_key: Optional[str] = None, api_type: Optional[str] = None, caching: bool = False, user: Optional[str] = None, custom_llm_provider: Union[str, dict, NoneType] = None, litellm_call_id: Optional[str] = None, litellm_logging_obj: Optional[dict] = None, logger_fn: Optional[str] = None, **extra_data: Any) -> None
63,248
litellm.utils
EmbeddingResponse
null
class EmbeddingResponse(OpenAIObject): model: Optional[str] = None """The model used for embedding.""" data: Optional[List] = None """The actual embedding value""" object: str """The object type, which is always "embedding" """ usage: Optional[Usage] = None """Usage statistics for the embedding request.""" _hidden_params: dict = {} def __init__( self, model=None, usage=None, stream=False, response_ms=None, data=None ): object = "list" if response_ms: _response_ms = response_ms else: _response_ms = None if data: data = data else: data = None if usage: usage = usage else: usage = Usage() model = model super().__init__(model=model, object=object, data=data, usage=usage) def __contains__(self, key): # Define custom behavior for the 'in' operator return hasattr(self, key) def get(self, key, default=None): # Custom .get() method to access attributes with a default value if the attribute doesn't exist return getattr(self, key, default) def __getitem__(self, key): # Allow dictionary-style access to attributes return getattr(self, key) def __setitem__(self, key, value): # Allow dictionary-style assignment of attributes setattr(self, key, value) def json(self, **kwargs): try: return self.model_dump() # noqa except: # if using pydantic v1 return self.dict()
(model=None, usage=None, stream=False, response_ms=None, data=None)
63,257
litellm.utils
__init__
null
def __init__(
    self, model=None, usage=None, stream=False, response_ms=None, data=None
):
    object = "list"
    if response_ms:
        _response_ms = response_ms
    else:
        _response_ms = None
    if data:
        data = data
    else:
        data = None
    if usage:
        usage = usage
    else:
        usage = Usage()
    model = model
    super().__init__(model=model, object=object, data=data, usage=usage)
(self, model=None, usage=None, stream=False, response_ms=None, data=None)
63,276
litellm.utils
json
null
def json(self, **kwargs):
    try:
        return self.model_dump()  # noqa
    except:
        # if using pydantic v1
        return self.dict()
(self, **kwargs)
63,280
pydantic._internal._model_construction
init_private_attributes
This function is meant to behave like a BaseModel method to initialise private attributes. It takes context as an argument since that's what pydantic-core passes when calling it. Args: self: The BaseModel instance. __context: The context.
def init_private_attributes(self: BaseModel, __context: Any) -> None:
    """This function is meant to behave like a BaseModel method to initialise private attributes.

    It takes context as an argument since that's what pydantic-core passes when calling it.

    Args:
        self: The BaseModel instance.
        __context: The context.
    """
    if getattr(self, '__pydantic_private__', None) is None:
        pydantic_private = {}
        for name, private_attr in self.__private_attributes__.items():
            default = private_attr.get_default()
            if default is not PydanticUndefined:
                pydantic_private[name] = default
        object_setattr(self, '__pydantic_private__', pydantic_private)
(self: 'BaseModel', __context: 'Any') -> 'None'
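This hook is what populates `__pydantic_private__` with declared private-attribute defaults when a model is instantiated. A short usage sketch with pydantic v2's public PrivateAttr API:

from pydantic import BaseModel, PrivateAttr

class Counter(BaseModel):
    name: str
    _hits: int = PrivateAttr(default=0)  # default is installed by the init hook above

c = Counter(name="requests")
c._hits += 1
print(c._hits)  # 1 -- the private attribute started from its declared default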
63,283
pydantic.fields
Field
Usage docs: https://docs.pydantic.dev/2.7/concepts/fields Create a field for objects that can be configured. Used to provide extra information about a field, either for the model schema or complex validation. Some arguments apply only to number fields (`int`, `float`, `Decimal`) and some apply only to `str`. Note: - Any `_Unset` objects will be replaced by the corresponding value defined in the `_DefaultValues` dictionary. If a key for the `_Unset` object is not found in the `_DefaultValues` dictionary, it will default to `None` Args: default: Default value if the field is not set. default_factory: A callable to generate the default value, such as :func:`~datetime.utcnow`. alias: The name to use for the attribute when validating or serializing by alias. This is often used for things like converting between snake and camel case. alias_priority: Priority of the alias. This affects whether an alias generator is used. validation_alias: Like `alias`, but only affects validation, not serialization. serialization_alias: Like `alias`, but only affects serialization, not validation. title: Human-readable title. description: Human-readable description. examples: Example values for this field. exclude: Whether to exclude the field from the model serialization. discriminator: Field name or Discriminator for discriminating the type in a tagged union. deprecated: A deprecation message, an instance of `warnings.deprecated` or the `typing_extensions.deprecated` backport, or a boolean. If `True`, a default deprecation message will be emitted when accessing the field. json_schema_extra: A dict or callable to provide extra JSON schema properties. frozen: Whether the field is frozen. If true, attempts to change the value on an instance will raise an error. validate_default: If `True`, apply validation to the default value every time you create an instance. Otherwise, for performance reasons, the default value of the field is trusted and not validated. repr: A boolean indicating whether to include the field in the `__repr__` output. init: Whether the field should be included in the constructor of the dataclass. (Only applies to dataclasses.) init_var: Whether the field should _only_ be included in the constructor of the dataclass. (Only applies to dataclasses.) kw_only: Whether the field should be a keyword-only argument in the constructor of the dataclass. (Only applies to dataclasses.) coerce_numbers_to_str: Whether to enable coercion of any `Number` type to `str` (not applicable in `strict` mode). strict: If `True`, strict validation is applied to the field. See [Strict Mode](../concepts/strict_mode.md) for details. gt: Greater than. If set, value must be greater than this. Only applicable to numbers. ge: Greater than or equal. If set, value must be greater than or equal to this. Only applicable to numbers. lt: Less than. If set, value must be less than this. Only applicable to numbers. le: Less than or equal. If set, value must be less than or equal to this. Only applicable to numbers. multiple_of: Value must be a multiple of this. Only applicable to numbers. min_length: Minimum length for iterables. max_length: Maximum length for iterables. pattern: Pattern for strings (a regular expression). allow_inf_nan: Allow `inf`, `-inf`, `nan`. Only applicable to numbers. max_digits: Maximum number of allow digits for strings. decimal_places: Maximum number of decimal places allowed for numbers. union_mode: The strategy to apply when validating a union. Can be `smart` (the default), or `left_to_right`. 
See [Union Mode](standard_library_types.md#union-mode) for details. extra: (Deprecated) Extra fields that will be included in the JSON schema. !!! warning Deprecated The `extra` kwargs is deprecated. Use `json_schema_extra` instead. Returns: A new [`FieldInfo`][pydantic.fields.FieldInfo]. The return annotation is `Any` so `Field` can be used on type-annotated fields without causing a type error.
def Field( # noqa: C901 default: Any = PydanticUndefined, *, default_factory: typing.Callable[[], Any] | None = _Unset, alias: str | None = _Unset, alias_priority: int | None = _Unset, validation_alias: str | AliasPath | AliasChoices | None = _Unset, serialization_alias: str | None = _Unset, title: str | None = _Unset, description: str | None = _Unset, examples: list[Any] | None = _Unset, exclude: bool | None = _Unset, discriminator: str | types.Discriminator | None = _Unset, deprecated: Deprecated | str | bool | None = _Unset, json_schema_extra: JsonDict | typing.Callable[[JsonDict], None] | None = _Unset, frozen: bool | None = _Unset, validate_default: bool | None = _Unset, repr: bool = _Unset, init: bool | None = _Unset, init_var: bool | None = _Unset, kw_only: bool | None = _Unset, pattern: str | typing.Pattern[str] | None = _Unset, strict: bool | None = _Unset, coerce_numbers_to_str: bool | None = _Unset, gt: float | None = _Unset, ge: float | None = _Unset, lt: float | None = _Unset, le: float | None = _Unset, multiple_of: float | None = _Unset, allow_inf_nan: bool | None = _Unset, max_digits: int | None = _Unset, decimal_places: int | None = _Unset, min_length: int | None = _Unset, max_length: int | None = _Unset, union_mode: Literal['smart', 'left_to_right'] = _Unset, **extra: Unpack[_EmptyKwargs], ) -> Any: """Usage docs: https://docs.pydantic.dev/2.7/concepts/fields Create a field for objects that can be configured. Used to provide extra information about a field, either for the model schema or complex validation. Some arguments apply only to number fields (`int`, `float`, `Decimal`) and some apply only to `str`. Note: - Any `_Unset` objects will be replaced by the corresponding value defined in the `_DefaultValues` dictionary. If a key for the `_Unset` object is not found in the `_DefaultValues` dictionary, it will default to `None` Args: default: Default value if the field is not set. default_factory: A callable to generate the default value, such as :func:`~datetime.utcnow`. alias: The name to use for the attribute when validating or serializing by alias. This is often used for things like converting between snake and camel case. alias_priority: Priority of the alias. This affects whether an alias generator is used. validation_alias: Like `alias`, but only affects validation, not serialization. serialization_alias: Like `alias`, but only affects serialization, not validation. title: Human-readable title. description: Human-readable description. examples: Example values for this field. exclude: Whether to exclude the field from the model serialization. discriminator: Field name or Discriminator for discriminating the type in a tagged union. deprecated: A deprecation message, an instance of `warnings.deprecated` or the `typing_extensions.deprecated` backport, or a boolean. If `True`, a default deprecation message will be emitted when accessing the field. json_schema_extra: A dict or callable to provide extra JSON schema properties. frozen: Whether the field is frozen. If true, attempts to change the value on an instance will raise an error. validate_default: If `True`, apply validation to the default value every time you create an instance. Otherwise, for performance reasons, the default value of the field is trusted and not validated. repr: A boolean indicating whether to include the field in the `__repr__` output. init: Whether the field should be included in the constructor of the dataclass. (Only applies to dataclasses.) 
init_var: Whether the field should _only_ be included in the constructor of the dataclass. (Only applies to dataclasses.) kw_only: Whether the field should be a keyword-only argument in the constructor of the dataclass. (Only applies to dataclasses.) coerce_numbers_to_str: Whether to enable coercion of any `Number` type to `str` (not applicable in `strict` mode). strict: If `True`, strict validation is applied to the field. See [Strict Mode](../concepts/strict_mode.md) for details. gt: Greater than. If set, value must be greater than this. Only applicable to numbers. ge: Greater than or equal. If set, value must be greater than or equal to this. Only applicable to numbers. lt: Less than. If set, value must be less than this. Only applicable to numbers. le: Less than or equal. If set, value must be less than or equal to this. Only applicable to numbers. multiple_of: Value must be a multiple of this. Only applicable to numbers. min_length: Minimum length for iterables. max_length: Maximum length for iterables. pattern: Pattern for strings (a regular expression). allow_inf_nan: Allow `inf`, `-inf`, `nan`. Only applicable to numbers. max_digits: Maximum number of allow digits for strings. decimal_places: Maximum number of decimal places allowed for numbers. union_mode: The strategy to apply when validating a union. Can be `smart` (the default), or `left_to_right`. See [Union Mode](standard_library_types.md#union-mode) for details. extra: (Deprecated) Extra fields that will be included in the JSON schema. !!! warning Deprecated The `extra` kwargs is deprecated. Use `json_schema_extra` instead. Returns: A new [`FieldInfo`][pydantic.fields.FieldInfo]. The return annotation is `Any` so `Field` can be used on type-annotated fields without causing a type error. """ # Check deprecated and removed params from V1. This logic should eventually be removed. const = extra.pop('const', None) # type: ignore if const is not None: raise PydanticUserError('`const` is removed, use `Literal` instead', code='removed-kwargs') min_items = extra.pop('min_items', None) # type: ignore if min_items is not None: warn('`min_items` is deprecated and will be removed, use `min_length` instead', DeprecationWarning) if min_length in (None, _Unset): min_length = min_items # type: ignore max_items = extra.pop('max_items', None) # type: ignore if max_items is not None: warn('`max_items` is deprecated and will be removed, use `max_length` instead', DeprecationWarning) if max_length in (None, _Unset): max_length = max_items # type: ignore unique_items = extra.pop('unique_items', None) # type: ignore if unique_items is not None: raise PydanticUserError( ( '`unique_items` is removed, use `Set` instead' '(this feature is discussed in https://github.com/pydantic/pydantic-core/issues/296)' ), code='removed-kwargs', ) allow_mutation = extra.pop('allow_mutation', None) # type: ignore if allow_mutation is not None: warn('`allow_mutation` is deprecated and will be removed. use `frozen` instead', DeprecationWarning) if allow_mutation is False: frozen = True regex = extra.pop('regex', None) # type: ignore if regex is not None: raise PydanticUserError('`regex` is removed. use `pattern` instead', code='removed-kwargs') if isinstance(pattern, typing.Pattern): pattern = pattern.pattern if extra: warn( 'Using extra keyword arguments on `Field` is deprecated and will be removed.' ' Use `json_schema_extra` instead.' 
f' (Extra keys: {", ".join(k.__repr__() for k in extra.keys())})', DeprecationWarning, ) if not json_schema_extra or json_schema_extra is _Unset: json_schema_extra = extra # type: ignore if ( validation_alias and validation_alias is not _Unset and not isinstance(validation_alias, (str, AliasChoices, AliasPath)) ): raise TypeError('Invalid `validation_alias` type. it should be `str`, `AliasChoices`, or `AliasPath`') if serialization_alias in (_Unset, None) and isinstance(alias, str): serialization_alias = alias if validation_alias in (_Unset, None): validation_alias = alias include = extra.pop('include', None) # type: ignore if include is not None: warn('`include` is deprecated and does nothing. It will be removed, use `exclude` instead', DeprecationWarning) return FieldInfo.from_field( default, default_factory=default_factory, alias=alias, alias_priority=alias_priority, validation_alias=validation_alias, serialization_alias=serialization_alias, title=title, description=description, examples=examples, exclude=exclude, discriminator=discriminator, deprecated=deprecated, json_schema_extra=json_schema_extra, frozen=frozen, pattern=pattern, validate_default=validate_default, repr=repr, init=init, init_var=init_var, kw_only=kw_only, coerce_numbers_to_str=coerce_numbers_to_str, strict=strict, gt=gt, ge=ge, lt=lt, le=le, multiple_of=multiple_of, min_length=min_length, max_length=max_length, allow_inf_nan=allow_inf_nan, max_digits=max_digits, decimal_places=decimal_places, union_mode=union_mode, )
(default: 'Any' = PydanticUndefined, *, default_factory: 'typing.Callable[[], Any] | None' = PydanticUndefined, alias: 'str | None' = PydanticUndefined, alias_priority: 'int | None' = PydanticUndefined, validation_alias: 'str | AliasPath | AliasChoices | None' = PydanticUndefined, serialization_alias: 'str | None' = PydanticUndefined, title: 'str | None' = PydanticUndefined, description: 'str | None' = PydanticUndefined, examples: 'list[Any] | None' = PydanticUndefined, exclude: 'bool | None' = PydanticUndefined, discriminator: 'str | types.Discriminator | None' = PydanticUndefined, deprecated: 'Deprecated | str | bool | None' = PydanticUndefined, json_schema_extra: 'JsonDict | typing.Callable[[JsonDict], None] | None' = PydanticUndefined, frozen: 'bool | None' = PydanticUndefined, validate_default: 'bool | None' = PydanticUndefined, repr: 'bool' = PydanticUndefined, init: 'bool | None' = PydanticUndefined, init_var: 'bool | None' = PydanticUndefined, kw_only: 'bool | None' = PydanticUndefined, pattern: 'str | typing.Pattern[str] | None' = PydanticUndefined, strict: 'bool | None' = PydanticUndefined, coerce_numbers_to_str: 'bool | None' = PydanticUndefined, gt: 'float | None' = PydanticUndefined, ge: 'float | None' = PydanticUndefined, lt: 'float | None' = PydanticUndefined, le: 'float | None' = PydanticUndefined, multiple_of: 'float | None' = PydanticUndefined, allow_inf_nan: 'bool | None' = PydanticUndefined, max_digits: 'int | None' = PydanticUndefined, decimal_places: 'int | None' = PydanticUndefined, min_length: 'int | None' = PydanticUndefined, max_length: 'int | None' = PydanticUndefined, union_mode: "Literal['smart', 'left_to_right']" = PydanticUndefined, **extra: 'Unpack[_EmptyKwargs]') -> 'Any'
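A brief usage sketch of Field, covering a few of the parameters documented above (numeric bounds, string length, alias, description); the model and values are illustrative:

from pydantic import BaseModel, Field

class Item(BaseModel):
    # gt/le constrain numbers; min_length applies to strings/iterables;
    # alias controls the external name used during validation/serialization.
    price: float = Field(default=1.0, gt=0, le=1000)
    sku: str = Field(min_length=3, alias="SKU", description="Stock keeping unit")

item = Item(SKU="ABC-123", price=19.99)
print(item.model_dump(by_alias=True))  # {'price': 19.99, 'SKU': 'ABC-123'}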
63,284
litellm.types.llms.openai
FileSearchToolParam
null
class FileSearchToolParam(TypedDict, total=False):
    type: Required[Literal["file_search"]]
    """The type of tool being defined: `file_search`"""
null
63,285
litellm.llms.gemini
GeminiConfig
Reference: https://ai.google.dev/api/python/google/generativeai/GenerationConfig The class `GeminiConfig` provides configuration for the Gemini's API interface. Here are the parameters: - `candidate_count` (int): Number of generated responses to return. - `stop_sequences` (List[str]): The set of character sequences (up to 5) that will stop output generation. If specified, the API will stop at the first appearance of a stop sequence. The stop sequence will not be included as part of the response. - `max_output_tokens` (int): The maximum number of tokens to include in a candidate. If unset, this will default to output_token_limit specified in the model's specification. - `temperature` (float): Controls the randomness of the output. Note: The default value varies by model, see the Model.temperature attribute of the Model returned the genai.get_model function. Values can range from [0.0,1.0], inclusive. A value closer to 1.0 will produce responses that are more varied and creative, while a value closer to 0.0 will typically result in more straightforward responses from the model. - `top_p` (float): Optional. The maximum cumulative probability of tokens to consider when sampling. - `top_k` (int): Optional. The maximum number of tokens to consider when sampling.
class GeminiConfig: """ Reference: https://ai.google.dev/api/python/google/generativeai/GenerationConfig The class `GeminiConfig` provides configuration for the Gemini's API interface. Here are the parameters: - `candidate_count` (int): Number of generated responses to return. - `stop_sequences` (List[str]): The set of character sequences (up to 5) that will stop output generation. If specified, the API will stop at the first appearance of a stop sequence. The stop sequence will not be included as part of the response. - `max_output_tokens` (int): The maximum number of tokens to include in a candidate. If unset, this will default to output_token_limit specified in the model's specification. - `temperature` (float): Controls the randomness of the output. Note: The default value varies by model, see the Model.temperature attribute of the Model returned the genai.get_model function. Values can range from [0.0,1.0], inclusive. A value closer to 1.0 will produce responses that are more varied and creative, while a value closer to 0.0 will typically result in more straightforward responses from the model. - `top_p` (float): Optional. The maximum cumulative probability of tokens to consider when sampling. - `top_k` (int): Optional. The maximum number of tokens to consider when sampling. """ candidate_count: Optional[int] = None stop_sequences: Optional[list] = None max_output_tokens: Optional[int] = None temperature: Optional[float] = None top_p: Optional[float] = None top_k: Optional[int] = None def __init__( self, candidate_count: Optional[int] = None, stop_sequences: Optional[list] = None, max_output_tokens: Optional[int] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, top_k: Optional[int] = None, ) -> None: locals_ = locals() for key, value in locals_.items(): if key != "self" and value is not None: setattr(self.__class__, key, value) @classmethod def get_config(cls): return { k: v for k, v in cls.__dict__.items() if not k.startswith("__") and not isinstance( v, ( types.FunctionType, types.BuiltinFunctionType, classmethod, staticmethod, ), ) and v is not None }
(candidate_count: Optional[int] = None, stop_sequences: Optional[list] = None, max_output_tokens: Optional[int] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, top_k: Optional[int] = None) -> None
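A short usage sketch, assuming litellm is installed: the constructor stores non-None values as class-level attributes, and get_config() collects them so they can be merged into a Gemini request. The parameter values are illustrative.

from litellm.llms.gemini import GeminiConfig

GeminiConfig(temperature=0.2, max_output_tokens=256, top_p=0.9)
# get_config() returns only the explicitly set, non-None values
# (here: temperature, max_output_tokens, top_p); key order may vary.
print(GeminiConfig.get_config())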
63,286
litellm.llms.gemini
__init__
null
def __init__(
    self,
    candidate_count: Optional[int] = None,
    stop_sequences: Optional[list] = None,
    max_output_tokens: Optional[int] = None,
    temperature: Optional[float] = None,
    top_p: Optional[float] = None,
    top_k: Optional[int] = None,
) -> None:
    locals_ = locals()
    for key, value in locals_.items():
        if key != "self" and value is not None:
            setattr(self.__class__, key, value)
(self, candidate_count: Optional[int] = None, stop_sequences: Optional[list] = None, max_output_tokens: Optional[int] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, top_k: Optional[int] = None) -> NoneType
63,287
litellm.types.router
GenericLiteLLMParams
LiteLLM Params without 'model' arg (used across completion / assistants api)
class GenericLiteLLMParams(BaseModel): """ LiteLLM Params without 'model' arg (used across completion / assistants api) """ custom_llm_provider: Optional[str] = None tpm: Optional[int] = None rpm: Optional[int] = None api_key: Optional[str] = None api_base: Optional[str] = None api_version: Optional[str] = None timeout: Optional[Union[float, str, httpx.Timeout]] = ( None # if str, pass in as os.environ/ ) stream_timeout: Optional[Union[float, str]] = ( None # timeout when making stream=True calls, if str, pass in as os.environ/ ) max_retries: Optional[int] = None organization: Optional[str] = None # for openai orgs ## UNIFIED PROJECT/REGION ## region_name: Optional[str] = None ## VERTEX AI ## vertex_project: Optional[str] = None vertex_location: Optional[str] = None ## AWS BEDROCK / SAGEMAKER ## aws_access_key_id: Optional[str] = None aws_secret_access_key: Optional[str] = None aws_region_name: Optional[str] = None ## IBM WATSONX ## watsonx_region_name: Optional[str] = None ## CUSTOM PRICING ## input_cost_per_token: Optional[float] = None output_cost_per_token: Optional[float] = None input_cost_per_second: Optional[float] = None output_cost_per_second: Optional[float] = None def __init__( self, custom_llm_provider: Optional[str] = None, max_retries: Optional[Union[int, str]] = None, tpm: Optional[int] = None, rpm: Optional[int] = None, api_key: Optional[str] = None, api_base: Optional[str] = None, api_version: Optional[str] = None, timeout: Optional[Union[float, str]] = None, # if str, pass in as os.environ/ stream_timeout: Optional[Union[float, str]] = ( None # timeout when making stream=True calls, if str, pass in as os.environ/ ), organization: Optional[str] = None, # for openai orgs ## UNIFIED PROJECT/REGION ## region_name: Optional[str] = None, ## VERTEX AI ## vertex_project: Optional[str] = None, vertex_location: Optional[str] = None, ## AWS BEDROCK / SAGEMAKER ## aws_access_key_id: Optional[str] = None, aws_secret_access_key: Optional[str] = None, aws_region_name: Optional[str] = None, ## IBM WATSONX ## watsonx_region_name: Optional[str] = None, input_cost_per_token: Optional[float] = None, output_cost_per_token: Optional[float] = None, input_cost_per_second: Optional[float] = None, output_cost_per_second: Optional[float] = None, **params ): args = locals() args.pop("max_retries", None) args.pop("self", None) args.pop("params", None) args.pop("__class__", None) if max_retries is not None and isinstance(max_retries, str): max_retries = int(max_retries) # cast to int super().__init__(max_retries=max_retries, **args, **params) class Config: extra = "allow" arbitrary_types_allowed = True def __contains__(self, key): # Define custom behavior for the 'in' operator return hasattr(self, key) def get(self, key, default=None): # Custom .get() method to access attributes with a default value if the attribute doesn't exist return getattr(self, key, default) def __getitem__(self, key): # Allow dictionary-style access to attributes return getattr(self, key) def __setitem__(self, key, value): # Allow dictionary-style assignment of attributes setattr(self, key, value)
(custom_llm_provider: Optional[str] = None, max_retries: Optional[int] = None, tpm: Optional[int] = None, rpm: Optional[int] = None, api_key: Optional[str] = None, api_base: Optional[str] = None, api_version: Optional[str] = None, timeout: Union[float, str, openai.Timeout, NoneType] = None, stream_timeout: Union[float, str, NoneType] = None, organization: Optional[str] = None, region_name: Optional[str] = None, vertex_project: Optional[str] = None, vertex_location: Optional[str] = None, aws_access_key_id: Optional[str] = None, aws_secret_access_key: Optional[str] = None, aws_region_name: Optional[str] = None, watsonx_region_name: Optional[str] = None, input_cost_per_token: Optional[float] = None, output_cost_per_token: Optional[float] = None, input_cost_per_second: Optional[float] = None, output_cost_per_second: Optional[float] = None, **params) -> None
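A minimal usage sketch for `GenericLiteLLMParams`, assuming only what the class definition above shows (a string `max_retries` is cast to int, and `__contains__` / `get` / `__getitem__` provide dict-style access); the key and endpoint values are placeholders:

from litellm.types.router import GenericLiteLLMParams

params = GenericLiteLLMParams(
    api_key="sk-placeholder",                     # placeholder credential
    api_base="https://my-endpoint.example.com",   # placeholder base url
    max_retries="3",                              # str is accepted and cast to int in __init__
    timeout=30.0,
)

print(params.max_retries)   # -> 3 (cast from "3")
print(params["api_base"])   # dict-style access via __getitem__
print(params.get("rpm"))    # -> None (declared field, never set)
print("api_key" in params)  # -> True, via __contains__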
63,296
litellm.types.router
__init__
null
def __init__(
    self,
    custom_llm_provider: Optional[str] = None,
    max_retries: Optional[Union[int, str]] = None,
    tpm: Optional[int] = None,
    rpm: Optional[int] = None,
    api_key: Optional[str] = None,
    api_base: Optional[str] = None,
    api_version: Optional[str] = None,
    timeout: Optional[Union[float, str]] = None,  # if str, pass in as os.environ/
    stream_timeout: Optional[Union[float, str]] = (
        None  # timeout when making stream=True calls, if str, pass in as os.environ/
    ),
    organization: Optional[str] = None,  # for openai orgs
    ## UNIFIED PROJECT/REGION ##
    region_name: Optional[str] = None,
    ## VERTEX AI ##
    vertex_project: Optional[str] = None,
    vertex_location: Optional[str] = None,
    ## AWS BEDROCK / SAGEMAKER ##
    aws_access_key_id: Optional[str] = None,
    aws_secret_access_key: Optional[str] = None,
    aws_region_name: Optional[str] = None,
    ## IBM WATSONX ##
    watsonx_region_name: Optional[str] = None,
    input_cost_per_token: Optional[float] = None,
    output_cost_per_token: Optional[float] = None,
    input_cost_per_second: Optional[float] = None,
    output_cost_per_second: Optional[float] = None,
    **params
):
    args = locals()
    args.pop("max_retries", None)
    args.pop("self", None)
    args.pop("params", None)
    args.pop("__class__", None)
    if max_retries is not None and isinstance(max_retries, str):
        max_retries = int(max_retries)  # cast to int
    super().__init__(max_retries=max_retries, **args, **params)
(self, custom_llm_provider: Optional[str] = None, max_retries: Union[str, int, NoneType] = None, tpm: Optional[int] = None, rpm: Optional[int] = None, api_key: Optional[str] = None, api_base: Optional[str] = None, api_version: Optional[str] = None, timeout: Union[float, str, NoneType] = None, stream_timeout: Union[float, str, NoneType] = None, organization: Optional[str] = None, region_name: Optional[str] = None, vertex_project: Optional[str] = None, vertex_location: Optional[str] = None, aws_access_key_id: Optional[str] = None, aws_secret_access_key: Optional[str] = None, aws_region_name: Optional[str] = None, watsonx_region_name: Optional[str] = None, input_cost_per_token: Optional[float] = None, output_cost_per_token: Optional[float] = None, input_cost_per_second: Optional[float] = None, output_cost_per_second: Optional[float] = None, **params)
63,320
litellm.llms.huggingface_restapi
Huggingface
null
class Huggingface(BaseLLM): _client_session: Optional[httpx.Client] = None _aclient_session: Optional[httpx.AsyncClient] = None def __init__(self) -> None: super().__init__() def validate_environment(self, api_key, headers): default_headers = { "content-type": "application/json", } if api_key and headers is None: default_headers["Authorization"] = ( f"Bearer {api_key}" # Huggingface Inference Endpoint default is to accept bearer tokens ) headers = default_headers elif headers: headers = headers else: headers = default_headers return headers def convert_to_model_response_object( self, completion_response, model_response, task: hf_tasks, optional_params, encoding, input_text, model, ): if task == "conversational": if len(completion_response["generated_text"]) > 0: # type: ignore model_response["choices"][0]["message"][ "content" ] = completion_response[ "generated_text" ] # type: ignore elif task == "text-generation-inference": if ( not isinstance(completion_response, list) or not isinstance(completion_response[0], dict) or "generated_text" not in completion_response[0] ): raise HuggingfaceError( status_code=422, message=f"response is not in expected format - {completion_response}", ) if len(completion_response[0]["generated_text"]) > 0: model_response["choices"][0]["message"]["content"] = output_parser( completion_response[0]["generated_text"] ) ## GETTING LOGPROBS + FINISH REASON if ( "details" in completion_response[0] and "tokens" in completion_response[0]["details"] ): model_response.choices[0].finish_reason = completion_response[0][ "details" ]["finish_reason"] sum_logprob = 0 for token in completion_response[0]["details"]["tokens"]: if token["logprob"] != None: sum_logprob += token["logprob"] model_response["choices"][0]["message"]._logprob = sum_logprob if "best_of" in optional_params and optional_params["best_of"] > 1: if ( "details" in completion_response[0] and "best_of_sequences" in completion_response[0]["details"] ): choices_list = [] for idx, item in enumerate( completion_response[0]["details"]["best_of_sequences"] ): sum_logprob = 0 for token in item["tokens"]: if token["logprob"] != None: sum_logprob += token["logprob"] if len(item["generated_text"]) > 0: message_obj = Message( content=output_parser(item["generated_text"]), logprobs=sum_logprob, ) else: message_obj = Message(content=None) choice_obj = Choices( finish_reason=item["finish_reason"], index=idx + 1, message=message_obj, ) choices_list.append(choice_obj) model_response["choices"].extend(choices_list) elif task == "text-classification": model_response["choices"][0]["message"]["content"] = json.dumps( completion_response ) else: if len(completion_response[0]["generated_text"]) > 0: model_response["choices"][0]["message"]["content"] = output_parser( completion_response[0]["generated_text"] ) ## CALCULATING USAGE prompt_tokens = 0 try: prompt_tokens = len( encoding.encode(input_text) ) ##[TODO] use the llama2 tokenizer here except: # this should remain non blocking we should not block a response returning if calculating usage fails pass output_text = model_response["choices"][0]["message"].get("content", "") if output_text is not None and len(output_text) > 0: completion_tokens = 0 try: completion_tokens = len( encoding.encode( model_response["choices"][0]["message"].get("content", "") ) ) ##[TODO] use the llama2 tokenizer here except: # this should remain non blocking we should not block a response returning if calculating usage fails pass else: completion_tokens = 0 model_response["created"] = int(time.time()) 
model_response["model"] = model usage = Usage( prompt_tokens=prompt_tokens, completion_tokens=completion_tokens, total_tokens=prompt_tokens + completion_tokens, ) model_response.usage = usage model_response._hidden_params["original_response"] = completion_response return model_response def completion( self, model: str, messages: list, api_base: Optional[str], headers: Optional[dict], model_response: ModelResponse, print_verbose: Callable, timeout: float, encoding, api_key, logging_obj, optional_params: dict, custom_prompt_dict={}, acompletion: bool = False, litellm_params=None, logger_fn=None, ): super().completion() exception_mapping_worked = False try: headers = self.validate_environment(api_key, headers) task = get_hf_task_for_model(model) ## VALIDATE API FORMAT if task is None or not isinstance(task, str) or task not in hf_task_list: raise Exception( "Invalid hf task - {}. Valid formats - {}.".format(task, hf_tasks) ) print_verbose(f"{model}, {task}") completion_url = "" input_text = "" if "https" in model: completion_url = model elif api_base: completion_url = api_base elif "HF_API_BASE" in os.environ: completion_url = os.getenv("HF_API_BASE", "") elif "HUGGINGFACE_API_BASE" in os.environ: completion_url = os.getenv("HUGGINGFACE_API_BASE", "") else: completion_url = f"https://api-inference.huggingface.co/models/{model}" ## Load Config config = litellm.HuggingfaceConfig.get_config() for k, v in config.items(): if ( k not in optional_params ): # completion(top_k=3) > huggingfaceConfig(top_k=3) <- allows for dynamic variables to be passed in optional_params[k] = v ### MAP INPUT PARAMS if task == "conversational": inference_params = copy.deepcopy(optional_params) inference_params.pop("details") inference_params.pop("return_full_text") past_user_inputs = [] generated_responses = [] text = "" for message in messages: if message["role"] == "user": if text != "": past_user_inputs.append(text) text = message["content"] elif message["role"] == "assistant" or message["role"] == "system": generated_responses.append(message["content"]) data = { "inputs": { "text": text, "past_user_inputs": past_user_inputs, "generated_responses": generated_responses, }, "parameters": inference_params, } input_text = "".join(message["content"] for message in messages) elif task == "text-generation-inference": # always send "details" and "return_full_text" as params if model in custom_prompt_dict: # check if the model has a registered custom prompt model_prompt_details = custom_prompt_dict[model] prompt = custom_prompt( role_dict=model_prompt_details.get("roles", None), initial_prompt_value=model_prompt_details.get( "initial_prompt_value", "" ), final_prompt_value=model_prompt_details.get( "final_prompt_value", "" ), messages=messages, ) else: prompt = prompt_factory(model=model, messages=messages) data = { "inputs": prompt, "parameters": optional_params, "stream": ( # type: ignore True if "stream" in optional_params and isinstance(optional_params["stream"], bool) and optional_params["stream"] == True # type: ignore else False ), } input_text = prompt else: # Non TGI and Conversational llms # We need this branch, it removes 'details' and 'return_full_text' from params if model in custom_prompt_dict: # check if the model has a registered custom prompt model_prompt_details = custom_prompt_dict[model] prompt = custom_prompt( role_dict=model_prompt_details.get("roles", {}), initial_prompt_value=model_prompt_details.get( "initial_prompt_value", "" ), final_prompt_value=model_prompt_details.get( "final_prompt_value", "" 
), bos_token=model_prompt_details.get("bos_token", ""), eos_token=model_prompt_details.get("eos_token", ""), messages=messages, ) else: prompt = prompt_factory(model=model, messages=messages) inference_params = copy.deepcopy(optional_params) inference_params.pop("details") inference_params.pop("return_full_text") data = { "inputs": prompt, } if task == "text-generation-inference": data["parameters"] = inference_params data["stream"] = ( # type: ignore True if "stream" in optional_params and optional_params["stream"] == True else False ) input_text = prompt ## LOGGING logging_obj.pre_call( input=input_text, api_key=api_key, additional_args={ "complete_input_dict": data, "task": task, "headers": headers, "api_base": completion_url, "acompletion": acompletion, }, ) ## COMPLETION CALL if acompletion is True: ### ASYNC STREAMING if optional_params.get("stream", False): return self.async_streaming(logging_obj=logging_obj, api_base=completion_url, data=data, headers=headers, model_response=model_response, model=model, timeout=timeout) # type: ignore else: ### ASYNC COMPLETION return self.acompletion(api_base=completion_url, data=data, headers=headers, model_response=model_response, task=task, encoding=encoding, input_text=input_text, model=model, optional_params=optional_params, timeout=timeout) # type: ignore ### SYNC STREAMING if "stream" in optional_params and optional_params["stream"] == True: response = requests.post( completion_url, headers=headers, data=json.dumps(data), stream=optional_params["stream"], ) return response.iter_lines() ### SYNC COMPLETION else: response = requests.post( completion_url, headers=headers, data=json.dumps(data) ) ## Some servers might return streaming responses even though stream was not set to true. (e.g. Baseten) is_streamed = False if ( response.__dict__["headers"].get("Content-Type", "") == "text/event-stream" ): is_streamed = True # iterate over the complete streamed response, and return the final answer if is_streamed: streamed_response = CustomStreamWrapper( completion_stream=response.iter_lines(), model=model, custom_llm_provider="huggingface", logging_obj=logging_obj, ) content = "" for chunk in streamed_response: content += chunk["choices"][0]["delta"]["content"] completion_response: List[Dict[str, Any]] = [ {"generated_text": content} ] ## LOGGING logging_obj.post_call( input=input_text, api_key=api_key, original_response=completion_response, additional_args={"complete_input_dict": data, "task": task}, ) else: ## LOGGING logging_obj.post_call( input=input_text, api_key=api_key, original_response=response.text, additional_args={"complete_input_dict": data, "task": task}, ) ## RESPONSE OBJECT try: completion_response = response.json() if isinstance(completion_response, dict): completion_response = [completion_response] except: import traceback raise HuggingfaceError( message=f"Original Response received: {response.text}; Stacktrace: {traceback.format_exc()}", status_code=response.status_code, ) print_verbose(f"response: {completion_response}") if ( isinstance(completion_response, dict) and "error" in completion_response ): print_verbose(f"completion error: {completion_response['error']}") # type: ignore print_verbose(f"response.status_code: {response.status_code}") raise HuggingfaceError( message=completion_response["error"], # type: ignore status_code=response.status_code, ) return self.convert_to_model_response_object( completion_response=completion_response, model_response=model_response, task=task, optional_params=optional_params, 
encoding=encoding, input_text=input_text, model=model, ) except HuggingfaceError as e: exception_mapping_worked = True raise e except Exception as e: if exception_mapping_worked: raise e else: import traceback raise HuggingfaceError(status_code=500, message=traceback.format_exc()) async def acompletion( self, api_base: str, data: dict, headers: dict, model_response: ModelResponse, task: hf_tasks, encoding: Any, input_text: str, model: str, optional_params: dict, timeout: float, ): response = None try: async with httpx.AsyncClient(timeout=timeout) as client: response = await client.post(url=api_base, json=data, headers=headers) response_json = response.json() if response.status_code != 200: if "error" in response_json: raise HuggingfaceError( status_code=response.status_code, message=response_json["error"], request=response.request, response=response, ) else: raise HuggingfaceError( status_code=response.status_code, message=response.text, request=response.request, response=response, ) ## RESPONSE OBJECT return self.convert_to_model_response_object( completion_response=response_json, model_response=model_response, task=task, encoding=encoding, input_text=input_text, model=model, optional_params=optional_params, ) except Exception as e: if isinstance(e, httpx.TimeoutException): raise HuggingfaceError(status_code=500, message="Request Timeout Error") elif isinstance(e, HuggingfaceError): raise e elif response is not None and hasattr(response, "text"): raise HuggingfaceError( status_code=500, message=f"{str(e)}\n\nOriginal Response: {response.text}", ) else: raise HuggingfaceError(status_code=500, message=f"{str(e)}") async def async_streaming( self, logging_obj, api_base: str, data: dict, headers: dict, model_response: ModelResponse, model: str, timeout: float, ): async with httpx.AsyncClient(timeout=timeout) as client: response = client.stream( "POST", url=f"{api_base}", json=data, headers=headers ) async with response as r: if r.status_code != 200: text = await r.aread() raise HuggingfaceError( status_code=r.status_code, message=str(text), ) """ Check first chunk for error message. If error message, raise error. 
If not - add back to stream """ # Async iterator over the lines in the response body response_iterator = r.aiter_lines() # Attempt to get the first line/chunk from the response try: first_chunk = await response_iterator.__anext__() except StopAsyncIteration: # Handle the case where there are no lines to read (empty response) first_chunk = "" # Check the first chunk for an error message if ( "error" in first_chunk.lower() ): # Adjust this condition based on how error messages are structured raise HuggingfaceError( status_code=400, message=first_chunk, ) # Create a new async generator that begins with the first_chunk and includes the remaining items async def custom_stream_with_first_chunk(): yield first_chunk # Yield back the first chunk async for ( chunk ) in response_iterator: # Continue yielding the rest of the chunks yield chunk # Creating a new completion stream that starts with the first chunk completion_stream = custom_stream_with_first_chunk() streamwrapper = CustomStreamWrapper( completion_stream=completion_stream, model=model, custom_llm_provider="huggingface", logging_obj=logging_obj, ) async for transformed_chunk in streamwrapper: yield transformed_chunk def embedding( self, model: str, input: list, api_key: Optional[str] = None, api_base: Optional[str] = None, logging_obj=None, model_response=None, encoding=None, ): super().embedding() headers = self.validate_environment(api_key, headers=None) # print_verbose(f"{model}, {task}") embed_url = "" if "https" in model: embed_url = model elif api_base: embed_url = api_base elif "HF_API_BASE" in os.environ: embed_url = os.getenv("HF_API_BASE", "") elif "HUGGINGFACE_API_BASE" in os.environ: embed_url = os.getenv("HUGGINGFACE_API_BASE", "") else: embed_url = f"https://api-inference.huggingface.co/models/{model}" if "sentence-transformers" in model: if len(input) == 0: raise HuggingfaceError( status_code=400, message="sentence transformers requires 2+ sentences", ) data = { "inputs": { "source_sentence": input[0], "sentences": [ "That is a happy dog", "That is a very happy person", "Today is a sunny day", ], } } else: data = {"inputs": input} # type: ignore ## LOGGING logging_obj.pre_call( input=input, api_key=api_key, additional_args={ "complete_input_dict": data, "headers": headers, "api_base": embed_url, }, ) ## COMPLETION CALL response = requests.post(embed_url, headers=headers, data=json.dumps(data)) ## LOGGING logging_obj.post_call( input=input, api_key=api_key, additional_args={"complete_input_dict": data}, original_response=response, ) embeddings = response.json() if "error" in embeddings: raise HuggingfaceError(status_code=500, message=embeddings["error"]) output_data = [] if "similarities" in embeddings: for idx, embedding in embeddings["similarities"]: output_data.append( { "object": "embedding", "index": idx, "embedding": embedding, # flatten list returned from hf } ) else: for idx, embedding in enumerate(embeddings): if isinstance(embedding, float): output_data.append( { "object": "embedding", "index": idx, "embedding": embedding, # flatten list returned from hf } ) elif isinstance(embedding, list) and isinstance(embedding[0], float): output_data.append( { "object": "embedding", "index": idx, "embedding": embedding, # flatten list returned from hf } ) else: output_data.append( { "object": "embedding", "index": idx, "embedding": embedding[0][ 0 ], # flatten list returned from hf } ) model_response["object"] = "list" model_response["data"] = output_data model_response["model"] = model input_tokens = 0 for text in input: 
input_tokens += len(encoding.encode(text)) model_response["usage"] = { "prompt_tokens": input_tokens, "total_tokens": input_tokens, } return model_response
() -> None
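The `completion` method above resolves the request URL from four sources, in priority order: a full URL passed in place of the model name, an explicit `api_base`, the `HF_API_BASE` / `HUGGINGFACE_API_BASE` environment variables, and finally the public Inference API. A standalone restatement of that branch, under a hypothetical `resolve_hf_completion_url` helper name:

import os
from typing import Optional


def resolve_hf_completion_url(model: str, api_base: Optional[str]) -> str:
    # mirrors the URL-selection branch in Huggingface.completion()
    if "https" in model:  # a full endpoint URL was passed as the model
        return model
    if api_base:
        return api_base
    if "HF_API_BASE" in os.environ:
        return os.getenv("HF_API_BASE", "")
    if "HUGGINGFACE_API_BASE" in os.environ:
        return os.getenv("HUGGINGFACE_API_BASE", "")
    return f"https://api-inference.huggingface.co/models/{model}"


print(resolve_hf_completion_url("mistralai/Mistral-7B-Instruct-v0.2", api_base=None))
# -> https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.2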
63,324
litellm.llms.huggingface_restapi
acompletion
null
async def acompletion(
    self,
    api_base: str,
    data: dict,
    headers: dict,
    model_response: ModelResponse,
    task: hf_tasks,
    encoding: Any,
    input_text: str,
    model: str,
    optional_params: dict,
    timeout: float,
):
    response = None
    try:
        async with httpx.AsyncClient(timeout=timeout) as client:
            response = await client.post(url=api_base, json=data, headers=headers)
            response_json = response.json()
            if response.status_code != 200:
                if "error" in response_json:
                    raise HuggingfaceError(
                        status_code=response.status_code,
                        message=response_json["error"],
                        request=response.request,
                        response=response,
                    )
                else:
                    raise HuggingfaceError(
                        status_code=response.status_code,
                        message=response.text,
                        request=response.request,
                        response=response,
                    )

            ## RESPONSE OBJECT
            return self.convert_to_model_response_object(
                completion_response=response_json,
                model_response=model_response,
                task=task,
                encoding=encoding,
                input_text=input_text,
                model=model,
                optional_params=optional_params,
            )
    except Exception as e:
        if isinstance(e, httpx.TimeoutException):
            raise HuggingfaceError(status_code=500, message="Request Timeout Error")
        elif isinstance(e, HuggingfaceError):
            raise e
        elif response is not None and hasattr(response, "text"):
            raise HuggingfaceError(
                status_code=500,
                message=f"{str(e)}\n\nOriginal Response: {response.text}",
            )
        else:
            raise HuggingfaceError(status_code=500, message=f"{str(e)}")
(self, api_base: str, data: dict, headers: dict, model_response: litellm.utils.ModelResponse, task: Literal['text-generation-inference', 'conversational', 'text-classification', 'text-generation'], encoding: Any, input_text: str, model: str, optional_params: dict, timeout: float)
63,327
litellm.llms.huggingface_restapi
convert_to_model_response_object
null
def convert_to_model_response_object(
    self,
    completion_response,
    model_response,
    task: hf_tasks,
    optional_params,
    encoding,
    input_text,
    model,
):
    if task == "conversational":
        if len(completion_response["generated_text"]) > 0:  # type: ignore
            model_response["choices"][0]["message"][
                "content"
            ] = completion_response[
                "generated_text"
            ]  # type: ignore
    elif task == "text-generation-inference":
        if (
            not isinstance(completion_response, list)
            or not isinstance(completion_response[0], dict)
            or "generated_text" not in completion_response[0]
        ):
            raise HuggingfaceError(
                status_code=422,
                message=f"response is not in expected format - {completion_response}",
            )

        if len(completion_response[0]["generated_text"]) > 0:
            model_response["choices"][0]["message"]["content"] = output_parser(
                completion_response[0]["generated_text"]
            )
        ## GETTING LOGPROBS + FINISH REASON
        if (
            "details" in completion_response[0]
            and "tokens" in completion_response[0]["details"]
        ):
            model_response.choices[0].finish_reason = completion_response[0][
                "details"
            ]["finish_reason"]
            sum_logprob = 0
            for token in completion_response[0]["details"]["tokens"]:
                if token["logprob"] != None:
                    sum_logprob += token["logprob"]
            model_response["choices"][0]["message"]._logprob = sum_logprob
        if "best_of" in optional_params and optional_params["best_of"] > 1:
            if (
                "details" in completion_response[0]
                and "best_of_sequences" in completion_response[0]["details"]
            ):
                choices_list = []
                for idx, item in enumerate(
                    completion_response[0]["details"]["best_of_sequences"]
                ):
                    sum_logprob = 0
                    for token in item["tokens"]:
                        if token["logprob"] != None:
                            sum_logprob += token["logprob"]
                    if len(item["generated_text"]) > 0:
                        message_obj = Message(
                            content=output_parser(item["generated_text"]),
                            logprobs=sum_logprob,
                        )
                    else:
                        message_obj = Message(content=None)
                    choice_obj = Choices(
                        finish_reason=item["finish_reason"],
                        index=idx + 1,
                        message=message_obj,
                    )
                    choices_list.append(choice_obj)
                model_response["choices"].extend(choices_list)
    elif task == "text-classification":
        model_response["choices"][0]["message"]["content"] = json.dumps(
            completion_response
        )
    else:
        if len(completion_response[0]["generated_text"]) > 0:
            model_response["choices"][0]["message"]["content"] = output_parser(
                completion_response[0]["generated_text"]
            )
    ## CALCULATING USAGE
    prompt_tokens = 0
    try:
        prompt_tokens = len(
            encoding.encode(input_text)
        )  ##[TODO] use the llama2 tokenizer here
    except:
        # this should remain non blocking we should not block a response returning if calculating usage fails
        pass
    output_text = model_response["choices"][0]["message"].get("content", "")
    if output_text is not None and len(output_text) > 0:
        completion_tokens = 0
        try:
            completion_tokens = len(
                encoding.encode(
                    model_response["choices"][0]["message"].get("content", "")
                )
            )  ##[TODO] use the llama2 tokenizer here
        except:
            # this should remain non blocking we should not block a response returning if calculating usage fails
            pass
    else:
        completion_tokens = 0

    model_response["created"] = int(time.time())
    model_response["model"] = model
    usage = Usage(
        prompt_tokens=prompt_tokens,
        completion_tokens=completion_tokens,
        total_tokens=prompt_tokens + completion_tokens,
    )
    model_response.usage = usage
    model_response._hidden_params["original_response"] = completion_response
    return model_response
(self, completion_response, model_response, task: Literal['text-generation-inference', 'conversational', 'text-classification', 'text-generation'], optional_params, encoding, input_text, model)
63,330
litellm.llms.huggingface_restapi
embedding
null
def embedding(
    self,
    model: str,
    input: list,
    api_key: Optional[str] = None,
    api_base: Optional[str] = None,
    logging_obj=None,
    model_response=None,
    encoding=None,
):
    super().embedding()
    headers = self.validate_environment(api_key, headers=None)
    # print_verbose(f"{model}, {task}")
    embed_url = ""
    if "https" in model:
        embed_url = model
    elif api_base:
        embed_url = api_base
    elif "HF_API_BASE" in os.environ:
        embed_url = os.getenv("HF_API_BASE", "")
    elif "HUGGINGFACE_API_BASE" in os.environ:
        embed_url = os.getenv("HUGGINGFACE_API_BASE", "")
    else:
        embed_url = f"https://api-inference.huggingface.co/models/{model}"

    if "sentence-transformers" in model:
        if len(input) == 0:
            raise HuggingfaceError(
                status_code=400,
                message="sentence transformers requires 2+ sentences",
            )
        data = {
            "inputs": {
                "source_sentence": input[0],
                "sentences": [
                    "That is a happy dog",
                    "That is a very happy person",
                    "Today is a sunny day",
                ],
            }
        }
    else:
        data = {"inputs": input}  # type: ignore

    ## LOGGING
    logging_obj.pre_call(
        input=input,
        api_key=api_key,
        additional_args={
            "complete_input_dict": data,
            "headers": headers,
            "api_base": embed_url,
        },
    )
    ## COMPLETION CALL
    response = requests.post(embed_url, headers=headers, data=json.dumps(data))

    ## LOGGING
    logging_obj.post_call(
        input=input,
        api_key=api_key,
        additional_args={"complete_input_dict": data},
        original_response=response,
    )

    embeddings = response.json()

    if "error" in embeddings:
        raise HuggingfaceError(status_code=500, message=embeddings["error"])

    output_data = []
    if "similarities" in embeddings:
        for idx, embedding in embeddings["similarities"]:
            output_data.append(
                {
                    "object": "embedding",
                    "index": idx,
                    "embedding": embedding,  # flatten list returned from hf
                }
            )
    else:
        for idx, embedding in enumerate(embeddings):
            if isinstance(embedding, float):
                output_data.append(
                    {
                        "object": "embedding",
                        "index": idx,
                        "embedding": embedding,  # flatten list returned from hf
                    }
                )
            elif isinstance(embedding, list) and isinstance(embedding[0], float):
                output_data.append(
                    {
                        "object": "embedding",
                        "index": idx,
                        "embedding": embedding,  # flatten list returned from hf
                    }
                )
            else:
                output_data.append(
                    {
                        "object": "embedding",
                        "index": idx,
                        "embedding": embedding[0][
                            0
                        ],  # flatten list returned from hf
                    }
                )
    model_response["object"] = "list"
    model_response["data"] = output_data
    model_response["model"] = model
    input_tokens = 0
    for text in input:
        input_tokens += len(encoding.encode(text))

    model_response["usage"] = {
        "prompt_tokens": input_tokens,
        "total_tokens": input_tokens,
    }
    return model_response
(self, model: str, input: list, api_key: Optional[str] = None, api_base: Optional[str] = None, logging_obj=None, model_response=None, encoding=None)
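`embedding` above builds one of two payload shapes before POSTing: `sentence-transformers` models are sent a `source_sentence` plus a fixed list of comparison `sentences` (so the API returns similarities), while every other model receives the raw `inputs` list. A restatement of just that payload branch, under a hypothetical `build_hf_embedding_payload` name:

def build_hf_embedding_payload(model: str, input: list) -> dict:
    # mirrors the payload branch in Huggingface.embedding()
    if "sentence-transformers" in model:
        if len(input) == 0:
            raise ValueError("sentence transformers requires 2+ sentences")
        return {
            "inputs": {
                "source_sentence": input[0],
                "sentences": [
                    "That is a happy dog",
                    "That is a very happy person",
                    "Today is a sunny day",
                ],
            }
        }
    return {"inputs": input}


print(build_hf_embedding_payload("sentence-transformers/all-MiniLM-L6-v2", ["hello"]))
print(build_hf_embedding_payload("BAAI/bge-small-en", ["hello", "world"]))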
63,332
litellm.llms.huggingface_restapi
validate_environment
null
def validate_environment(self, api_key, headers):
    default_headers = {
        "content-type": "application/json",
    }
    if api_key and headers is None:
        default_headers["Authorization"] = (
            f"Bearer {api_key}"  # Huggingface Inference Endpoint default is to accept bearer tokens
        )
        headers = default_headers
    elif headers:
        headers = headers
    else:
        headers = default_headers
    return headers
(self, api_key, headers)
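A small sketch of the three `validate_environment` branches, assuming the class can be instantiated directly (its `__init__` takes no arguments); the token value is a placeholder:

from litellm.llms.huggingface_restapi import Huggingface

hf = Huggingface()

# api_key given, no explicit headers -> bearer auth is added to the defaults
print(hf.validate_environment(api_key="hf_placeholder", headers=None))
# {'content-type': 'application/json', 'Authorization': 'Bearer hf_placeholder'}

# explicit headers are returned as-is and win over the defaults
print(hf.validate_environment(api_key="hf_placeholder", headers={"X-Custom": "1"}))
# {'X-Custom': '1'}

# neither api_key nor headers -> bare defaults
print(hf.validate_environment(api_key=None, headers=None))
# {'content-type': 'application/json'}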
63,333
litellm.llms.huggingface_restapi
HuggingfaceConfig
Reference: https://huggingface.github.io/text-generation-inference/#/Text%20Generation%20Inference/compat_generate
class HuggingfaceConfig:
    """
    Reference: https://huggingface.github.io/text-generation-inference/#/Text%20Generation%20Inference/compat_generate
    """

    hf_task: Optional[hf_tasks] = (
        None  # litellm-specific param, used to know the api spec to use when calling huggingface api
    )
    best_of: Optional[int] = None
    decoder_input_details: Optional[bool] = None
    details: Optional[bool] = True  # enables returning logprobs + best of
    max_new_tokens: Optional[int] = None
    repetition_penalty: Optional[float] = None
    return_full_text: Optional[bool] = (
        False  # by default don't return the input as part of the output
    )
    seed: Optional[int] = None
    temperature: Optional[float] = None
    top_k: Optional[int] = None
    top_n_tokens: Optional[int] = None
    top_p: Optional[int] = None
    truncate: Optional[int] = None
    typical_p: Optional[float] = None
    watermark: Optional[bool] = None

    def __init__(
        self,
        best_of: Optional[int] = None,
        decoder_input_details: Optional[bool] = None,
        details: Optional[bool] = None,
        max_new_tokens: Optional[int] = None,
        repetition_penalty: Optional[float] = None,
        return_full_text: Optional[bool] = None,
        seed: Optional[int] = None,
        temperature: Optional[float] = None,
        top_k: Optional[int] = None,
        top_n_tokens: Optional[int] = None,
        top_p: Optional[int] = None,
        truncate: Optional[int] = None,
        typical_p: Optional[float] = None,
        watermark: Optional[bool] = None,
    ) -> None:
        locals_ = locals()
        for key, value in locals_.items():
            if key != "self" and value is not None:
                setattr(self.__class__, key, value)

    @classmethod
    def get_config(cls):
        return {
            k: v
            for k, v in cls.__dict__.items()
            if not k.startswith("__")
            and not isinstance(
                v,
                (
                    types.FunctionType,
                    types.BuiltinFunctionType,
                    classmethod,
                    staticmethod,
                ),
            )
            and v is not None
        }

    def get_supported_openai_params(self):
        return [
            "stream",
            "temperature",
            "max_tokens",
            "top_p",
            "stop",
            "n",
            "echo",
        ]

    def map_openai_params(
        self, non_default_params: dict, optional_params: dict
    ) -> dict:
        for param, value in non_default_params.items():
            # temperature, top_p, n, stream, stop, max_tokens, n, presence_penalty default to None
            if param == "temperature":
                if value == 0.0 or value == 0:
                    # hugging face exception raised when temp==0
                    # Failed: Error occurred: HuggingfaceException - Input validation error: `temperature` must be strictly positive
                    value = 0.01
                optional_params["temperature"] = value
            if param == "top_p":
                optional_params["top_p"] = value
            if param == "n":
                optional_params["best_of"] = value
                optional_params["do_sample"] = (
                    True  # Need to sample if you want best of for hf inference endpoints
                )
            if param == "stream":
                optional_params["stream"] = value
            if param == "stop":
                optional_params["stop"] = value
            if param == "max_tokens":
                # HF TGI raises the following exception when max_new_tokens==0
                # Failed: Error occurred: HuggingfaceException - Input validation error: `max_new_tokens` must be strictly positive
                if value == 0:
                    value = 1
                optional_params["max_new_tokens"] = value
            if param == "echo":
                # https://huggingface.co/docs/huggingface_hub/main/en/package_reference/inference_client#huggingface_hub.InferenceClient.text_generation.decoder_input_details
                # Return the decoder input token logprobs and ids. You must set details=True as well for it to be taken into account. Defaults to False
                optional_params["decoder_input_details"] = True
        return optional_params
(best_of: Optional[int] = None, decoder_input_details: Optional[bool] = None, details: Optional[bool] = None, max_new_tokens: Optional[int] = None, repetition_penalty: Optional[float] = None, return_full_text: Optional[bool] = None, seed: Optional[int] = None, temperature: Optional[float] = None, top_k: Optional[int] = None, top_n_tokens: Optional[int] = None, top_p: Optional[int] = None, truncate: Optional[int] = None, typical_p: Optional[float] = None, watermark: Optional[bool] = None) -> None
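A minimal sketch of how `HuggingfaceConfig` is meant to be used together with `get_config()`: constructing it once writes the non-None arguments onto the class, and `completion()` (above) later copies each returned key into `optional_params` only when the caller did not pass that key itself. The exact dict contents shown are illustrative:

import litellm

litellm.HuggingfaceConfig(max_new_tokens=256, temperature=0.2)

config = litellm.HuggingfaceConfig.get_config()
print(config)
# e.g. {'details': True, 'max_new_tokens': 256, 'return_full_text': False, 'temperature': 0.2}

# merge step from Huggingface.completion(): caller-supplied params always win
optional_params = {"temperature": 0.9}
for k, v in config.items():
    if k not in optional_params:
        optional_params[k] = v
print(optional_params["temperature"])  # -> 0.9, the caller's value is kept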
63,334
litellm.llms.huggingface_restapi
__init__
null
def __init__(
    self,
    best_of: Optional[int] = None,
    decoder_input_details: Optional[bool] = None,
    details: Optional[bool] = None,
    max_new_tokens: Optional[int] = None,
    repetition_penalty: Optional[float] = None,
    return_full_text: Optional[bool] = None,
    seed: Optional[int] = None,
    temperature: Optional[float] = None,
    top_k: Optional[int] = None,
    top_n_tokens: Optional[int] = None,
    top_p: Optional[int] = None,
    truncate: Optional[int] = None,
    typical_p: Optional[float] = None,
    watermark: Optional[bool] = None,
) -> None:
    locals_ = locals()
    for key, value in locals_.items():
        if key != "self" and value is not None:
            setattr(self.__class__, key, value)
(self, best_of: Optional[int] = None, decoder_input_details: Optional[bool] = None, details: Optional[bool] = None, max_new_tokens: Optional[int] = None, repetition_penalty: Optional[float] = None, return_full_text: Optional[bool] = None, seed: Optional[int] = None, temperature: Optional[float] = None, top_k: Optional[int] = None, top_n_tokens: Optional[int] = None, top_p: Optional[int] = None, truncate: Optional[int] = None, typical_p: Optional[float] = None, watermark: Optional[bool] = None) -> NoneType
63,335
litellm.llms.huggingface_restapi
get_supported_openai_params
null
def get_supported_openai_params(self):
    return [
        "stream",
        "temperature",
        "max_tokens",
        "top_p",
        "stop",
        "n",
        "echo",
    ]
(self)
63,336
litellm.llms.huggingface_restapi
map_openai_params
null
def map_openai_params(
    self, non_default_params: dict, optional_params: dict
) -> dict:
    for param, value in non_default_params.items():
        # temperature, top_p, n, stream, stop, max_tokens, n, presence_penalty default to None
        if param == "temperature":
            if value == 0.0 or value == 0:
                # hugging face exception raised when temp==0
                # Failed: Error occurred: HuggingfaceException - Input validation error: `temperature` must be strictly positive
                value = 0.01
            optional_params["temperature"] = value
        if param == "top_p":
            optional_params["top_p"] = value
        if param == "n":
            optional_params["best_of"] = value
            optional_params["do_sample"] = (
                True  # Need to sample if you want best of for hf inference endpoints
            )
        if param == "stream":
            optional_params["stream"] = value
        if param == "stop":
            optional_params["stop"] = value
        if param == "max_tokens":
            # HF TGI raises the following exception when max_new_tokens==0
            # Failed: Error occurred: HuggingfaceException - Input validation error: `max_new_tokens` must be strictly positive
            if value == 0:
                value = 1
            optional_params["max_new_tokens"] = value
        if param == "echo":
            # https://huggingface.co/docs/huggingface_hub/main/en/package_reference/inference_client#huggingface_hub.InferenceClient.text_generation.decoder_input_details
            # Return the decoder input token logprobs and ids. You must set details=True as well for it to be taken into account. Defaults to False
            optional_params["decoder_input_details"] = True
    return optional_params
(self, non_default_params: dict, optional_params: dict) -> dict
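A short sketch of `map_openai_params` handling the documented edge cases (temperature 0 bumped to 0.01, max_tokens 0 bumped to 1, `n` mapped to `best_of` plus `do_sample`):

import litellm

cfg = litellm.HuggingfaceConfig()

mapped = cfg.map_openai_params(
    non_default_params={"temperature": 0, "max_tokens": 0, "n": 2, "stream": True},
    optional_params={},
)
print(mapped)
# {'temperature': 0.01, 'max_new_tokens': 1, 'best_of': 2, 'do_sample': True, 'stream': True}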
63,337
litellm.llms.watsonx
IBMWatsonXAIConfig
Reference: https://cloud.ibm.com/apidocs/watsonx-ai#text-generation
(See ibm_watsonx_ai.metanames.GenTextParamsMetaNames for a list of all available params)

Supported params for all available watsonx.ai foundational models.

- `decoding_method` (str): One of "greedy" or "sample"
- `temperature` (float): Sets the model temperature for sampling - not available when decoding_method='greedy'.
- `max_new_tokens` (integer): Maximum length of the generated tokens.
- `min_new_tokens` (integer): Minimum length of the generated tokens.
- `length_penalty` (dict): A dictionary with keys "decay_factor" and "start_index".
- `stop_sequences` (string[]): list of strings to use as stop sequences.
- `top_k` (integer): top k for sampling - not available when decoding_method='greedy'.
- `top_p` (float): top p for sampling - not available when decoding_method='greedy'.
- `repetition_penalty` (float): token repetition penalty during text generation.
- `truncate_input_tokens` (integer): Truncate input tokens to this length.
- `include_stop_sequences` (bool): If True, the stop sequence will be included at the end of the generated text in the case of a match.
- `return_options` (dict): A dictionary of options to return. Options include "input_text", "generated_tokens", "input_tokens", "token_ranks". Values are boolean.
- `random_seed` (integer): Random seed for text generation.
- `moderations` (dict): Dictionary of properties that control the moderations, for usages such as Hate and profanity (HAP) and PII filtering.
- `stream` (bool): If True, the model will return a stream of responses.
class IBMWatsonXAIConfig:
    """
    Reference: https://cloud.ibm.com/apidocs/watsonx-ai#text-generation
    (See ibm_watsonx_ai.metanames.GenTextParamsMetaNames for a list of all available params)

    Supported params for all available watsonx.ai foundational models.

    - `decoding_method` (str): One of "greedy" or "sample"
    - `temperature` (float): Sets the model temperature for sampling - not available when decoding_method='greedy'.
    - `max_new_tokens` (integer): Maximum length of the generated tokens.
    - `min_new_tokens` (integer): Minimum length of the generated tokens.
    - `length_penalty` (dict): A dictionary with keys "decay_factor" and "start_index".
    - `stop_sequences` (string[]): list of strings to use as stop sequences.
    - `top_k` (integer): top k for sampling - not available when decoding_method='greedy'.
    - `top_p` (float): top p for sampling - not available when decoding_method='greedy'.
    - `repetition_penalty` (float): token repetition penalty during text generation.
    - `truncate_input_tokens` (integer): Truncate input tokens to this length.
    - `include_stop_sequences` (bool): If True, the stop sequence will be included at the end of the generated text in the case of a match.
    - `return_options` (dict): A dictionary of options to return. Options include "input_text", "generated_tokens", "input_tokens", "token_ranks". Values are boolean.
    - `random_seed` (integer): Random seed for text generation.
    - `moderations` (dict): Dictionary of properties that control the moderations, for usages such as Hate and profanity (HAP) and PII filtering.
    - `stream` (bool): If True, the model will return a stream of responses.
    """

    decoding_method: Optional[str] = "sample"
    temperature: Optional[float] = None
    max_new_tokens: Optional[int] = None  # litellm.max_tokens
    min_new_tokens: Optional[int] = None
    length_penalty: Optional[dict] = None  # e.g {"decay_factor": 2.5, "start_index": 5}
    stop_sequences: Optional[List[str]] = None  # e.g ["}", ")", "."]
    top_k: Optional[int] = None
    top_p: Optional[float] = None
    repetition_penalty: Optional[float] = None
    truncate_input_tokens: Optional[int] = None
    include_stop_sequences: Optional[bool] = False
    return_options: Optional[Dict[str, bool]] = None
    random_seed: Optional[int] = None  # e.g 42
    moderations: Optional[dict] = None
    stream: Optional[bool] = False

    def __init__(
        self,
        decoding_method: Optional[str] = None,
        temperature: Optional[float] = None,
        max_new_tokens: Optional[int] = None,
        min_new_tokens: Optional[int] = None,
        length_penalty: Optional[dict] = None,
        stop_sequences: Optional[List[str]] = None,
        top_k: Optional[int] = None,
        top_p: Optional[float] = None,
        repetition_penalty: Optional[float] = None,
        truncate_input_tokens: Optional[int] = None,
        include_stop_sequences: Optional[bool] = None,
        return_options: Optional[dict] = None,
        random_seed: Optional[int] = None,
        moderations: Optional[dict] = None,
        stream: Optional[bool] = None,
        **kwargs,
    ) -> None:
        locals_ = locals()
        for key, value in locals_.items():
            if key != "self" and value is not None:
                setattr(self.__class__, key, value)

    @classmethod
    def get_config(cls):
        return {
            k: v
            for k, v in cls.__dict__.items()
            if not k.startswith("__")
            and not isinstance(
                v,
                (
                    types.FunctionType,
                    types.BuiltinFunctionType,
                    classmethod,
                    staticmethod,
                ),
            )
            and v is not None
        }

    def get_supported_openai_params(self):
        return [
            "temperature",  # equivalent to temperature
            "max_tokens",  # equivalent to max_new_tokens
            "top_p",  # equivalent to top_p
            "frequency_penalty",  # equivalent to repetition_penalty
            "stop",  # equivalent to stop_sequences
            "seed",  # equivalent to random_seed
            "stream",  # equivalent to stream
        ]

    def get_mapped_special_auth_params(self) -> dict:
        """
        Common auth params across bedrock/vertex_ai/azure/watsonx
        """
        return {
            "project": "watsonx_project",
            "region_name": "watsonx_region_name",
            "token": "watsonx_token",
        }

    def map_special_auth_params(self, non_default_params: dict, optional_params: dict):
        mapped_params = self.get_mapped_special_auth_params()
        for param, value in non_default_params.items():
            if param in mapped_params:
                optional_params[mapped_params[param]] = value
        return optional_params

    def get_eu_regions(self) -> List[str]:
        """
        Source: https://www.ibm.com/docs/en/watsonx/saas?topic=integrations-regional-availability
        """
        return [
            "eu-de",
            "eu-gb",
        ]
(decoding_method: Optional[str] = None, temperature: Optional[float] = None, max_new_tokens: Optional[int] = None, min_new_tokens: Optional[int] = None, length_penalty: Optional[dict] = None, stop_sequences: Optional[List[str]] = None, top_k: Optional[int] = None, top_p: Optional[float] = None, repetition_penalty: Optional[float] = None, truncate_input_tokens: Optional[int] = None, include_stop_sequences: Optional[bool] = None, return_options: Optional[Dict[str, bool]] = None, random_seed: Optional[int] = None, moderations: Optional[dict] = None, stream: Optional[bool] = None, **kwargs) -> None
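A brief usage sketch of the watsonx auth-mapping helpers, grounded in the methods above; the project id is a placeholder:

from litellm.llms.watsonx import IBMWatsonXAIConfig

cfg = IBMWatsonXAIConfig()

# unified auth params -> watsonx-specific keys
optional_params: dict = {}
cfg.map_special_auth_params(
    non_default_params={"project": "my-project-id", "region_name": "eu-de"},
    optional_params=optional_params,
)
print(optional_params)
# {'watsonx_project': 'my-project-id', 'watsonx_region_name': 'eu-de'}

print(cfg.get_supported_openai_params())
# ['temperature', 'max_tokens', 'top_p', 'frequency_penalty', 'stop', 'seed', 'stream']

print(cfg.get_eu_regions())
# ['eu-de', 'eu-gb']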