Niansuh committed on
Commit
f894e42
·
verified ·
1 Parent(s): 65d2efc

Update api/utils.py

Browse files
Files changed (1) hide show
  1. api/utils.py +13 -15
api/utils.py CHANGED
@@ -3,15 +3,12 @@ import json
3
  import uuid
4
  import asyncio
5
  import random
6
- import string
7
  from typing import Any, Dict, Optional
8
  import os
9
- from fastapi import HTTPException, Request, Response
10
- from fastapi.responses import JSONResponse
11
  from dotenv import load_dotenv
12
-
13
  import httpx
14
- from api import validate # Import validate to use getHid
15
  from api.config import (
16
  MODEL_MAPPING,
17
  get_headers_api_chat,
@@ -44,25 +41,26 @@ def check_rate_limit(ip: str):
44
  """Check if the IP has exceeded the request limit per minute."""
45
  current_time = datetime.now()
46
  if ip not in request_counts:
 
47
  request_counts[ip] = {"count": 1, "timestamp": current_time}
48
  logger.info(f"New IP {ip} added to request counts.")
49
  else:
50
  ip_data = request_counts[ip]
51
- # Reset the count if the timestamp is more than a minute old
52
- if current_time - ip_data["timestamp"] > timedelta(minutes=1):
53
- request_counts[ip] = {"count": 1, "timestamp": current_time}
54
- logger.info(f"Request count reset for IP {ip}.")
55
- else:
56
- # Increment the count and check if it exceeds the limit
57
  ip_data["count"] += 1
58
  logger.info(f"IP {ip} made request number {ip_data['count']}.")
59
-
60
  if ip_data["count"] > REQUEST_LIMIT_PER_MINUTE:
61
  logger.warning(f"Rate limit exceeded for IP {ip}.")
62
  raise HTTPException(
63
  status_code=429,
64
  detail={"error": {"message": "Rate limit exceeded. Please wait and try again.", "type": "rate_limit"}},
65
  )
 
 
 
 
66
 
67
  # Helper function to create chat completion data
68
  def create_chat_completion_data(
@@ -116,7 +114,7 @@ def get_referer_url() -> str:
116
 
117
  # Process streaming response with headers from config.py
118
  async def process_streaming_response(request: ChatRequest, request_obj: Request):
119
- referer_url = get_referer_url() # Get the base URL without model-specific logic
120
  logger.info(f"Processing streaming response - Model: {request.model} - URL: {referer_url}")
121
 
122
  # Get the IP address and check rate limit
@@ -189,7 +187,7 @@ async def process_streaming_response(request: ChatRequest, request_obj: Request)
189
 
190
  # Process non-streaming response with headers from config.py
191
  async def process_non_streaming_response(request: ChatRequest, request_obj: Request):
192
- referer_url = get_referer_url() # Get the base URL without model-specific logic
193
  logger.info(f"Processing non-streaming response - Model: {request.model} - URL: {referer_url}")
194
 
195
  # Get the IP address and check rate limit
@@ -202,7 +200,7 @@ async def process_non_streaming_response(request: ChatRequest, request_obj: Requ
202
 
203
  headers_api_chat = get_headers_api_chat(referer_url)
204
  headers_chat = get_headers_chat(referer_url, next_action=str(uuid.uuid4()), next_router_state_tree=json.dumps([""]))
205
- validated_token = validate.getHid() # Get the validated token from validate.py
206
 
207
  if request.model == 'o1-preview':
208
  delay_seconds = random.randint(20, 60)
 
3
  import uuid
4
  import asyncio
5
  import random
 
6
  from typing import Any, Dict, Optional
7
  import os
8
+ from fastapi import HTTPException, Request
 
9
  from dotenv import load_dotenv
 
10
  import httpx
11
+ from api import validate
12
  from api.config import (
13
  MODEL_MAPPING,
14
  get_headers_api_chat,
 
41
  """Check if the IP has exceeded the request limit per minute."""
42
  current_time = datetime.now()
43
  if ip not in request_counts:
44
+ # If the IP is new, initialize its counter and timestamp
45
  request_counts[ip] = {"count": 1, "timestamp": current_time}
46
  logger.info(f"New IP {ip} added to request counts.")
47
  else:
48
  ip_data = request_counts[ip]
49
+ # Check if the timestamp is more than a minute old
50
+ if current_time - ip_data["timestamp"] < timedelta(minutes=1):
51
+ # If within the same minute, increment the count
 
 
 
52
  ip_data["count"] += 1
53
  logger.info(f"IP {ip} made request number {ip_data['count']}.")
 
54
  if ip_data["count"] > REQUEST_LIMIT_PER_MINUTE:
55
  logger.warning(f"Rate limit exceeded for IP {ip}.")
56
  raise HTTPException(
57
  status_code=429,
58
  detail={"error": {"message": "Rate limit exceeded. Please wait and try again.", "type": "rate_limit"}},
59
  )
60
+ else:
61
+ # If more than a minute has passed, reset the count and timestamp
62
+ request_counts[ip] = {"count": 1, "timestamp": current_time}
63
+ logger.info(f"Request count reset for IP {ip}.")
64
 
65
  # Helper function to create chat completion data
66
  def create_chat_completion_data(
 
114
 
115
  # Process streaming response with headers from config.py
116
  async def process_streaming_response(request: ChatRequest, request_obj: Request):
117
+ referer_url = get_referer_url()
118
  logger.info(f"Processing streaming response - Model: {request.model} - URL: {referer_url}")
119
 
120
  # Get the IP address and check rate limit
 
187
 
188
  # Process non-streaming response with headers from config.py
189
  async def process_non_streaming_response(request: ChatRequest, request_obj: Request):
190
+ referer_url = get_referer_url()
191
  logger.info(f"Processing non-streaming response - Model: {request.model} - URL: {referer_url}")
192
 
193
  # Get the IP address and check rate limit
 
200
 
201
  headers_api_chat = get_headers_api_chat(referer_url)
202
  headers_chat = get_headers_chat(referer_url, next_action=str(uuid.uuid4()), next_router_state_tree=json.dumps([""]))
203
+ validated_token = validate.getHid()
204
 
205
  if request.model == 'o1-preview':
206
  delay_seconds = random.randint(20, 60)