akhaliq HF Staff committed on
Commit
46d7c11
·
1 Parent(s): f777467

add claude opus 4.1

Browse files
Files changed (2) hide show
  1. README.md +2 -0
  2. app.py +18 -0
README.md CHANGED
@@ -20,6 +20,7 @@ AnyCoder is an AI-powered code generator that helps you create applications by d
20
  ## Features
21
 
22
  - **Multi-Model Support**: Choose from Moonshot Kimi-K2, DeepSeek V3, DeepSeek R1, ERNIE-4.5-VL, MiniMax M1, Qwen3-235B-A22B, Qwen3-30B-A3B-Instruct-2507, Qwen3-30B-A3B-Thinking-2507, SmolLM3-3B, GLM-4.1V-9B-Thinking, Gemini 2.5 Flash and Gemini 2.5 Pro (OpenAI-compatible)
 
23
  - **Flexible Input**: Describe your app in text, upload a UI design image (for multimodal models), provide a reference file (PDF, TXT, MD, CSV, DOCX, or image), or enter a website URL for redesign
24
  - **Web Search Integration**: Enable real-time web search (Tavily, with advanced search depth) to enhance code generation with up-to-date information and best practices
25
  - **Code Generation**: Generate code in HTML, Python, JS, and more. Special support for transformers.js apps (outputs index.html, index.js, style.css)
@@ -83,6 +84,7 @@ python app.py
83
  - GLM-4.1V-9B-Thinking (multimodal)
84
  - GPT-5 (via Poe)
85
  - Grok-4 (via Poe)
 
86
  - Gemini 2.5 Flash (OpenAI-compatible)
87
  - Gemini 2.5 Pro (OpenAI-compatible)
88
 
 
20
  ## Features
21
 
22
  - **Multi-Model Support**: Choose from Moonshot Kimi-K2, DeepSeek V3, DeepSeek R1, ERNIE-4.5-VL, MiniMax M1, Qwen3-235B-A22B, Qwen3-30B-A3B-Instruct-2507, Qwen3-30B-A3B-Thinking-2507, SmolLM3-3B, GLM-4.1V-9B-Thinking, Gemini 2.5 Flash and Gemini 2.5 Pro (OpenAI-compatible)
23
+ - Claude-Opus-4.1 (via Poe)
24
  - **Flexible Input**: Describe your app in text, upload a UI design image (for multimodal models), provide a reference file (PDF, TXT, MD, CSV, DOCX, or image), or enter a website URL for redesign
25
  - **Web Search Integration**: Enable real-time web search (Tavily, with advanced search depth) to enhance code generation with up-to-date information and best practices
26
  - **Code Generation**: Generate code in HTML, Python, JS, and more. Special support for transformers.js apps (outputs index.html, index.js, style.css)
 
84
  - GLM-4.1V-9B-Thinking (multimodal)
85
  - GPT-5 (via Poe)
86
  - Grok-4 (via Poe)
87
+ - Claude-Opus-4.1 (via Poe)
88
  - Gemini 2.5 Flash (OpenAI-compatible)
89
  - Gemini 2.5 Pro (OpenAI-compatible)
90
 
app.py CHANGED
@@ -541,6 +541,11 @@ AVAILABLE_MODELS = [
541
  "name": "Grok-4",
542
  "id": "grok-4",
543
  "description": "Grok-4 model via Poe (OpenAI-compatible) for advanced tasks"
 
 
 
 
 
544
  }
545
  ]
546
 
@@ -654,6 +659,12 @@ def get_inference_client(model_id, provider="auto"):
654
  api_key=os.getenv("POE_API_KEY"),
655
  base_url="https://api.poe.com/v1"
656
  )
 
 
 
 
 
 
657
  elif model_id == "step-3":
658
  # Use StepFun API client for Step-3 model
659
  return OpenAI(
@@ -2685,6 +2696,13 @@ This will help me create a better design for you."""
2685
  stream=True,
2686
  max_tokens=16384
2687
  )
 
 
 
 
 
 
 
2688
  else:
2689
  completion = client.chat.completions.create(
2690
  model=_current_model["id"],
 
541
  "name": "Grok-4",
542
  "id": "grok-4",
543
  "description": "Grok-4 model via Poe (OpenAI-compatible) for advanced tasks"
544
+ },
545
+ {
546
+ "name": "Claude-Opus-4.1",
547
+ "id": "claude-opus-4.1",
548
+ "description": "Anthropic Claude Opus 4.1 via Poe (OpenAI-compatible)"
549
  }
550
  ]
551
 
 
659
  api_key=os.getenv("POE_API_KEY"),
660
  base_url="https://api.poe.com/v1"
661
  )
662
+ elif model_id == "claude-opus-4.1":
663
+ # Use Poe (OpenAI-compatible) client for Claude-Opus-4.1
664
+ return OpenAI(
665
+ api_key=os.getenv("POE_API_KEY"),
666
+ base_url="https://api.poe.com/v1"
667
+ )
668
  elif model_id == "step-3":
669
  # Use StepFun API client for Step-3 model
670
  return OpenAI(
 
2696
  stream=True,
2697
  max_tokens=16384
2698
  )
2699
+ elif _current_model["id"] == "claude-opus-4.1":
2700
+ completion = client.chat.completions.create(
2701
+ model="Claude-Opus-4.1",
2702
+ messages=messages,
2703
+ stream=True,
2704
+ max_tokens=16384
2705
+ )
2706
  else:
2707
  completion = client.chat.completions.create(
2708
  model=_current_model["id"],