akhaliq (HF Staff) committed
Commit 905ef08 · 1 Parent(s): d1c02ef

update models

anycoder_app/config.py CHANGED
@@ -157,7 +157,7 @@ AVAILABLE_MODELS = [
 k2_model_name_tag = "moonshotai/Kimi-K2-Thinking"
 
 # Default model selection
-DEFAULT_MODEL_NAME = "MiniMax M2"
+DEFAULT_MODEL_NAME = "GLM-4.6"
 DEFAULT_MODEL = None
 for _m in AVAILABLE_MODELS:
     if _m.get("name") == DEFAULT_MODEL_NAME:
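For reference, a minimal sketch of how the default-model lookup above resolves the new name. The loop body past the end of the hunk is an assumption (the commit only shows the lines above), and the trimmed AVAILABLE_MODELS is illustrative, not the full registry in config.py.

```python
# Assumed shape of AVAILABLE_MODELS entries; trimmed for illustration only.
AVAILABLE_MODELS = [
    {"name": "MiniMax M2", "id": "MiniMaxAI/MiniMax-M2"},
    {"name": "GLM-4.6", "id": "zai-org/GLM-4.6"},
]

DEFAULT_MODEL_NAME = "GLM-4.6"
DEFAULT_MODEL = None
for _m in AVAILABLE_MODELS:
    if _m.get("name") == DEFAULT_MODEL_NAME:
        DEFAULT_MODEL = _m  # assumed loop body: keep the matching entry
        break

print(DEFAULT_MODEL)  # {'name': 'GLM-4.6', 'id': 'zai-org/GLM-4.6'}
```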
anycoder_app/models.py CHANGED
@@ -179,8 +179,8 @@ def get_inference_client(model_id, provider="auto"):
             base_url="https://api.moonshot.ai/v1",
         )
     elif model_id == "moonshotai/Kimi-K2-Thinking":
-        # Use HuggingFace InferenceClient with Novita provider for Kimi K2 Thinking
-        provider = "novita"
+        # Use HuggingFace InferenceClient with Together AI provider for Kimi K2 Thinking
+        provider = "together"
     elif model_id == "stealth-model-1":
         # Use stealth model with generic configuration
         api_key = os.getenv("STEALTH_MODEL_1_API_KEY")
@@ -206,8 +206,8 @@ def get_inference_client(model_id, provider="auto"):
     elif model_id == "zai-org/GLM-4.5":
         provider = "fireworks-ai"
     elif model_id == "zai-org/GLM-4.6":
-        # Use auto provider for GLM-4.6, HuggingFace will select best available
-        provider = "auto"
+        # Use Cerebras provider for GLM-4.6 via HuggingFace
+        provider = "cerebras"
     return InferenceClient(
         provider=provider,
         api_key=HF_TOKEN,
@@ -225,8 +225,11 @@ def get_real_model_id(model_id: str) -> str:
 
         return real_model_id
     elif model_id == "zai-org/GLM-4.6":
-        # GLM-4.6 requires provider suffix in model string for API calls
-        return "zai-org/GLM-4.6:zai-org"
+        # GLM-4.6 requires Cerebras provider suffix in model string for API calls
+        return "zai-org/GLM-4.6:cerebras"
+    elif model_id == "moonshotai/Kimi-K2-Thinking":
+        # Kimi K2 Thinking needs Together AI provider
+        return "moonshotai/Kimi-K2-Thinking:together"
     return model_id
 
 # Type definitions
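Taken together, get_inference_client() now pins Kimi K2 Thinking to Together AI and GLM-4.6 to Cerebras, while get_real_model_id() appends the matching provider suffix to the model string. A minimal usage sketch under those assumptions; the prompt and the surrounding call site are illustrative, not part of this commit.

```python
import os
from huggingface_hub import InferenceClient

# Mirrors the Kimi K2 Thinking branch above: "together" provider, HF token auth.
client = InferenceClient(
    provider="together",
    api_key=os.getenv("HF_TOKEN"),
)

# Provider-suffixed id as returned by get_real_model_id() after this commit.
real_model_id = "moonshotai/Kimi-K2-Thinking:together"

# Illustrative request; the message content is an assumption.
response = client.chat_completion(
    model=real_model_id,
    messages=[{"role": "user", "content": "Say hello from Kimi K2 Thinking."}],
)
print(response.choices[0].message.content)
```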
backend_api.py CHANGED
@@ -99,6 +99,13 @@ AVAILABLE_MODELS = [
     {"name": "DeepSeek V3.2-Exp", "id": "deepseek-ai/DeepSeek-V3.2-Exp", "description": "DeepSeek V3.2 Experimental via HuggingFace"},
     {"name": "DeepSeek R1", "id": "deepseek-ai/DeepSeek-R1-0528", "description": "DeepSeek R1 model for code generation"},
     {"name": "GPT-5", "id": "gpt-5", "description": "OpenAI GPT-5 via OpenRouter"},
+    {"name": "GPT-5.1", "id": "gpt-5.1", "description": "OpenAI GPT-5.1 model via Poe for advanced code generation and general tasks"},
+    {"name": "GPT-5.1 Instant", "id": "gpt-5.1-instant", "description": "OpenAI GPT-5.1 Instant model via Poe for fast responses"},
+    {"name": "GPT-5.1 Codex", "id": "gpt-5.1-codex", "description": "OpenAI GPT-5.1 Codex model via Poe optimized for code generation"},
+    {"name": "Claude-Sonnet-4.5", "id": "claude-sonnet-4.5", "description": "Anthropic Claude Sonnet 4.5 via Poe (OpenAI-compatible)"},
+    {"name": "Claude-Haiku-4.5", "id": "claude-haiku-4.5", "description": "Anthropic Claude Haiku 4.5 via Poe (OpenAI-compatible)"},
+    {"name": "Kimi K2 Thinking", "id": "moonshotai/Kimi-K2-Thinking", "description": "Moonshot Kimi K2 Thinking model via HuggingFace with Together AI provider"},
+    {"name": "GLM-4.6", "id": "zai-org/GLM-4.6", "description": "GLM-4.6 model via HuggingFace with Cerebras provider"},
     {"name": "Gemini Flash Latest", "id": "gemini-flash-latest", "description": "Google Gemini Flash via OpenRouter"},
     {"name": "Qwen3 Max Preview", "id": "qwen3-max-preview", "description": "Qwen3 Max Preview via DashScope API"},
 ]
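A small sketch of how a backend endpoint might look an entry up in this registry by id, e.g. to validate a model requested by the frontend. The helper name and the error handling are assumptions, not part of the commit.

```python
# Trimmed registry for illustration; the real entries live in backend_api.py.
AVAILABLE_MODELS = [
    {"name": "GLM-4.6", "id": "zai-org/GLM-4.6",
     "description": "GLM-4.6 model via HuggingFace with Cerebras provider"},
    {"name": "Kimi K2 Thinking", "id": "moonshotai/Kimi-K2-Thinking",
     "description": "Moonshot Kimi K2 Thinking model via HuggingFace with Together AI provider"},
]

def find_model(model_id: str):
    """Return the registry entry for model_id, or None if it is not offered."""
    return next((m for m in AVAILABLE_MODELS if m["id"] == model_id), None)

entry = find_model("zai-org/GLM-4.6")
if entry is None:
    raise ValueError("Unknown model id")
print(f"{entry['name']}: {entry['description']}")
```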
backend_models.py CHANGED
@@ -227,6 +227,14 @@ def get_inference_client(model_id: str, provider: str = "auto"):
             default_headers={"X-HF-Bill-To": "huggingface"}
         )
 
+    elif model_id.startswith("moonshotai/Kimi-K2"):
+        # Kimi K2 models via HuggingFace Router
+        return OpenAI(
+            base_url="https://router.huggingface.co/v1",
+            api_key=os.getenv("HF_TOKEN"),
+            default_headers={"X-HF-Bill-To": "huggingface"}
+        )
+
     elif model_id == "stealth-model-1":
         # Use stealth model with generic configuration
         api_key = os.getenv("STEALTH_MODEL_1_API_KEY")
@@ -260,16 +268,16 @@ def get_real_model_id(model_id: str) -> str:
         return real_model_id
 
     elif model_id == "zai-org/GLM-4.6":
-        # GLM-4.6 requires provider suffix in model string for API calls
-        return "zai-org/GLM-4.6:zai-org"
+        # GLM-4.6 requires Cerebras provider suffix in model string for API calls
+        return "zai-org/GLM-4.6:cerebras"
 
     elif model_id == "MiniMaxAI/MiniMax-M2":
         # MiniMax M2 needs Novita provider suffix
         return "MiniMaxAI/MiniMax-M2:novita"
 
     elif model_id == "moonshotai/Kimi-K2-Thinking":
-        # Kimi K2 Thinking needs Novita provider
-        return "moonshotai/Kimi-K2-Thinking:novita"
+        # Kimi K2 Thinking needs Together AI provider
+        return "moonshotai/Kimi-K2-Thinking:together"
 
     elif model_id == "moonshotai/Kimi-K2-Instruct":
         # Kimi K2 Instruct needs Groq provider
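The new Kimi K2 branch returns a plain OpenAI-compatible client pointed at the HuggingFace router, so the ":together" suffix from get_real_model_id() is what actually routes the request to Together AI. A minimal end-to-end sketch under that assumption; the prompt and variable names are illustrative only.

```python
import os
from openai import OpenAI

# OpenAI-compatible client against the HuggingFace router, as in the diff above.
client = OpenAI(
    base_url="https://router.huggingface.co/v1",
    api_key=os.getenv("HF_TOKEN"),
    default_headers={"X-HF-Bill-To": "huggingface"},
)

# Provider-suffixed id produced by get_real_model_id() after this commit.
completion = client.chat.completions.create(
    model="moonshotai/Kimi-K2-Thinking:together",
    messages=[{"role": "user", "content": "Summarize what this routing change does."}],
)
print(completion.choices[0].message.content)
```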
frontend/src/app/page.tsx CHANGED
@@ -17,7 +17,7 @@ export default function Home() {
 
   const [generatedCode, setGeneratedCode] = useState('');
   const [selectedLanguage, setSelectedLanguage] = useState<Language>('html');
-  const [selectedModel, setSelectedModel] = useState('MiniMaxAI/MiniMax-M2');
+  const [selectedModel, setSelectedModel] = useState('zai-org/GLM-4.6');
   const [isGenerating, setIsGenerating] = useState(false);
   const [isAuthenticated, setIsAuthenticated] = useState(false);
   const [currentRepoId, setCurrentRepoId] = useState<string | null>(null); // Track imported/deployed space
frontend/src/components/LandingPage.tsx CHANGED
@@ -26,7 +26,7 @@ export default function LandingPage({
   onStart,
   isAuthenticated,
   initialLanguage = 'html',
-  initialModel = 'MiniMaxAI/MiniMax-M2',
+  initialModel = 'zai-org/GLM-4.6',
   onAuthChange
 }: LandingPageProps) {
   const [prompt, setPrompt] = useState('');
@@ -52,7 +52,18 @@ export default function LandingPage({
   // Trending apps state
   const [trendingApps, setTrendingApps] = useState<any[]>([]);
 
+  // Debug effect for dropdown state
   useEffect(() => {
+    console.log('showModelDropdown state changed to:', showModelDropdown);
+  }, [showModelDropdown]);
+
+  // Debug effect for models state
+  useEffect(() => {
+    console.log('models state changed, length:', models.length, 'models:', models);
+  }, [models]);
+
+  useEffect(() => {
+    console.log('Component mounted, initial load starting...');
     loadData();
     handleOAuthInit();
     loadTrendingApps();
@@ -142,17 +153,24 @@ export default function LandingPage({
   }, []);
 
   const loadData = async () => {
+    console.log('loadData called');
     setIsLoading(true);
     await Promise.all([loadModels(), loadLanguages()]);
     setIsLoading(false);
+    console.log('loadData completed');
   };
 
   const loadModels = async () => {
     try {
+      console.log('Loading models...');
       const modelsList = await apiClient.getModels();
+      console.log('Models loaded successfully:', modelsList);
+      console.log('Number of models:', modelsList.length);
       setModels(modelsList);
+      console.log('Models state updated');
     } catch (error) {
       console.error('Failed to load models:', error);
+      setModels([]); // Set empty array on error
     }
   };
 
@@ -322,7 +340,9 @@ export default function LandingPage({
             <div className="relative" ref={languageDropdownRef}>
               <button
                 type="button"
-                onClick={() => {
+                onClick={(e) => {
+                  e.preventDefault();
+                  e.stopPropagation();
                   setShowLanguageDropdown(!showLanguageDropdown);
                   setShowModelDropdown(false);
                 }}
@@ -343,7 +363,10 @@ export default function LandingPage({
 
             {/* Language Dropdown Menu */}
             {showLanguageDropdown && !isLoading && languages.length > 0 && (
-              <div className="absolute bottom-full left-0 mb-2 w-48 bg-[#1d1d1f] border border-[#424245] rounded-xl shadow-2xl overflow-hidden backdrop-blur-xl">
+              <div
+                className="absolute bottom-full left-0 mb-2 w-48 bg-[#1d1d1f] border border-[#424245] rounded-xl shadow-2xl overflow-hidden backdrop-blur-xl"
+                onClick={(e) => e.stopPropagation()}
+              >
                 <div className="max-h-64 overflow-y-auto py-1">
                   {languages.map((lang) => (
                     <button
@@ -369,7 +392,14 @@ export default function LandingPage({
             <div className="relative" ref={modelDropdownRef}>
               <button
                 type="button"
-                onClick={() => {
+                onClick={(e) => {
+                  e.preventDefault();
+                  e.stopPropagation();
+                  console.log('Model button clicked!');
+                  console.log('Current showModelDropdown:', showModelDropdown);
+                  console.log('Models array:', models);
+                  console.log('Models length:', models.length);
+                  console.log('Selected model:', selectedModel);
                   setShowModelDropdown(!showModelDropdown);
                   setShowLanguageDropdown(false);
                 }}
@@ -392,9 +422,15 @@ export default function LandingPage({
                 </svg>
               </button>
 
+              {/* Debug info */}
+              {console.log('Dropdown render check - showModelDropdown:', showModelDropdown, 'models.length:', models.length)}
+
             {/* Model Dropdown Menu */}
             {showModelDropdown && models.length > 0 && (
-              <div className="absolute bottom-full left-0 mb-2 w-80 bg-[#1d1d1f] border border-[#424245] rounded-xl shadow-2xl overflow-hidden backdrop-blur-xl">
+              <div
+                className="absolute top-full left-0 mt-2 w-56 bg-[#1d1d1f] border border-[#424245] rounded-xl shadow-2xl overflow-hidden backdrop-blur-xl z-50"
+                onClick={(e) => e.stopPropagation()}
+              >
                 <div className="max-h-96 overflow-y-auto py-1">
                   {models.map((model) => (
                     <button
@@ -404,18 +440,13 @@ export default function LandingPage({
                         setSelectedModel(model.id);
                         setShowModelDropdown(false);
                       }}
-                      className={`w-full px-4 py-2.5 text-left transition-colors ${
+                      className={`w-full px-4 py-2 text-left transition-colors ${
                         selectedModel === model.id
                           ? 'bg-[#2d2d2f]'
                           : 'hover:bg-[#2d2d2f]'
                       }`}
                     >
                       <div className="text-xs font-medium text-[#f5f5f7]">{model.name}</div>
-                      {model.description && (
-                        <div className="text-[10px] text-[#86868b] mt-1 leading-relaxed">
-                          {model.description}
-                        </div>
-                      )}
                     </button>
                   ))}
                 </div>