magicboris committed
Commit 5fddcf6 · verified · 1 Parent(s): f7e9277

Update backend/main.py

Files changed (1)
  1. backend/main.py +98 -362
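
In short, the commit replaces the old single `app` (which both served the API and tried to serve the Next.js build) with an API sub-app mounted under a thin top-level app. Schematically, this is a condensed sketch of the diff below, assuming the `/app/static_frontend` build directory exists in the Space container:

```python
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles

api_app = FastAPI()          # all routes (/research, /research2) attach here
app = FastAPI()              # the top-level app the Space actually serves
app.mount("/api", api_app)   # external paths stay /api/research, /api/research2
app.mount("/", StaticFiles(directory="/app/static_frontend", html=True), name="static")
```
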
backend/main.py CHANGED
@@ -1,23 +1,16 @@
- # SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- # SPDX-License-Identifier: Apache-2.0
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
  # You may obtain a copy of the License at
  #
- # http://www.apache.org/licenses/LICENSE-2.0
  #
  # Unless required by applicable law or agreed to in writing, software
  # distributed under the License is distributed on an "AS IS" BASIS,
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  # See the License for the specific language governing permissions and
  # limitations under the License.
- """
- Universal Deep Research Backend (UDR-B) - FastAPI Application
-
- This module provides the main FastAPI application for the Universal Deep Research Backend,
- offering intelligent research and reporting capabilities through streaming APIs.
- """

  import asyncio
  import json
@@ -30,12 +23,12 @@ import uvicorn
  from fastapi import FastAPI, HTTPException
  from fastapi.middleware.cors import CORSMiddleware
  from fastapi.responses import StreamingResponse
  from pydantic import BaseModel
  from uvicorn.config import LOGGING_CONFIG

  import items
-
- # Import configuration
  from config import get_config
  from frame.clients import Client, HuggingFaceClient, OpenAIClient
  from frame.harness4 import FrameConfigV4, FrameV4
@@ -46,50 +39,36 @@ from scan_research import generate_session_key
  from scan_research_dry import do_reporting as dry_reporting
  from scan_research_dry import do_research as dry_research

- # Get configuration
  config = get_config()

- app = FastAPI(
      title="Universal Deep Research Backend API",
      description="Intelligent research and reporting service using LLMs and web search",
      version="1.0.0",
  )

- # serve the Next.js build
- frontend_dir = pathlib.Path(__file__).parent.parent / "frontend"
- app.mount("/", StaticFiles(directory=frontend_dir / ".next" / "static"), name="static")
-
- # fall back to index.html
- @app.get("/{full_path:path}")
- async def spa(full_path: str):
-     return FileResponse(frontend_dir / "public" / "index.html")
-
- # Configure logging
- LOGGING_CONFIG["formatters"]["default"][
-     "fmt"
- ] = "%(asctime)s [%(name)s] %(levelprefix)s %(message)s"
-
- # Configure CORS
- app.add_middleware(
      CORSMiddleware,
-     allow_origins=[config.cors.frontend_url],  # Frontend URL from config
-     allow_credentials=config.cors.allow_credentials,
-     allow_methods=config.cors.allow_methods,
-     allow_headers=config.cors.allow_headers,
  )

-
  class Message(BaseModel):
      text: str

-
  class ResearchRequest(BaseModel):
      dry: bool = False
      session_key: Optional[str] = None
@@ -99,18 +78,10 @@ class ResearchRequest(BaseModel):
      prompt: Optional[str] = None
      mock_directory: str = "mock_instances/stocks_24th_3_sections"

-
- @app.get("/")
- async def root():
-     return {
-         "message": "The Deep Research Backend is running. Use the /api/research endpoint to start a new research session."
-     }
-
  def build_events_path(session_key: str) -> str:
      return f"instances/{session_key}.events.jsonl"

-
  def make_message(
      event: Dict[str, Any],
      session_key: str | None = None,
@@ -118,370 +89,135 @@ def make_message(
  ) -> str:
      if timestamp_the_event:
          event = {**event, "timestamp": datetime.now().isoformat()}
-
      if session_key:
          items.register_item(build_events_path(session_key), event)
-
      return json.dumps({"event": event, "session_key": session_key}) + "\n"

- @app.post("/api/research")
  async def start_research(request: ResearchRequest):
-     """
-     Start or continue a research process and stream the results using JSON streaming.
-
-     This endpoint initiates a comprehensive research workflow that includes:
-     - Query analysis and topic extraction
-     - Web search using Tavily API
-     - Content filtering and relevance scoring
-     - Report generation using LLMs
-
-     The response is streamed as Server-Sent Events (SSE) with real-time progress updates.
-
-     Args:
-         request (ResearchRequest): The research request containing:
-             - dry (bool): Use mock data for testing (default: False)
-             - session_key (str, optional): Existing session to continue
-             - start_from (str): "research" or "reporting" phase
-             - prompt (str): Research query (required for research phase)
-             - mock_directory (str): Directory for mock data
-
-     Returns:
-         StreamingResponse: Server-Sent Events stream with research progress
-
-     Raises:
-         HTTPException: 400 if request parameters are invalid
-
-     Example:
-         ```bash
-         curl -X POST http://localhost:8000/api/research \\
-             -H "Content-Type: application/json" \\
-             -d '{
-                 "prompt": "What are the latest developments in quantum computing?",
-                 "start_from": "research"
-             }'
-         ```
-     """
-     # Validate request parameters
      if request.start_from not in ["research", "reporting"]:
-         raise HTTPException(
-             status_code=400,
-             detail="start_from must be either 'research' or 'reporting'",
-         )
-
      if request.start_from == "reporting" and not request.session_key:
-         raise HTTPException(
-             status_code=400,
-             detail="session_key is required when starting from reporting phase",
-         )
-
      if request.start_from == "research" and not request.prompt:
-         raise HTTPException(
-             status_code=400,
-             detail="prompt is required when starting from research phase",
-         )
-
-     # Use configured mock directory
      mock_dir = request.mock_directory or config.research.mock_directory
-
-     # Choose implementation
-     research_impl = (
-         (lambda session_key, prompt: dry_research(session_key, prompt, mock_dir))
-         if request.dry
-         else real_research
-     )
-     reporting_impl = (
-         (lambda session_key: dry_reporting(session_key, mock_dir))
-         if request.dry
-         else real_reporting
-     )
-
-     # Generate or use provided session key
      session_key = request.session_key or generate_session_key()
-
-     # Prepare generators
-     research_gen = (
-         research_impl(session_key, request.prompt)
-         if request.start_from == "research"
-         else None
-     )
      reporting_gen = reporting_impl(session_key)
-
      return StreamingResponse(
-         stream_research_events(
-             research_gen, reporting_gen, request.start_from == "research", session_key
-         ),
          media_type="application/x-ndjson",
-         headers={
-             "Cache-Control": "no-cache",
-             "Connection": "keep-alive",
-             "Content-Encoding": "none",
-         },
      )

  async def stream_research_events(
      research_fn: AsyncGenerator[Dict[str, Any], None],
      reporting_fn: AsyncGenerator[Dict[str, Any], None],
      do_research: bool,
      session_key: str,
  ) -> AsyncGenerator[str, None]:
-     """
-     Stream research or reporting events using JSON streaming format.
-
-     Args:
-         research_fn: Research phase generator
-         reporting_fn: Reporting phase generator
-         do_research: Whether to run research phase
-         session_key: Session identifier
-
-     Yields:
-         JSON formatted event strings, one per line
-     """
      try:
-         yield make_message(
-             {
-                 "type": "started",
-                 "description": "Waking up the Deep Research Backend",
-             },
-             session_key,
-         )
-
-         error_event_encountered: bool = False
          if do_research:
              async for event in research_fn:
                  if event["type"] == "error":
                      error_event_encountered = True
                  yield make_message(event, session_key)
-
          if not error_event_encountered:
              async for event in reporting_fn:
                  yield make_message(event, session_key)
-
-         # Send completion message
-         yield make_message(
-             {
-                 "type": "completed",
-                 "description": "Research and reporting completed",
-             },
-             session_key,
-         )
      except asyncio.CancelledError:
-         # Send cancellation message before propagating the exception
-         yield make_message(
-             {
-                 "type": "cancelled",
-                 "description": "Research was cancelled",
-             },
-             session_key,
-         )
          raise

-
- @app.post("/api/research2")
- async def start_research2(request: ResearchRequest):
-     # Validate request parameters
-     if request.start_from not in ["research"]:
-         raise HTTPException(status_code=400, detail="start_from must be 'research'")
-
-     if request.start_from == "research" and not request.prompt:
-         raise HTTPException(
-             status_code=400,
-             detail="prompt is required when starting from research phase",
-         )
-
-     # Generate or use provided session key
-     session_key = generate_session_key()
-
-     if request.strategy_id is None or request.strategy_id == "default":
-         # Validate request parameters
-         if request.start_from not in ["research", "reporting"]:
-             raise HTTPException(
-                 status_code=400,
-                 detail="start_from must be either 'research' or 'reporting'",
-             )
-
-         if request.start_from == "reporting" and not request.session_key:
-             raise HTTPException(
-                 status_code=400,
-                 detail="session_key is required when starting from reporting phase",
-             )
-
-         if request.start_from == "research" and not request.prompt:
-             raise HTTPException(
-                 status_code=400,
-                 detail="prompt is required when starting from research phase",
-             )
-
-         # Choose implementation
-         research_impl = (
-             (
-                 lambda session_key, prompt: dry_research(
-                     session_key, prompt, "mock_instances/stocks_24th_3_sections"
-                 )
-             )
-             if request.dry
-             else real_research
-         )
-         reporting_impl = (
-             (
-                 lambda session_key: dry_reporting(
-                     session_key, "mock_instances/stocks_24th_3_sections"
-                 )
-             )
-             if request.dry
-             else real_reporting
-         )
-
-         # Generate or use provided session key
-         session_key = request.session_key or generate_session_key()
-
-         # Prepare generators
-         research_gen = (
-             research_impl(session_key, request.prompt)
-             if request.start_from == "research"
-             else None
-         )
-         reporting_gen = reporting_impl(session_key)
-
-         return StreamingResponse(
-             stream_research_events(
-                 research_gen,
-                 reporting_gen,
-                 request.start_from == "research",
-                 session_key,
-             ),
-             media_type="application/x-ndjson",
-             headers={
-                 "Cache-Control": "no-cache",
-                 "Connection": "keep-alive",
-                 "Content-Encoding": "none",
-             },
-         )
-
-     return StreamingResponse(
-         stream_research2_events(
-             session_key, request.prompt, request.strategy_id, request.strategy_content
-         ),
-         media_type="application/x-ndjson",
-         headers={
-             "Cache-Control": "no-cache",
-             "Connection": "keep-alive",
-             "Content-Encoding": "none",
-         },
-     )
-
  async def stream_research2_events(
      session_key: str, prompt: str, strategy_id: str, strategy_content: str
  ) -> AsyncGenerator[str, None]:
      try:
-         yield make_message(
-             {
-                 "type": "started",
-                 "description": "Waking up the Universal Deep Research Backend",
-             },
-             session_key,
-         )
-
-         # Set the random seed from configuration
          random.seed(config.research.random_seed)
-
-         # Set trace filename using configuration
-         comm_trace_timestamp: str = datetime.now().strftime("%Y%m%d_%H-%M-%S")
-         comm_trace_filename = (
-             f"{config.logging.log_dir}/comms_{comm_trace_timestamp}.log"
-         )
-         comm_trace = Trace(
-             comm_trace_filename, copy_into_stdout=config.logging.copy_into_stdout
-         )
-
-         client: Client = OpenAIClient(
-             base_url="https://integrate.api.nvidia.com/v1",
-             model="nvdev/meta/llama-3.1-70b-instruct",
-             trace=comm_trace,
-         )
-
          frame_config = FrameConfigV4(
              long_context_cutoff=config.frame.long_context_cutoff,
              force_long_context=config.frame.force_long_context,
              max_iterations=config.frame.max_iterations,
              interaction_level=config.frame.interaction_level,
          )
-         harness = FrameV4(
-             client_profile=client,
-             errand_profile={},
-             compilation_trace=True,
-             execution_trace="file_and_stdout",
-         )
-
          messages = []
-         preamble_files = [
-             "frame/prompts/udr_minimal_generating/0.code_skill.py",
-         ]
          for path in preamble_files:
              type = path.split(".")[-2]
              with open(path, "r") as f:
-                 messages.append(
-                     {
-                         "mid": len(messages),
-                         "role": "user",
-                         "content": f.read(),
-                         "type": type,
-                     }
-                 )
-
-         messages.append(
-             {
-                 "mid": len(messages),
-                 "role": "user",
-                 "content": "The following is the prompt data to be used in later procedures.\n\nPROMPT:\n"
-                 + prompt,
-                 "type": "data",
-             }
-         )
-
-         messages.append(
-             {
-                 "mid": len(messages),
-                 "role": "user",
-                 "content": strategy_content,
-                 "type": "generating_routine",
-             }
-         )
-
          for i in range(len(messages)):
              messages_so_far = messages[: i + 1]
-             yield make_message(
-                 {
-                     "type": "generic",
-                     "description": "Processing agentic instructions: "
-                     + str(i + 1)
-                     + " of "
-                     + str(len(messages)),
-                 },
-                 session_key,
-             )
-             for notification in harness.generate_with_notifications(
-                 messages=messages_so_far,
-                 frame_config=frame_config,
-             ):
                  yield make_message(notification, session_key)
-
-         yield make_message(
-             {
-                 "type": "completed",
-                 "description": "Research completed",
-             },
-             session_key,
-         )
      except asyncio.CancelledError:
-         # Send cancellation message before propagating the exception
-         yield make_message(
-             {
-                 "type": "cancelled",
-                 "description": "Research was cancelled",
-             },
-             session_key,
-         )
          raise

+ # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
  # You may obtain a copy of the License at
  #
+ # http://www.apache.org/licenses/LICENSE-2.0
  #
  # Unless required by applicable law or agreed to in writing, software
  # distributed under the License is distributed on an "AS IS" BASIS,
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  # See the License for the specific language governing permissions and
  # limitations under the License.

  import asyncio
  import json

  from fastapi import FastAPI, HTTPException
  from fastapi.middleware.cors import CORSMiddleware
  from fastapi.responses import StreamingResponse
+ from fastapi.staticfiles import StaticFiles
  from pydantic import BaseModel
  from uvicorn.config import LOGGING_CONFIG

+ # Local imports from your original file
  import items
  from config import get_config
  from frame.clients import Client, HuggingFaceClient, OpenAIClient
  from frame.harness4 import FrameConfigV4, FrameV4

  from scan_research_dry import do_reporting as dry_reporting
  from scan_research_dry import do_research as dry_research

+ # Get the configuration
  config = get_config()

+ # ========================================================================================
+ # CHANGE FOR HUGGING FACE SPACES
+ #
+ # 1. Create a separate sub-application (sub-app) for the API.
+ # ========================================================================================
+ api_app = FastAPI(
      title="Universal Deep Research Backend API",
      description="Intelligent research and reporting service using LLMs and web search",
      version="1.0.0",
  )

+ # Configure logging
+ LOGGING_CONFIG["formatters"]["default"]["fmt"] = "%(asctime)s [%(name)s] %(levelprefix)s %(message)s"

+ # Configure CORS (can be removed if everything is deployed on a single domain)
+ api_app.add_middleware(
      CORSMiddleware,
+     allow_origins=["*"],  # Allow all origins for simplicity
+     allow_credentials=True,
+     allow_methods=["*"],
+     allow_headers=["*"],
  )

+ # Pydantic models from your file
  class Message(BaseModel):
      text: str

  class ResearchRequest(BaseModel):
      dry: bool = False
      session_key: Optional[str] = None

      prompt: Optional[str] = None
      mock_directory: str = "mock_instances/stocks_24th_3_sections"

+ # Helper functions from your file
  def build_events_path(session_key: str) -> str:
      return f"instances/{session_key}.events.jsonl"

  def make_message(
      event: Dict[str, Any],
      session_key: str | None = None,

  ) -> str:
      if timestamp_the_event:
          event = {**event, "timestamp": datetime.now().isoformat()}
      if session_key:
          items.register_item(build_events_path(session_key), event)
      return json.dumps({"event": event, "session_key": session_key}) + "\n"

+ # ========================================================================================
+ # CHANGE FOR HUGGING FACE SPACES
+ #
+ # 2. Replace every @app.post(...) endpoint with @api_app.post(...)
+ # ========================================================================================

+ @api_app.post("/research")
  async def start_research(request: ResearchRequest):
      if request.start_from not in ["research", "reporting"]:
+         raise HTTPException(status_code=400, detail="start_from must be either 'research' or 'reporting'")
      if request.start_from == "reporting" and not request.session_key:
+         raise HTTPException(status_code=400, detail="session_key is required when starting from reporting phase")
      if request.start_from == "research" and not request.prompt:
+         raise HTTPException(status_code=400, detail="prompt is required when starting from research phase")
+
      mock_dir = request.mock_directory or config.research.mock_directory
+     research_impl = (lambda session_key, prompt: dry_research(session_key, prompt, mock_dir)) if request.dry else real_research
+     reporting_impl = (lambda session_key: dry_reporting(session_key, mock_dir)) if request.dry else real_reporting
      session_key = request.session_key or generate_session_key()
+     research_gen = research_impl(session_key, request.prompt) if request.start_from == "research" else None
      reporting_gen = reporting_impl(session_key)
+
      return StreamingResponse(
+         stream_research_events(research_gen, reporting_gen, request.start_from == "research", session_key),
          media_type="application/x-ndjson",
+         headers={"Cache-Control": "no-cache", "Connection": "keep-alive", "Content-Encoding": "none"},
      )

+ @api_app.post("/research2")
+ async def start_research2(request: ResearchRequest):
+     if request.start_from not in ["research"]:
+         raise HTTPException(status_code=400, detail="start_from must be 'research'")
+     if request.start_from == "research" and not request.prompt:
+         raise HTTPException(status_code=400, detail="prompt is required when starting from research phase")

+     session_key = generate_session_key()
+     if request.strategy_id is None or request.strategy_id == "default":
+         research_impl = (lambda session_key, prompt: dry_research(session_key, prompt, "mock_instances/stocks_24th_3_sections")) if request.dry else real_research
+         reporting_impl = (lambda session_key: dry_reporting(session_key, "mock_instances/stocks_24th_3_sections")) if request.dry else real_reporting
+         session_key = request.session_key or generate_session_key()
+         research_gen = research_impl(session_key, request.prompt) if request.start_from == "research" else None
+         reporting_gen = reporting_impl(session_key)
+         return StreamingResponse(
+             stream_research_events(research_gen, reporting_gen, request.start_from == "research", session_key),
+             media_type="application/x-ndjson",
+             headers={"Cache-Control": "no-cache", "Connection": "keep-alive", "Content-Encoding": "none"},
+         )
+
+     return StreamingResponse(
+         stream_research2_events(session_key, request.prompt, request.strategy_id, request.strategy_content),
+         media_type="application/x-ndjson",
+         headers={"Cache-Control": "no-cache", "Connection": "keep-alive", "Content-Encoding": "none"},
+     )
+
+ # The async event generators remain unchanged
  async def stream_research_events(
      research_fn: AsyncGenerator[Dict[str, Any], None],
      reporting_fn: AsyncGenerator[Dict[str, Any], None],
      do_research: bool,
      session_key: str,
  ) -> AsyncGenerator[str, None]:
      try:
+         yield make_message({"type": "started", "description": "Waking up the Deep Research Backend"}, session_key)
+         error_event_encountered = False
          if do_research:
              async for event in research_fn:
                  if event["type"] == "error":
                      error_event_encountered = True
                  yield make_message(event, session_key)
          if not error_event_encountered:
              async for event in reporting_fn:
                  yield make_message(event, session_key)
+         yield make_message({"type": "completed", "description": "Research and reporting completed"}, session_key)
      except asyncio.CancelledError:
+         yield make_message({"type": "cancelled", "description": "Research was cancelled"}, session_key)
          raise

  async def stream_research2_events(
      session_key: str, prompt: str, strategy_id: str, strategy_content: str
  ) -> AsyncGenerator[str, None]:
      try:
+         yield make_message({"type": "started", "description": "Waking up the Universal Deep Research Backend"}, session_key)
          random.seed(config.research.random_seed)
+         comm_trace_timestamp = datetime.now().strftime("%Y%m%d_%H-%M-%S")
+         comm_trace_filename = f"{config.logging.log_dir}/comms_{comm_trace_timestamp}.log"
+         comm_trace = Trace(comm_trace_filename, copy_into_stdout=config.logging.copy_into_stdout)
+         client: Client = OpenAIClient(base_url="https://integrate.api.nvidia.com/v1", model="nvdev/meta/llama-3.1-70b-instruct", trace=comm_trace)
          frame_config = FrameConfigV4(
              long_context_cutoff=config.frame.long_context_cutoff,
              force_long_context=config.frame.force_long_context,
              max_iterations=config.frame.max_iterations,
              interaction_level=config.frame.interaction_level,
          )
+         harness = FrameV4(client_profile=client, errand_profile={}, compilation_trace=True, execution_trace="file_and_stdout")
          messages = []
+         preamble_files = ["frame/prompts/udr_minimal_generating/0.code_skill.py"]
          for path in preamble_files:
              type = path.split(".")[-2]
              with open(path, "r") as f:
+                 messages.append({"mid": len(messages), "role": "user", "content": f.read(), "type": type})
+         messages.append({"mid": len(messages), "role": "user", "content": "The following is the prompt data to be used in later procedures.\n\nPROMPT:\n" + prompt, "type": "data"})
+         messages.append({"mid": len(messages), "role": "user", "content": strategy_content, "type": "generating_routine"})
          for i in range(len(messages)):
              messages_so_far = messages[: i + 1]
+             yield make_message({"type": "generic", "description": f"Processing agentic instructions: {i + 1} of {len(messages)}"}, session_key)
+             for notification in harness.generate_with_notifications(messages=messages_so_far, frame_config=frame_config):
                  yield make_message(notification, session_key)
+         yield make_message({"type": "completed", "description": "Research completed"}, session_key)
      except asyncio.CancelledError:
+         yield make_message({"type": "cancelled", "description": "Research was cancelled"}, session_key)
          raise
+
+ # ========================================================================================
+ # CHANGE FOR HUGGING FACE SPACES
+ #
+ # 3. Create the main `app` application.
+ # 4. Mount `api_app` at `/api`.
+ # 5. Mount the static frontend at the root `/`.
+ # ========================================================================================
+
+ app = FastAPI()
+
+ # Mount the API
+ app.mount("/api", api_app)
+
+ # Mount the static frontend
+ # This must come at the very end of the file!
+ app.mount("/", StaticFiles(directory="/app/static_frontend", html=True), name="static")
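
With the sub-app in place, the route decorators drop their `/api` prefix but the public URLs are unchanged. A quick way to smoke-test the streaming endpoint is a minimal client sketch (not part of the commit; it assumes the server runs locally via `uvicorn main:app --port 8000` and that `httpx` is installed; `dry=True` uses the bundled mock data, so no model or search keys are needed):

```python
import json

import httpx

payload = {
    "prompt": "What are the latest developments in quantum computing?",
    "start_from": "research",
    "dry": True,  # mock data; avoids external API calls
}

# The "/api" prefix comes from app.mount("/api", api_app); the route itself is "/research".
with httpx.stream("POST", "http://localhost:8000/api/research", json=payload, timeout=None) as response:
    for line in response.iter_lines():
        if not line:
            continue
        # Each NDJSON line is one {"event": {...}, "session_key": "..."} object,
        # exactly as serialized by make_message() above.
        message = json.loads(line)
        event = message["event"]
        print(event["type"], "-", event.get("description", ""))
```

The ordering constraint the final comment insists on is real: Starlette matches mounts in registration order, so the catch-all `StaticFiles` mount at `/` must be registered after `/api`, or it would shadow every API route.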