kartheikiyer committed · 780c323
Parent(s): c0025fa

remove partial yield for deep research
.ipynb_checkpoints/app_gradio-checkpoint.py CHANGED

```diff
@@ -533,16 +533,12 @@ def compileinfo(query, atom_qns, atom_qn_ans, atom_qn_strs):
 def deep_research(question, top_k, ec):
 
     full_answer = '## ' + question
-    yield None, None
 
     gen_client = openai_llm(temperature=0,model_name='gpt-4o-mini', openai_api_key = openai_key)
     messages = [("system",df_atomic_prompt,),("human", question),]
     rscope_text = gen_client.invoke(messages).content
 
     full_answer = full_answer +' \n'+ rscope_text
-    rag_answer = {}
-    rag_answer['answer'] = full_answer
-    yield None, rag_answer
 
     rscope_messages = [("system","""In the given text, what are the main atomic questions being asked? Please answer as a concise list.""",),("human", rscope_text),]
     rscope_qns = gen_client.invoke(rscope_messages).content
@@ -567,9 +563,6 @@ def deep_research(question, top_k, ec):
         atom_qn_strs.append(linkstr)
         full_answer = full_answer +' \n### '+atom_qns[i]
         full_answer = full_answer +' \n'+smallans
-        rag_answer = {}
-        rag_answer['answer'] = full_answer
-        yield None, rag_answer
 
     finalans, finallinks = compileinfo(question, atom_qns, atom_qn_ans, atom_qn_strs)
     full_answer = full_answer +' \n'+'### Summary:\n'+finalans
```
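Functionally, this commit changes `deep_research` from a generator that streams a partial answer to the UI after every step into one that surfaces only the finished answer. A minimal sketch (not the Space's actual code) of the before/after shapes, where `run_research_steps` is a hypothetical stand-in for the scoping, atomic-question, and RAG steps visible in the diff:

```python
def run_research_steps(question):
    # Hypothetical helper: yields one markdown-formatted partial answer per step.
    yield '### step 1\n...'
    yield '### step 2\n...'

def deep_research_before(question):
    """Before: partial yields, so the UI refreshes after every step."""
    full_answer = '## ' + question
    yield None, None                          # clear the output components
    for step in run_research_steps(question):
        full_answer = full_answer + ' \n' + step
        yield None, {'answer': full_answer}   # partial update (removed by this commit)
    yield None, {'answer': full_answer}       # final answer

def deep_research_after(question):
    """After: intermediate yields removed; only the finished answer is emitted."""
    full_answer = '## ' + question
    for step in run_research_steps(question):
        full_answer = full_answer + ' \n' + step
    yield None, {'answer': full_answer}
```

The `yield None, rag_answer` two-tuple matches the diff: the first output slot is cleared while the second receives a dict with an `'answer'` key holding the accumulated markdown.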
app_gradio.py CHANGED

```diff
@@ -533,16 +533,12 @@ def compileinfo(query, atom_qns, atom_qn_ans, atom_qn_strs):
 def deep_research(question, top_k, ec):
 
     full_answer = '## ' + question
-    yield None, None
 
     gen_client = openai_llm(temperature=0,model_name='gpt-4o-mini', openai_api_key = openai_key)
     messages = [("system",df_atomic_prompt,),("human", question),]
     rscope_text = gen_client.invoke(messages).content
 
     full_answer = full_answer +' \n'+ rscope_text
-    rag_answer = {}
-    rag_answer['answer'] = full_answer
-    yield None, rag_answer
 
     rscope_messages = [("system","""In the given text, what are the main atomic questions being asked? Please answer as a concise list.""",),("human", rscope_text),]
     rscope_qns = gen_client.invoke(rscope_messages).content
@@ -567,9 +563,6 @@ def deep_research(question, top_k, ec):
         atom_qn_strs.append(linkstr)
         full_answer = full_answer +' \n### '+atom_qns[i]
         full_answer = full_answer +' \n'+smallans
-        rag_answer = {}
-        rag_answer['answer'] = full_answer
-        yield None, rag_answer
 
     finalans, finallinks = compileinfo(question, atom_qns, atom_qn_ans, atom_qn_strs)
     full_answer = full_answer +' \n'+'### Summary:\n'+finalans
```
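Gradio streams each `yield` of a generator callback to its output components, which is why removing the intermediate yields means the interface updates once, at the end, instead of step by step. A hedged sketch of how such a generator might be wired up; the actual component names and the full `(question, top_k, ec)` wiring in app_gradio.py are not visible in this diff, so everything below is illustrative:

```python
import gradio as gr

def deep_research_after(question):
    # Stand-in for the post-commit function: a single final yield.
    yield None, {'answer': '## ' + question + ' \n### Summary:\n...'}

with gr.Blocks() as demo:
    question = gr.Textbox(label="Question")
    sources_out = gr.JSON(label="Sources")   # hypothetical first output slot
    answer_out = gr.JSON(label="Answer")     # receives the rag_answer dict
    gr.Button("Deep research").click(
        # Each yield of the generator updates the outputs; after this commit
        # there is only one yield, so only the final compiled answer appears.
        deep_research_after, inputs=question, outputs=[sources_out, answer_out],
    )

# demo.launch()  # uncomment to run locally
```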