Update query.py

query.py
CHANGED
@@ -6,7 +6,7 @@ from urllib.parse import quote
 def extract_between_tags(text, start_tag, end_tag):
     start_index = text.find(start_tag)
     end_index = text.find(end_tag, start_index)
-    return text[start_index+len(start_tag):end_index
+    return text[start_index+len(start_tag):end_index]

 class VectaraQuery():
     def __init__(self, api_key: str, customer_id: str, corpus_id: str, prompt_name: str = None):
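The fix above closes the missing bracket on the slice. For reference, this helper strips the snippet markers that the request's context_config (further down in this file) wraps around each matched passage; a quick sketch of its behavior, with an illustrative input string:

    # Sketch: strip Vectara's snippet markers from a returned passage.
    text = "Before. %START_SNIPPET%A mojito is a rum cocktail.%END_SNIPPET% After."
    print(extract_between_tags(text, "%START_SNIPPET%", "%END_SNIPPET%"))
    # -> "A mojito is a rum cocktail."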
@@ -16,7 +16,7 @@ class VectaraQuery():
         self.prompt_name = prompt_name if prompt_name else "vectara-experimental-summary-ext-2023-12-11-large"
         self.conv_id = None

-    def get_body(self, user_response: str
+    def get_body(self, user_response: str):
         corpora_key_list = [{
             'customer_id': self.customer_id, 'corpus_id': self.corpus_id, 'lexical_interpolation_config': {'lambda': 0.025}
         }]
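In the corpus key above, lexical_interpolation_config's lambda blends exact-term matching into the neural relevance score (0 is purely neural, 1 purely lexical), so 0.025 keeps retrieval mostly semantic with a slight keyword boost. A hypothetical helper, not part of this commit, that makes the weight tunable:

    # Hypothetical refactor (not in this commit): expose the hybrid-search weight.
    def make_corpus_key(customer_id, corpus_id, lexical_lambda=0.025):
        return {
            'customer_id': customer_id,
            'corpus_id': corpus_id,
            # 0 = pure neural ranking, 1 = pure exact-term matching
            'lexical_interpolation_config': {'lambda': lexical_lambda},
        }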
@@ -26,29 +26,11 @@ class VectaraQuery():
         [
           {{
             "role": "system",
-            "content": "You are a
-            You specialize in the {style} debate style.
-            You are provided with search results related to {topic}.
-            Follow these INSTRUCTIONS carefully:
-            1. Provide a thoughtful and convincing reply.
-            2. Do not base your response on information or knowledge that is not in the search results.
-            3. Respond with respect to your opponent.
-            4. Limit your responses to not more than 2 paragraphs."
-          }},
-          {{
-            "role": "assistant",
-            "content": "
-            #foreach ($qResult in $vectaraQueryResults)
-            Search result $esc.java(${{foreach.index}}+1): $esc.java(${{qResult.getText()}})
-            #end
-            "
+            "content": "You are an assistant that provides information about drink names based on a given corpus."
           }},
           {{
             "role": "user",
-            "content": "
-            Consider the search results as relevant information with which to form your response, but do not mention the results in your response.
-            Consider the last argument from your opponent: '{user_response}'.
-            Use the {style} debate style to make your argument."
+            "content": "{user_response}"
           }}
         ]
         '''
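The doubled braces in the template above indicate it is rendered with Python string formatting, so {{ and }} collapse to literal braces and {user_response} is substituted. A sketch of what the new, simplified prompt evaluates to (the formatting mechanism is assumed from the brace escaping):

    # Sketch: rendering the new two-message prompt template.
    user_response = "What rum cocktails do you know?"
    prompt = f'''
    [
      {{
        "role": "system",
        "content": "You are an assistant that provides information about drink names based on a given corpus."
      }},
      {{
        "role": "user",
        "content": "{user_response}"
      }}
    ]
    '''
    # The doubled braces collapse to literal { }, producing a JSON-style message array.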
@@ -56,35 +38,16 @@ class VectaraQuery():
         return {
             'query': [
                 {
-                    'query':
+                    'query': user_response,
                     'start': 0,
-                    'numResults':
+                    'numResults': 10,
                     'corpusKey': corpora_key_list,
                     'context_config': {
                         'sentences_before': 2,
                         'sentences_after': 2,
                         'start_tag': "%START_SNIPPET%",
                         'end_tag': "%END_SNIPPET%",
-                    }
-                    'rerankingConfig':
-                    {
-                        'rerankerId': 272725718,
-                        'mmrConfig': {
-                            'diversityBias': 0.3
-                        }
-                    },
-                    'summary': [
-                        {
-                            'responseLang': 'eng',
-                            'maxSummarizedResults': 7,
-                            'summarizerPromptName': self.prompt_name,
-                            'promptText': prompt,
-                            'chat': {
-                                'store': True,
-                                'conversationId': self.conv_id
-                            },
-                        }
-                    ]
+                    }
                 }
             ]
         }
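After this change the request body is a plain retrieval query, with the reranking and chat-summarization sections gone. Roughly the JSON that gets sent now, with placeholder credentials and an illustrative query string:

    # Sketch: the serialized request body after this commit.
    import json
    body = {
        'query': [{
            'query': 'What rum cocktails do you know?',  # illustrative
            'start': 0,
            'numResults': 10,
            'corpusKey': [{
                'customer_id': 'CUSTOMER_ID',  # placeholder
                'corpus_id': 'CORPUS_ID',      # placeholder
                'lexical_interpolation_config': {'lambda': 0.025},
            }],
            'context_config': {
                'sentences_before': 2,
                'sentences_after': 2,
                'start_tag': '%START_SNIPPET%',
                'end_tag': '%END_SNIPPET%',
            },
        }],
    }
    print(json.dumps(body, indent=2))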
@@ -98,11 +61,10 @@ class VectaraQuery():
             "grpc-timeout": "60S"
         }

-    def submit_query(self, query_str: str
+    def submit_query(self, query_str: str):

         endpoint = f"https://api.vectara.io/v1/stream-query"
-        body = self.get_body(query_str
-
+        body = self.get_body(query_str)
         response = requests.post(endpoint, data=json.dumps(body), verify=True, headers=self.get_headers(), stream=True)
         if response.status_code != 200:
             print(f"Query failed with code {response.status_code}, reason {response.reason}, text {response.text}")
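A minimal usage sketch, assuming the constructor shown earlier and that submit_query yields the streamed chunks produced by the generator in the final hunk (credentials are placeholders):

    # Sketch: stream a query against the corpus (placeholder credentials).
    vq = VectaraQuery(api_key='API_KEY', customer_id='CUSTOMER_ID', corpus_id='CORPUS_ID')
    for chunk in vq.submit_query('What rum cocktails do you know?'):
        print(chunk, end='', flush=True)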
@@ -153,4 +115,4 @@ class VectaraQuery():
             chunks.append(accumulated_text)
             yield accumulated_text

-        return ''.join(chunks)
+        return ''.join(chunks)
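One subtlety in the re-added last line: a return inside a generator never reaches a plain for-loop consumer; the joined string only surfaces as StopIteration.value when the generator is driven manually (or captured via yield from). A self-contained sketch of that mechanism:

    # Sketch: recovering a generator's return value, as with ''.join(chunks) above.
    def gen():
        yield 'a'
        yield 'b'
        return 'ab'  # becomes StopIteration.value

    g = gen()
    try:
        while True:
            print(next(g))
    except StopIteration as stop:
        print('returned:', stop.value)  # -> returned: ab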