Commit b5c1225 (parent: ba25010)
Remove conversion of system message to human in ChatGoogleGenerativeAI
The convert_system_message_to_human parameter has been removed from every ChatGoogleGenerativeAI instance in guardrails_models.py, so system messages are no longer converted to human messages before being sent to the model.
guardrails_models.py (CHANGED, +0 −3)
@@ -113,7 +113,6 @@ def gemini_pro(
 ):
     llm = ChatGoogleGenerativeAI(
         model="gemini-pro",
-        convert_system_message_to_human=True,
         temperature=temperature,
         max_retries=6,
         metadata={"top_p": top_p, "max_output_tokens": max_output_tokens},
@@ -251,7 +250,6 @@ def gemini_pro_llamaguard(
     else:
         llm = ChatGoogleGenerativeAI(
             model="gemini-pro",
-            convert_system_message_to_human=True,
             temperature=temperature,
             max_retries=6,
             metadata={"top_p": top_p, "max_output_tokens": max_output_tokens},
@@ -384,7 +382,6 @@ def gemini_pro_nemoguardrails(
         config,
         llm=ChatGoogleGenerativeAI(
             model="gemini-pro",
-            convert_system_message_to_human=True,
             temperature=temperature,
             max_retries=6,
             metadata={"top_p": top_p, "max_output_tokens": max_output_tokens},
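For reference, a minimal sketch of the post-change construction in isolation. The removed flag, when set, merged SystemMessage content into the first human turn for Gemini versions without a native system role; without it, system messages are passed through to the integration unchanged. This assumes the langchain-google-genai package is installed and GOOGLE_API_KEY is set in the environment; the temperature, metadata values, and messages below are illustrative, not taken from the commit.

```python
# Minimal sketch of the constructor call as it looks after this commit;
# assumes langchain-google-genai is installed and GOOGLE_API_KEY is set.
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(
    model="gemini-pro",
    temperature=0.0,  # illustrative value, not from the commit
    max_retries=6,
    metadata={"top_p": 0.95, "max_output_tokens": 256},  # illustrative values
)

# With convert_system_message_to_human removed, the SystemMessage is
# forwarded as-is rather than being merged into the first human message.
messages = [
    SystemMessage(content="You answer in one short sentence."),
    HumanMessage(content="Say hello."),
]
print(llm.invoke(messages).content)
```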