Update vertexLib.py

vertexLib.py  CHANGED  (+61 -6)
@@ -3,7 +3,7 @@ import time
 import requests
 import jwt
 
-def get_access_token(project_id, client_email, private_key):
+def get_access_token(client_email, private_key):
     current_time = int(time.time())
     expiration_time = current_time + 600
 
@@ -24,7 +24,6 @@ def get_access_token(project_id, client_email, private_key):
     except Exception as e:
         return False, e
 
-    # Request the access token
     response = requests.post(
         'https://oauth2.googleapis.com/token',
         data={
@@ -34,11 +33,67 @@ def get_access_token(project_id, client_email, private_key):
     )
 
     if response.status_code == 200:
-        access_token = response.json()['access_token']
-        return True, access_token
+        return True, response.json()['access_token']
     else:
         return False, response.text
-
+
+def get_access_token_refresh(client_id, client_secret, refresh_token):
+    token_url = "https://oauth2.googleapis.com/token"
+
+    data = {
+        "client_id": client_id,
+        "client_secret": client_secret,
+        "refresh_token": refresh_token,
+        "grant_type": "refresh_token"
+    }
+
+    response = requests.post(token_url, data=data)
+
+    if response.status_code == 200:
+        return True, response.json()["access_token"]
+    else:
+        return False, response.text
+
+def get_gemini_models(key):
+    url_getListModel = f"https://generativelanguage.googleapis.com/v1beta/models?key={key}"
+    response = requests.get(url_getListModel)
+
+    if response.status_code == 200:
+        models = response.json()
+        model_list = []
+        for model in models['models']:
+            #model_list[model['name'].split('/')[1]] = model['displayName']
+            model_name = f"{model['name'].split('/')[1]}" # ({model['displayName']})"
+            model_list.append(model_name)
+        return model_list
+    else:
+        return ""
+
+def send_fake_gemini_request(key, model='gemini-1.5-flash'):
+    url = f'https://generativelanguage.googleapis.com/v1beta/models/{model}:generateContent?key={key}'
+    payload = {
+        "contents": [{ "role": "user", "parts": [{ "text": "" }] }],
+        "generationConfig": {
+            "maxOutputTokens": 0,
+        }
+    }
+    try:
+        response = requests.post(url=url, headers={'Content-Type': 'application/json'}, json=payload)
+        err_msg = response.json().get('error', '')
+        return err_msg
+    except:
+        return None
+
+def send_gemini_request(key, payload, model='gemini-1.5-flash'):
+    url = f'https://generativelanguage.googleapis.com/v1beta/models/{model}:generateContent?key={key}'
+
+    response = requests.post(url=url, headers={'Content-Type': 'application/json'}, json=payload)
+
+    if response.status_code == 200:
+        return True, response.json()
+    else:
+        return False, response.text
+
 async def send_gcp_request(session, project_id, access_token, payload, region='us-east5', model='claude-3-5-sonnet@20240620'):
     VERTEX_URL = f'https://{region}-aiplatform.googleapis.com/v1/projects/{project_id}/locations/{region}/publishers/anthropic/models/{model}:streamRawPredict'
     headers = {
@@ -51,4 +106,4 @@ async def send_gcp_request(session, project_id, access_token, payload, region='u
         response_data = await response.text()
         return json.loads(response_data)
 
-    return await response.json()
+    return await response.json()
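Both token helpers return a (success, value) tuple instead of raising: on success the second element is the bearer token, on failure it is the exception or the error body. A minimal consumption sketch, assuming the file is importable as vertexLib; the credential strings are obvious placeholders, not real values:

import vertexLib  # assumes vertexLib.py sits on the import path

# Service-account (JWT bearer) flow: email + private key from the downloaded key file.
ok, value = vertexLib.get_access_token(
    client_email="my-sa@my-project.iam.gserviceaccount.com",  # placeholder
    private_key="-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n",  # placeholder
)
if not ok:
    raise RuntimeError(f"token exchange failed: {value}")
access_token = value

# OAuth refresh-token flow: client id/secret plus a previously issued refresh token.
ok, value = vertexLib.get_access_token_refresh(
    client_id="1234567890.apps.googleusercontent.com",  # placeholder
    client_secret="client-secret",                      # placeholder
    refresh_token="stored-refresh-token",               # placeholder
)
if ok:
    access_token = value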
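The three key-based helpers wrap the public Generative Language REST endpoints: get_gemini_models lists model names, send_fake_gemini_request fires a deliberately empty, zero-token request and hands back whatever error object the API returns (which reads like a cheap key probe), and send_gemini_request posts a caller-built generateContent payload. A usage sketch; the key is a placeholder, and the response fields read at the end follow the documented generateContent schema rather than anything shown in this diff:

import vertexLib

GEMINI_KEY = "AIza...placeholder"  # not a real key

# List available short model names, e.g. ["gemini-1.5-flash", "gemini-1.5-pro", ...].
print(vertexLib.get_gemini_models(GEMINI_KEY))

# Key probe: returns the API's error object (or '' / None); an invalid key and a
# merely rejected empty request produce different error payloads.
print(vertexLib.send_fake_gemini_request(GEMINI_KEY))

# Real call: the payload follows the generateContent request schema.
payload = {
    "contents": [{"role": "user", "parts": [{"text": "Reply with one word."}]}],
    "generationConfig": {"maxOutputTokens": 32},
}
ok, result = vertexLib.send_gemini_request(GEMINI_KEY, payload, model="gemini-1.5-flash")
if ok:
    # Standard generateContent response layout (candidates -> content -> parts).
    print(result["candidates"][0]["content"]["parts"][0]["text"])
else:
    print("request failed:", result)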
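send_gcp_request is the only async helper and expects a caller-supplied session, presumably used as session.post(...) in an async context manager, which matches aiohttp.ClientSession. A sketch of wiring it to the token helper; the session type, the anthropic_version value, and the messages/max_tokens fields are assumptions about the Anthropic-on-Vertex rawPredict payload, not something this commit defines:

import asyncio
import aiohttp
import vertexLib

async def main():
    ok, token = vertexLib.get_access_token(
        client_email="my-sa@my-project.iam.gserviceaccount.com",  # placeholder
        private_key="-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n",  # placeholder
    )
    if not ok:
        raise RuntimeError(token)

    payload = {
        "anthropic_version": "vertex-2023-10-16",  # assumed value, verify against Vertex AI docs
        "max_tokens": 256,
        "messages": [{"role": "user", "content": "Hello, Claude."}],
    }

    # An aiohttp.ClientSession is assumed here, since the helper awaits response.text().
    async with aiohttp.ClientSession() as session:
        result = await vertexLib.send_gcp_request(
            session,
            project_id="my-project",  # placeholder GCP project id
            access_token=token,
            payload=payload,
            region="us-east5",
            model="claude-3-5-sonnet@20240620",
        )
        print(result)

asyncio.run(main())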