Commit c67b794
1 Parent(s): c09202f
fix support gated models
src/backend.py CHANGED (+8, -9)
@@ -142,7 +142,11 @@ def _backend_routine():
     pending_models = list(set(rl_models) - set(evaluated_models))
     pending_and_compatible_models = []
     for repo_id, sha in pending_models:
-        filenames = [sib.rfilename for sib in API.model_info(repo_id, revision="main").siblings]
+        try:
+            siblings = API.model_info(repo_id, revision="main").siblings
+        except Exception:
+            continue
+        filenames = [sib.rfilename for sib in siblings]
         if "agent.pt" in filenames:
             pending_and_compatible_models.append((repo_id, sha))

@@ -160,15 +164,10 @@ def _backend_routine():
         row = {"model_id": model_id, "user_id": user_id, "sha": sha}

         # Run an evaluation on the models
-        try:
-            model_info = API.model_info(repo_id, revision=sha)
-            # Extract the environment IDs from the tags (usually only one)
-            env_ids = pattern_match(model_info.tags, ALL_ENV_IDS)
-        except Exception as e:
-            logger.error(f"Error fetching model info for {repo_id}: {e}")
-            logger.exception(e)
-            env_ids = []
+        model_info = API.model_info(repo_id, revision=sha)

+        # Extract the environment IDs from the tags (usually only one)
+        env_ids = pattern_match(model_info.tags, ALL_ENV_IDS)
         if len(env_ids) > 0:
             env_id = env_ids[0]
             logger.info(f"Running evaluation on {user_id}/{model_id}")
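`pattern_match` and `ALL_ENV_IDS` are defined elsewhere in this Space's source; the diff only shows the call site. A hedged sketch of what such a matcher could look like, assuming glob-style filtering of known environment ids against the model's tags (the implementation and the sample ids below are assumptions, not the Space's actual code):

import fnmatch

# Assumed shape of ALL_ENV_IDS: the leaderboard's known environment ids.
ALL_ENV_IDS = ["CartPole-v1", "MountainCar-v0", "Pendulum-v1"]

def pattern_match(patterns, source_list):
    """Return every entry of source_list matched by any glob pattern."""
    matched = set()
    for pattern in patterns:
        matched.update(fnmatch.filter(source_list, pattern))
    return sorted(matched)

# Usage mirroring the diff, where the patterns are model_info.tags:
print(pattern_match(["CartPole-v1", "reinforcement-learning"], ALL_ENV_IDS))
# -> ['CartPole-v1']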