import os
from huggingface_hub import HfApi
# Info to change for your repository
# ----------------------------------
TOKEN = os.environ.get("HF_TOKEN")  # A read/write token for your org
OWNER = "mech-interp-bench"  # Change to your org - don't forget to create results and requests datasets with the correct format!
# ----------------------------------
REPO_ID = f"{OWNER}/leaderboard"
# RESULTS_REPO = f"{OWNER}/results-mib-test"
# QUEUE_REPO = f"{OWNER}/requests"
QUEUE_REPO = "shunshao/requests-mib-test"
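# Separate results repos for the two MIB tracks (subgraph and causal graph)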
RESULTS_REPO_MIB_SUBGRAPH = f"{OWNER}/subgraph-results"
RESULTS_REPO_MIB_CAUSALGRAPH = f"{OWNER}/causalgraph-results"
# RESULTS_REPO_MIB_CAUSALGRAPH = f"shunshao/causalgraph-results"
# If you set up a cache later, just change HF_HOME
CACHE_PATH = os.getenv("HF_HOME", ".")
# Local caches
EVAL_REQUESTS_PATH = os.path.join(CACHE_PATH, "eval-queue")
# EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
EVAL_RESULTS_MIB_SUBGRAPH_PATH = os.path.join(CACHE_PATH, "eval-results-mib-subgraph")
EVAL_RESULTS_MIB_CAUSALGRAPH_PATH = os.path.join(CACHE_PATH, "eval-results-mib-causalgraph")
EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")
API = HfApi(token=TOKEN)
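
# Usage sketch (an assumption, following the standard HF leaderboard template):
# downstream code typically syncs the queue/results repos into the local cache
# paths defined above, e.g. with huggingface_hub.snapshot_download:
#
#   from huggingface_hub import snapshot_download
#   snapshot_download(
#       repo_id=QUEUE_REPO,
#       local_dir=EVAL_REQUESTS_PATH,
#       repo_type="dataset",
#       token=TOKEN,
#   )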