MINGYISU committed on
Commit
010981f
·
1 Parent(s): ebd5cd9

fixed bugs

Browse files
Files changed (1) hide show
  1. utils.py +17 -16
utils.py CHANGED
@@ -15,7 +15,7 @@ TASKS_V1 = ["V1-Overall", "I-CLS", "I-QA", "I-RET", "I-VG"]
15
  TASKS_V2 = ["V2-Overall", "V-CLS", "V-QA", "V-RET", "V-MRET", "VisDoc"]
16
  COLUMN_NAMES = BASE_COLS + TASKS_V1 + TASKS_V2
17
 
18
- DATA_TITLE_TYPE = ['number', 'markdown', 'str', 'markdown'] +
19
  ['number'] * (len(TASKS_V1) + len(TASKS_V2))
20
 
21
  LEADERBOARD_INTRODUCTION = """
@@ -68,6 +68,7 @@ SUBMIT_INTRODUCTION = """# Submit on MMEB Leaderboard Introduction
68
  "I-VG": 50.0
69
  },
70
  ]
 
71
 
72
  ### **TO SUBMIT V2 ONLY**
73
  ```json
@@ -77,7 +78,7 @@ SUBMIT_INTRODUCTION = """# Submit on MMEB Leaderboard Introduction
77
  <Optional>"URL": "<Model URL>",
78
  "Model Size(B)": 1000,
79
  "Data Source": Self-Reported,
80
- "V1-Overall": 50.0,
81
  "V-CLS": 50.0,
82
  "V-QA": 50.0,
83
  "V-RET": 50.0,
@@ -86,8 +87,8 @@ SUBMIT_INTRODUCTION = """# Submit on MMEB Leaderboard Introduction
86
  },
87
  ]
88
  ```
89
- You are also welcome to submit both versions by including all the fields above! :)
90
- You may refer to the Github page for instructions about evaluating your model.
91
  Github link: https://github.com/TIGER-AI-Lab/VLM2Vec. \n
92
  Please send us an email at [email protected], attaching the JSON file. We will review your submission and update the leaderboard accordingly.
93
  """
@@ -104,21 +105,21 @@ def create_hyperlinked_names(df):
104
  df = df.apply(add_link_to_model_name, axis=1)
105
  return df
106
 
107
- def fetch_data(file: str) -> pd.DataFrame:
108
- # fetch the leaderboard data from remote
109
- if file is None:
110
- raise ValueError("URL Not Provided")
111
- url = f"https://huggingface.co/spaces/TIGER-Lab/MMEB/resolve/main/{file}"
112
- print(f"Fetching data from {url}")
113
- response = requests.get(url)
114
- if response.status_code != 200:
115
- raise requests.HTTPError(f"Failed to fetch data: HTTP status code {response.status_code}")
116
- return pd.read_json(io.StringIO(response.text), orient='records', lines=True)
117
 
118
  def get_df(file="results.jsonl"):
119
- df = fetch_data(file)
120
  df['Model Size(B)'] = df['Model Size(B)'].apply(process_model_size)
121
- df = df.sort_values(by=['Overall'], ascending=False)
122
  df = create_hyperlinked_names(df)
123
  df['Rank'] = range(1, len(df) + 1)
124
  return df
 
15
  TASKS_V2 = ["V2-Overall", "V-CLS", "V-QA", "V-RET", "V-MRET", "VisDoc"]
16
  COLUMN_NAMES = BASE_COLS + TASKS_V1 + TASKS_V2
17
 
18
+ DATA_TITLE_TYPE = ['number', 'markdown', 'str', 'markdown'] + \
19
  ['number'] * (len(TASKS_V1) + len(TASKS_V2))
20
 
21
  LEADERBOARD_INTRODUCTION = """
 
68
  "I-VG": 50.0
69
  },
70
  ]
71
+ ```
72
 
73
  ### **TO SUBMIT V2 ONLY**
74
  ```json
 
78
  <Optional>"URL": "<Model URL>",
79
  "Model Size(B)": 1000,
80
  "Data Source": Self-Reported,
81
+ "V2-Overall": 50.0,
82
  "V-CLS": 50.0,
83
  "V-QA": 50.0,
84
  "V-RET": 50.0,
 
87
  },
88
  ]
89
  ```
90
+ You are also welcome to submit both versions by including all the fields above! :) \n
91
+ You may refer to the Github page for instructions about evaluating your model. \n
92
  Github link: https://github.com/TIGER-AI-Lab/VLM2Vec. \n
93
  Please send us an email at [email protected], attaching the JSON file. We will review your submission and update the leaderboard accordingly.
94
  """
 
105
  df = df.apply(add_link_to_model_name, axis=1)
106
  return df
107
 
108
+ # def fetch_data(file: str) -> pd.DataFrame:
109
+ # # fetch the leaderboard data from remote
110
+ # if file is None:
111
+ # raise ValueError("URL Not Provided")
112
+ # url = f"https://huggingface.co/spaces/TIGER-Lab/MMEB/resolve/main/{file}"
113
+ # print(f"Fetching data from {url}")
114
+ # response = requests.get(url)
115
+ # if response.status_code != 200:
116
+ # raise requests.HTTPError(f"Failed to fetch data: HTTP status code {response.status_code}")
117
+ # return pd.read_json(io.StringIO(response.text), orient='records', lines=True)
118
 
119
  def get_df(file="results.jsonl"):
120
+ df = pd.read_json(file, orient='records', lines=True)
121
  df['Model Size(B)'] = df['Model Size(B)'].apply(process_model_size)
122
+ df = df.sort_values(by=['V1-Overall'], ascending=False)
123
  df = create_hyperlinked_names(df)
124
  df['Rank'] = range(1, len(df) + 1)
125
  return df