import gradio as gr
from huggingface_hub import list_spaces, list_models, list_datasets
from cachetools import TTLCache, cached
from toolz import groupby, valmap

import platform
from enum import Enum

is_macos = platform.system() == "Darwin"
# Presumably a local-development convenience: cap Hub listings when running on macOS,
# but fetch everything (limit=None) on the deployed Space.
LIMIT = 1_000_000 if is_macos else None
NONE_AUTHOR = "HuggingFace Team"  # TODO deal with this


class HubRepoType(Enum):
    MODEL = "model"
    DATASET = "dataset"
    SPACE = "space"


@cached(cache=TTLCache(maxsize=100, ttl=60 * 30))
def get_spaces():
    return list(list_spaces(full=True, limit=LIMIT))


@cached(cache=TTLCache(maxsize=100, ttl=60 * 30))
def get_models():
    return list(list_models(full=True, limit=LIMIT))


@cached(cache=TTLCache(maxsize=100, ttl=60 * 30))
def get_datasets():
    return list(list_datasets(full=True, limit=LIMIT))
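
# The three listing helpers above are cached with cachetools: @cached + TTLCache keeps
# each result (per argument tuple) for ttl=60 * 30 seconds, i.e. 30 minutes, so the Hub
# API is not re-queried on every Gradio interaction.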


get_spaces()  # to warm up the cache
get_models()  # to warm up the cache
get_datasets()  # to warm up the cache


@cached(cache=TTLCache(maxsize=100, ttl=60 * 30))
def valid_dataset_ids():
    return {dataset.id for dataset in get_datasets()}


@cached(cache=TTLCache(maxsize=100, ttl=60 * 30))
def valid_model_ids():
    return {model.id for model in get_models()}


@cached(cache=TTLCache(maxsize=100, ttl=60 * 30))
def valid_space_ids():
    return {space.id for space in get_spaces()}


VALID_DATASET_IDS = valid_dataset_ids()
VALID_MODEL_IDS = valid_model_ids()
VALID_SPACE_IDS = valid_space_ids()
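
# These sets are materialised once at import time (right after the cache warm-up calls
# above) and are used in rank_space_and_org() to tell un-namespaced repo IDs apart from
# org/user names.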


@cached(cache=TTLCache(maxsize=100, ttl=60 * 30))
def create_space_to_like_dict():
    spaces = get_spaces()
    return {space.id: space.likes for space in spaces}


@cached(cache=TTLCache(maxsize=100, ttl=60 * 30))
def create_org_to_space_like_dict():
    spaces = get_spaces()
    grouped = groupby(lambda x: x.author, spaces)
    return valmap(lambda x: sum(s.likes for s in x), grouped)
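
# A quick sketch of the toolz pattern used above (illustrative values only):
#   groupby(len, ["a", "bb", "cc"])   -> {1: ["a"], 2: ["bb", "cc"]}
#   valmap(sum, {1: [1], 2: [2, 3]})  -> {1: 1, 2: 5}
# i.e. spaces are grouped by author, then each group is reduced to a total like count.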


@cached(cache=TTLCache(maxsize=100, ttl=60 * 30))
def create_model_to_like_dict(metric_kind):
    models = get_models()
    if metric_kind == "likes":
        return {model.id: model.likes for model in models}
    if metric_kind == "downloads":
        return {model.id: model.downloads for model in models}
    raise ValueError(f"Unsupported metric_kind: {metric_kind}")


@cached(cache=TTLCache(maxsize=100, ttl=60 * 30))
def create_org_to_model_metrics(metric_kind="likes"):
    models = get_models()
    # remove authors who are None
    models = [model for model in models if model.author is not None]
    grouped = groupby(lambda x: x.author, models)
    if metric_kind == "likes":
        return valmap(lambda x: sum(s.likes for s in x), grouped)
    if metric_kind == "downloads":
        return valmap(lambda x: sum(s.downloads for s in x), grouped)
    raise ValueError(f"Unsupported metric_kind: {metric_kind}")


@cached(cache=TTLCache(maxsize=100, ttl=60 * 30))
def create_dataset_to_like_dict(metric_kind="likes"):
    datasets = get_datasets()
    if metric_kind == "likes":
        return {dataset.id: dataset.likes for dataset in datasets}
    if metric_kind == "downloads":
        return {dataset.id: dataset.downloads for dataset in datasets}
    raise ValueError(f"Unsupported metric_kind: {metric_kind}")


@cached(cache=TTLCache(maxsize=100, ttl=60 * 30))
def create_org_to_dataset_metrics(metric_kind="likes"):
    datasets = get_datasets()
    # remove authors who are None
    datasets = [dataset for dataset in datasets if dataset.author is not None]
    grouped = groupby(lambda x: x.author, datasets)
    if metric_kind == "likes":
        return valmap(lambda x: sum(s.likes for s in x), grouped)
    if metric_kind == "downloads":
        return valmap(lambda x: sum(s.downloads for s in x), grouped)
    raise ValueError(f"Unsupported metric_kind: {metric_kind}")


def relative_rank(my_dict, target_key, filter_zero=False):
    if filter_zero:
        my_dict = {k: v for k, v in my_dict.items() if v != 0}

    if target_key not in my_dict:
        raise gr.Error(f"'{target_key}' not found. Please check the ID and try again.")

    sorted_items = sorted(my_dict.items(), key=lambda item: item[1], reverse=True)

    position = [key for key, _ in sorted_items].index(target_key)
    num_lower = len(sorted_items) - position - 1
    num_higher = position
    return {
        "rank": (num_higher + 1) / len(my_dict) * 100,
        "num_higher": num_higher,
        "num_lower": num_lower,
        "value": my_dict[target_key],
        "position": num_higher + 1,
    }
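
# Illustrative example (not executed): with my_dict = {"a": 10, "b": 5, "c": 0},
# relative_rank(my_dict, "b") returns (values rounded)
#   {"rank": 66.67, "num_higher": 1, "num_lower": 1, "value": 5, "position": 2}
# where "rank" is the top-N percentage and "position" is 1-indexed.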


@cached(cache=TTLCache(maxsize=100, ttl=60 * 30))
def relative_rank_for_space(space_id, filter_zero=False):
    space_to_like_dict = create_space_to_like_dict()
    return relative_rank(space_to_like_dict, space_id, filter_zero=filter_zero)


@cached(cache=TTLCache(maxsize=100, ttl=60 * 30))
def relative_rank_for_model(model_id, metric_kind="likes", filter_zero=False):
    model_to_like_dict = create_model_to_like_dict(metric_kind)
    return relative_rank(model_to_like_dict, model_id, filter_zero=filter_zero)


@cached(cache=TTLCache(maxsize=100, ttl=60 * 30))
def relative_rank_for_dataset(dataset_id, metric_kind="likes", filter_zero=False):
    dataset_to_like_dict = create_dataset_to_like_dict(metric_kind)
    return relative_rank(dataset_to_like_dict, dataset_id, filter_zero=filter_zero)


@cached(cache=TTLCache(maxsize=100, ttl=60 * 30))
def relative_space_rank_for_org(org_id, filter_zero=False):
    org_to_like_dict = create_org_to_space_like_dict()
    return relative_rank(org_to_like_dict, org_id, filter_zero=filter_zero)


@cached(cache=TTLCache(maxsize=100, ttl=60 * 30))
def relative_model_rank_for_org(org_id, metric_kind="likes", filter_zero=False):
    org_to_like_dict = create_org_to_model_metrics(metric_kind)
    return relative_rank(org_to_like_dict, org_id, filter_zero=filter_zero)


@cached(cache=TTLCache(maxsize=100, ttl=60 * 30))
def relative_dataset_rank_for_org(org_id, metric_kind="likes", filter_zero=False):
    org_to_like_dict = create_org_to_dataset_metrics(metric_kind)
    return relative_rank(org_to_like_dict, org_id, filter_zero=filter_zero)


# @cached(cache=TTLCache(maxsize=100, ttl=60 * 30))
# def rank_space(space_id):
#     return relative_rank_for_space(space_id)


def rank_space_and_org(space_or_org_id, kind, filter_zero):
    filter_zero = filter_zero == "yes"
    split_length = len(space_or_org_id.split("/"))

    # A full repo ID like `owner/name` has two parts: rank that single repository.
    if split_length == 2:
        return _rank_single_repo(space_or_org_id, kind, filter_zero)

    # A single-part ID is usually an org/user name, but some canonical models and
    # datasets (e.g. `gpt2`, `squad`) have no namespace, so check for those first.
    if split_length == 1:
        valid_ids = {"model": VALID_MODEL_IDS, "dataset": VALID_DATASET_IDS}

        if kind in valid_ids and space_or_org_id in valid_ids[kind]:
            return _rank_single_repo(space_or_org_id, kind, filter_zero)
        else:
            return _rank_by_org(space_or_org_id, kind, filter_zero)

    # Anything else (for example an ID containing more than one "/") is unexpected.
    raise ValueError(
        f"Unexpected combination of space_or_org_id '{space_or_org_id}' and kind"
        f" '{kind}'"
    )
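
# Example calls (mirroring the Gradio inputs below; the third argument is the "yes"/"no"
# string from the filter-zero radio button):
#   rank_space_and_org("librarian-bots/ranker", "space", "no")  # a single Space
#   rank_space_and_org("librarian-bots", "space", "yes")        # a whole org/user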


def _rank_by_org(space_or_org_id, kind, filter_zero):
    if kind == "space":
        org_rank = relative_space_rank_for_org(space_or_org_id, filter_zero=filter_zero)
    elif kind == "model":
        org_rank = relative_model_rank_for_org(space_or_org_id, filter_zero=filter_zero)
    elif kind == "dataset":
        org_rank = relative_dataset_rank_for_org(
            space_or_org_id, filter_zero=filter_zero
        )
    else:
        raise ValueError(f"Unsupported repo kind: {kind}")
    result = (
        f"## ⭐️ Org/User {kind.title()} Likes Rankings ⭐️\n"
        + f"Here are the rankings for the org/user across all of their {kind}s \n"
    )
    result += f"""- You have {org_rank['value']:,} likes for this org/user.\n"""
    result += f"""- Your org/user is ranked {org_rank['position']:,}\n"""
    result += f"""- You have {org_rank['num_higher']:,} orgs/users above and {org_rank['num_lower']:,} orgs/users below in the ranking of {kind} likes \n\n"""
    result += f"""- Organization or user [{space_or_org_id}](https://huggingface.co/{space_or_org_id}) is ranked in the top {org_rank['rank']:.2f}% \n\n"""
    if kind == "space":
        result += f"""You can find all your Spaces sorted by likes [here](https://huggingface.co/{space_or_org_id}?sort_spaces=likes#spaces)\n"""
    if kind == "model":
        result += f"""You can find all your Models sorted by likes [here](https://huggingface.co/{space_or_org_id}?sort_models=likes#models)\n"""
    if kind == "dataset":
        result += f"""You can find all your Datasets sorted by likes [here](https://huggingface.co/{space_or_org_id}?sort_datasets=likes#datasets)\n"""
    return _create_footer_message(result, kind)


def _rank_single_repo(space_or_org_id, kind, filter_zero):
    if kind == "space":
        repo_rank = relative_rank_for_space(space_or_org_id, filter_zero=filter_zero)
    elif kind == "model":
        repo_rank = relative_rank_for_model(space_or_org_id, filter_zero=filter_zero)
    elif kind == "dataset":
        repo_rank = relative_rank_for_dataset(space_or_org_id, filter_zero=filter_zero)
    else:
        raise ValueError(f"Unsupported repo kind: {kind}")
    # Hub URLs differ by repo type: models live at the site root, Spaces and datasets
    # under a prefix.
    url_prefix = {"space": "spaces/", "model": "", "dataset": "datasets/"}[kind]
    result = f"## ⭐️ {kind.title()} Likes Rankings ⭐️\n"
    result += f"""Here are the rankings by likes for [`{space_or_org_id}`](https://huggingface.co/{url_prefix}{space_or_org_id}) across all {kind}s \n"""
    result += f"""- You have {repo_rank['value']:,} likes for this {kind}.\n"""
    result += f"""- Your {kind} is ranked {repo_rank['position']:,}.\n"""
    if kind == "space":
        result += f"""- Space [{space_or_org_id}](https://huggingface.co/spaces/{space_or_org_id}) is ranked {repo_rank['rank']:.2f}%\n"""
    if kind == "model":
        result += f"""- Model [{space_or_org_id}](https://huggingface.co/{space_or_org_id}) is ranked {repo_rank['rank']:.2f}%\n"""
    if kind == "dataset":
        result += f"""- Dataset [{space_or_org_id}](https://huggingface.co/dataset/{space_or_org_id}) is ranked {repo_rank['rank']:.2f}%\n"""
    result += f"""- You have {repo_rank['num_higher']:,} {kind}s above and {repo_rank['num_lower']:,} {kind}s below in the ranking of {kind}s likes\n\n"""
    return _create_footer_message(result, kind)


def _create_footer_message(result, kind):
    result += """### ✨ Remember likes aren't everything!✨\n"""
    if kind == "space":
        result += """Some Spaces go very viral whilst other Spaces may be very useful for a smaller audience. If you think your Space is useful, please add it to this [thread](https://huggingface.co/spaces/librarian-bots/ranker/discussions/3) of awesome Spaces.
            We'll look out for awesome Spaces added to this thread to promote more widely!"""
    return result


def get_top_n_orgs_and_users_spaces(top_n=100):
    # gr.Info("Updating leaderboard, this may take a few seconds...")
    orgs_to_likes = create_org_to_space_like_dict()
    sorted_items = sorted(orgs_to_likes.items(), key=lambda item: item[1], reverse=True)
    sorted_items = sorted_items[:top_n]
    return sorted_items


def get_top_n_orgs_and_users_models(metric, top_n=100):
    # gr.Info("Updating leaderboard, this may take a few seconds...")
    orgs_to_likes = create_org_to_model_metrics(metric)
    sorted_items = sorted(orgs_to_likes.items(), key=lambda item: item[1], reverse=True)
    sorted_items = sorted_items[:top_n]
    return sorted_items


def get_top_n_orgs_and_users_datasets(metric, top_n=100):
    # gr.Info("Updating leaderboard, this may take a few seconds...")
    orgs_to_likes = create_org_to_dataset_metrics(metric)
    sorted_items = sorted(orgs_to_likes.items(), key=lambda item: item[1], reverse=True)
    sorted_items = sorted_items[:top_n]
    return sorted_items


def plot_top_n_orgs_and_users(kind, metric="likes", top_n=100):
    if kind == "space":
        top_n = get_top_n_orgs_and_users_spaces(top_n)
        header = """## πŸ… Top 100 Orgs and Users by Space Likes πŸ…"""
        body = "".join(
            f"\n{i+1}. [{org}](https://huggingface.co/{org}) with {likes:,} likes"
            for i, (org, likes) in enumerate(top_n)
        )
        return header + body

    elif kind == "model":
        top_n = get_top_n_orgs_and_users_models(metric, top_n=top_n)
        header = """## πŸ… Top 100 Orgs and Users by Model Likes πŸ…"""
        body = "".join(
            f"\n{i+1}. [{org}](https://huggingface.co/{org}) with {likes:,} likes"
            for i, (org, likes) in enumerate(top_n)
        )
        return header + body
    elif kind == "dataset":
        top_n = get_top_n_orgs_and_users_datasets(metric, top_n=top_n)
        header = """## πŸ… Top 100 Orgs and Users by Dataset Likes πŸ…"""
        body = "".join(
            f"\n{i+1}. [{org}](https://huggingface.co/{org}) with {likes:,} likes"
            for i, (org, likes) in enumerate(top_n)
        )
        return header + body


def get_top_n_spaces(top_n=100):
    # gr.Info("Updating leaderboard, this may take a few seconds...")
    space_to_likes = create_space_to_like_dict()
    sorted_items = sorted(
        space_to_likes.items(), key=lambda item: item[1], reverse=True
    )
    sorted_items = sorted_items[:top_n]
    return sorted_items


@cached(cache=TTLCache(maxsize=100, ttl=60 * 30))
def get_top_n_models(metric_kind, top_n=100):
    # gr.Info("Updating leaderboard, this may take a few seconds...")
    model_to_likes = create_model_to_like_dict(metric_kind)
    sorted_items = sorted(
        model_to_likes.items(), key=lambda item: item[1], reverse=True
    )
    sorted_items = sorted_items[:top_n]
    return sorted_items


@cached(cache=TTLCache(maxsize=100, ttl=60 * 30))
def get_top_n_datasets(metric, top_n=100):
    # gr.Info("Updating leaderboard, this may take a few seconds...")
    dataset_to_likes = create_dataset_to_like_dict(metric)
    sorted_items = sorted(
        dataset_to_likes.items(), key=lambda item: item[1], reverse=True
    )
    sorted_items = sorted_items[:top_n]
    return sorted_items


def _plot_top_n_hub_repos(kind: HubRepoType, metric="likes", top_n=100):
    if kind == HubRepoType.SPACE:
        top_n = get_top_n_spaces(top_n)
        header = """## πŸ… Top 100 Space repositories by Likes πŸ…"""
        body = "".join(
            f"\n{i+1}. [{space}](https://huggingface.co/spaces/{space}) with"
            f" {likes:,} likes"
            for i, (space, likes) in enumerate(top_n)
        )
        return header + body
    elif kind == HubRepoType.MODEL:
        top_n = get_top_n_models(metric, top_n)
        header = """## πŸ… Top 100 Model repositories by Likes πŸ…"""
        body = "".join(
            f"\n{i+1}. [{model}](https://huggingface.co/{model}) with"
            f" {likes:,} likes"
            for i, (model, likes) in enumerate(top_n)
        )
        return header + body
    elif kind == HubRepoType.DATASET:
        top_n = get_top_n_datasets(metric, top_n)
        header = """## πŸ… Top 100 Dataset repositories by Likes πŸ…"""
        body = "".join(
            f"\n{i+1}. [{dataset}](https://huggingface.co/dataset/{dataset}) with"
            f" {likes:,} likes"
            for i, (dataset, likes) in enumerate(top_n)
        )
        return header + body


def plot_top_n_hub_repos(kind, metric_kind="likes", top_n=100):
    if kind == "space":
        return _plot_top_n_hub_repos(HubRepoType.SPACE, top_n=top_n)
    elif kind == "model":
        return _plot_top_n_hub_repos(HubRepoType.MODEL, metric=metric_kind, top_n=top_n)
    elif kind == "dataset":
        return _plot_top_n_hub_repos(
            HubRepoType.DATASET, metric=metric_kind, top_n=top_n
        )


with gr.Blocks() as demo:
    gr.HTML("<h1 style='text-align: center;'> &#127942; HuggyRanker &#127942; </h1>")
    gr.HTML(
        """<p style='text-align: center;'>Rank a single repository or all of the repositories created by an organization or user by likes</p>"""
    )
    gr.HTML(
        """<p style="text-align: center;"><i>Remember likes aren't everything!</i></p>"""
    )
    gr.Markdown(
        """## Rank Specific Hub repositories or rank an organization or user by likes
    Provide this app with a Hub ID e.g. `librarian-bots/ranker` or a Username/Organization name e.g. `librarian-bots` to rank by likes."""
    )
    with gr.Row():
        space_id = gr.Textbox(
            "librarian-bots", max_lines=1, label="Space or user/organization ID"
        )
        filter_zero_likes = gr.Radio(
            choices=["no", "yes"],
            label="Filter out repositories with 0 likes in the ranking?",
            value="yes",
        )
        repo_type = gr.Radio(
            choices=["space", "model", "dataset"],
            label="Type of repo",
            value="space",
            interactive=True,
        )
    run_btn = gr.Button("Show ranking for this repository or org/user!", label="Rank Space")
    result = gr.Markdown()
    run_btn.click(
        rank_space_and_org,
        inputs=[space_id, repo_type, filter_zero_likes],
        outputs=result,
    )
    gr.Markdown("## Leaderboard of Top 100 Spaces and Orgs/Users by Likes")
    gr.Markdown(
        """The leaderboard is updated every 30 minutes.
                Choose the type of repo to rank by likes and click the button to show the leaderboard."""
    )
    show_refresh_btn = gr.Button("Show/refresh Leaderboard", label="Refresh")
    with gr.Row():
        with gr.Accordion("Show rankings for Orgs and Users", open=False):
            org_user_ranking = gr.Markdown()
            show_refresh_btn.click(
                plot_top_n_orgs_and_users, inputs=[repo_type], outputs=org_user_ranking
            )
        with gr.Accordion("Show rankings for individual repositories", open=False):
            repo_level_ranking = gr.Markdown()
            show_refresh_btn.click(
                plot_top_n_hub_repos, inputs=[repo_type], outputs=repo_level_ranking
            )
demo.queue(concurrency_count=4).launch()