slug: string (length 15-15)
content: list (length 1-129)
rawContent: string (length 1-2k)
author: dict
attachments: list (length 0-49)
mentions: list (length 0-49)
reactions: list (length 0-12)
publishedAt: string (length 24-24)
updatedAt: string (length 24-24)
commentators: list (length 0-52)
url: string (length 25-46)
totalUniqueImpressions: int64 (1-42.1k, ⌀ = null allowed)
numComments: int64 (0-621)
428807573957776
[ { "type": "text", "value": "Hey, we've been researching how sentiment analysis models work on real world data and would like to share a comparison tool and leaderboard that we've built: ", "raw": "Hey, we've been researching how sentiment analysis models work on real world data and would like to share a comparison tool and leaderboard that we've built: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://addmaple.com/sentiment", "href": "https://addmaple.com/sentiment", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Compare 12 models - top RoBERTa models, plus Google/AWS commercial offerings and GPT4o.", "raw": "Compare 12 models - top RoBERTa models, plus Google/AWS commercial offerings and GPT4o.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Rather than just a score, you can explore results by those with the highest disagreement between models, which gives a nice intuition for the strengths and weaknesses of each model. ", "raw": "Rather than just a score, you can explore results by those with the highest disagreement between models, which gives a nice intuition for the strengths and weaknesses of each model. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "For example a genuine restaurant review is: \"Food doesnโ€™t get better than this. I was sad when I finished, actually sad. To die for.\" - ", "raw": "For example a genuine restaurant review is: \"Food doesnโ€™t get better than this. I was sad when I finished, actually sad. 
To die for.\" - ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://addmaple.com/sentiment/public-reviews/manteca/C9bvMuyAeF1g", "href": "https://addmaple.com/sentiment/public-reviews/manteca/C9bvMuyAeF1g", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "SiEBERT gets it right, as do Google and OpenAI but all other models fail.", "raw": "SiEBERT gets it right, as do Google and OpenAI but all other models fail.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "We'd love your feedback", "raw": "We'd love your feedback", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Hitra", "raw": "Hitra", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Hey, we've been researching how sentiment analysis models perform on real-world data and would like to share a comparison tool and leaderboard that we've built: https://addmaple.com/sentiment Compare 12 models - top RoBERTa models, plus Google/AWS commercial offerings and GPT-4o. Rather than just a score, you can explore results by the examples with the highest disagreement between models, which gives a nice intuition for the strengths and weaknesses of each model. For example, a genuine restaurant review reads: "Food doesn't get better than this. I was sad when I finished, actually sad. To die for." - https://addmaple.com/sentiment/public-reviews/manteca/C9bvMuyAeF1g SiEBERT gets it right, as do Google and OpenAI, but all other models fail. We'd love your feedback. Hitra
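As a rough illustration of running one of the compared open models locally, here is a minimal sketch using the transformers pipeline with the public SiEBERT checkpoint; the checkpoint id and settings are assumptions on my part, and the leaderboard's own setup may differ.

```python
# Minimal sketch (assumption: the public SiEBERT checkpoint on the Hub).
from transformers import pipeline

classifier = pipeline(
    "sentiment-analysis",
    model="siebert/sentiment-roberta-large-english",
)

review = ("Food doesn't get better than this. "
          "I was sad when I finished, actually sad. To die for.")
print(classifier(review))  # expected: a POSITIVE label, as described above
```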
{ "avatarUrl": "/avatars/cda5f7930a07d94b085ed7dcbc9fb9a8.svg", "fullname": "Hitra D", "name": "hitradostava", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 }, { "reaction": "๐Ÿค", "users": [ "prithivMLmods" ], "count": 1 } ]
2024-10-21T19:26:28.000Z
2024-10-21T19:26:28.331Z
[]
/posts/hitradostava/428807573957776
1,143
0
262950692686767
[ { "type": "text", "value": "Today lets discuss about 32-bit (FP32) and 16-bit (FP16) floating-point!", "raw": "Today lets discuss about 32-bit (FP32) and 16-bit (FP16) floating-point!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Floating-point numbers are used to represent real numbers (like decimals) and they consist of three parts:", "raw": "Floating-point numbers are used to represent real numbers (like decimals) and they consist of three parts:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "code_fence", "value": null, "raw": "```\nSign bit: \nIndicates whether the number is positive (0) or negative (1).\nExponent:\nDetermines the scale of the number (i.e., how large or small it is by shifting the decimal point).\nMantissa (or fraction): \nRepresents the actual digits of the number.\n```", "href": null, "resource": null, "url": null, "code": "Sign bit: \nIndicates whether the number is positive (0) or negative (1).\nExponent:\nDetermines the scale of the number (i.e., how large or small it is by shifting the decimal point).\nMantissa (or fraction): \nRepresents the actual digits of the number.", "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "32-bit Floating Point (FP32)", "raw": "32-bit Floating Point (FP32)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Total bits: 32 bits", "raw": "Total bits: 32 bits", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Sign bit: 1 bit", "raw": "Sign bit: 1 bit", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Exponent: 8 bits", "raw": "Exponent: 8 bits", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Mantissa: 23 bits", "raw": "Mantissa: 23 bits", "href": null, "resource": 
null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "For example:", "raw": "For example:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "A number like -15.375 would be represented as:", "raw": "A number like -15.375 would be represented as:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Sign bit: 1 (negative number)", "raw": "Sign bit: 1 (negative number)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Exponent: Stored after being adjusted by a bias (127 in FP32).", "raw": "Exponent: Stored after being adjusted by a bias (127 in FP32).", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Mantissa: The significant digits after converting the number to binary.", "raw": "Mantissa: The significant digits after converting the number to binary.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "16-bit Floating Point (FP16)", "raw": "16-bit Floating Point (FP16)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Total bits: 16 bits", "raw": "Total bits: 16 bits", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Sign bit: 1 bit", "raw": "Sign bit: 1 bit", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Exponent: 5 bits", "raw": "Exponent: 5 bits", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", 
"value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Mantissa: 10 bits", "raw": "Mantissa: 10 bits", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Example:", "raw": "Example:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "A number like -15.375 would be stored similarly:", "raw": "A number like -15.375 would be stored similarly:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Sign bit: 1 (negative number)", "raw": "Sign bit: 1 (negative number)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Exponent: Uses 5 bits, limiting the range compared to FP32.", "raw": "Exponent: Uses 5 bits, limiting the range compared to FP32.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Mantissa: Only 10 bits for precision.", "raw": "Mantissa: Only 10 bits for precision.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Precision and Range", "raw": "Precision and Range", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "FP32: Higher precision and larger range, with about 7 decimal places of accuracy.", "raw": "FP32: Higher precision and larger range, with about 7 decimal places of accuracy.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "FP16: Less precision (around 3-4 decimal places), smaller range but faster computations and less memory use.", "raw": "FP16: Less precision (around 3-4 decimal places), smaller range but faster computations and less memory use.", "href": null, 
"resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Today let's discuss 32-bit (FP32) and 16-bit (FP16) floating-point! Floating-point numbers are used to represent real numbers (like decimals), and they consist of three parts: ``` Sign bit: Indicates whether the number is positive (0) or negative (1). Exponent: Determines the scale of the number (i.e., how large or small it is by shifting the decimal point). Mantissa (or fraction): Represents the actual digits of the number. ``` 32-bit Floating Point (FP32) Total bits: 32 bits Sign bit: 1 bit Exponent: 8 bits Mantissa: 23 bits For example: A number like -15.375 would be represented as: Sign bit: 1 (negative number) Exponent: Stored after being adjusted by a bias (127 in FP32). Mantissa: The significant digits after converting the number to binary. 16-bit Floating Point (FP16) Total bits: 16 bits Sign bit: 1 bit Exponent: 5 bits Mantissa: 10 bits Example: A number like -15.375 would be stored similarly: Sign bit: 1 (negative number) Exponent: Uses 5 bits, limiting the range compared to FP32. Mantissa: Only 10 bits for precision. Precision and Range FP32: Higher precision and larger range, with about 7 significant decimal digits of accuracy. FP16: Less precision (around 3-4 significant decimal digits), smaller range, but faster computations and lower memory use.
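To make the bit layout concrete, here is a small sketch that prints the FP32 and FP16 bit patterns of the example value -15.375; it assumes Python with NumPy installed and a little-endian machine.

```python
# Sketch: inspect the bit patterns of -15.375 in FP32 and FP16.
import struct
import numpy as np

value = -15.375  # binary 1111.011 = 1.111011 x 2^3

# FP32: 1 sign bit | 8 exponent bits (bias 127) | 23 mantissa bits
fp32_bits = struct.unpack(">I", struct.pack(">f", value))[0]
print(f"{fp32_bits:032b}")  # 11000001011101100000000000000000
#   sign=1, exponent=10000010 (127+3), mantissa=1110110...0

# FP16: 1 sign bit | 5 exponent bits (bias 15) | 10 mantissa bits
fp16_bits = int.from_bytes(np.float16(value).tobytes(), "little")  # assumes little-endian host
print(f"{fp16_bits:016b}")  # 1100101110110000
#   sign=1, exponent=10010 (15+3), mantissa=1110110000
```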
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/y1W6Co4jIYB95Cx6Tjrsd.jpeg", "fullname": "Muhammad Imran Zaman", "name": "ImranzamanML", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 32, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "YaTharThShaRma999", "John6666", "lucianosb", "victor", "Gausq" ], "count": 5 }, { "reaction": "๐Ÿง ", "users": [ "prithivMLmods", "lucianosb" ], "count": 2 } ]
2024-10-21T15:11:27.000Z
2024-10-22T09:25:13.894Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png", "fullname": "John Smith", "name": "John6666", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 398, "isFollowing": false }, { "avatarUrl": "/avatars/5cbfa6cbde933503bbc3577cf713e7b5.svg", "fullname": "Friedrich Marty", "name": "Smorty100", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/y1W6Co4jIYB95Cx6Tjrsd.jpeg", "fullname": "Muhammad Imran Zaman", "name": "ImranzamanML", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 32, "isFollowing": false } ]
/posts/ImranzamanML/262950692686767
1,696
3
761776634129339
[ { "type": "text", "value": "I ported the hottest new shape-optimized SigLIP ๐Ÿ”ฅ ", "raw": "I ported the hottest new shape-optimized SigLIP ๐Ÿ”ฅ ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/merve/siglip-so400m-patch16-256-i18n", "href": "https://huggingface.co/merve/siglip-so400m-patch16-256-i18n", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "if you don't want to wait for the next transformers release install transformers from my PR ", "raw": "if you don't want to wait for the next transformers release install transformers from my PR ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/huggingface/transformers/pull/32938", "href": "https://github.com/huggingface/transformers/pull/32938", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " and initialize SigLIP from there", "raw": " and initialize SigLIP from there", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
I ported the hottest new shape-optimized SigLIP 🔥 https://huggingface.co/merve/siglip-so400m-patch16-256-i18n If you don't want to wait for the next transformers release, install transformers from my PR https://github.com/huggingface/transformers/pull/32938 and initialize SigLIP from there.
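For anyone trying it right away, a rough sketch of the workflow could look like the following; installing straight from the PR ref and the zero-shot classification usage shown here are my assumptions, not instructions from the post.

```python
# Sketch: install transformers from the PR, then load the ported SigLIP checkpoint.
#   pip install "git+https://github.com/huggingface/transformers.git@refs/pull/32938/head"
import torch
from PIL import Image
from transformers import AutoModel, AutoProcessor

ckpt = "merve/siglip-so400m-patch16-256-i18n"
model = AutoModel.from_pretrained(ckpt)
processor = AutoProcessor.from_pretrained(ckpt)

image = Image.open("cat.jpg")  # placeholder path for any local image
texts = ["a photo of a cat", "a photo of a dog"]
inputs = processor(text=texts, images=image, padding="max_length", return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)
probs = torch.sigmoid(outputs.logits_per_image)  # SigLIP uses a sigmoid, not a softmax
print(probs)
```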
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648113222875-6141a88b3a0ec78603c9e784.png", "fullname": "Merve Noyan", "name": "merve", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5589, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6141a88b3a0ec78603c9e784/IfAEXwgpki2XxYwR9Mm3Y.jpeg" } ]
[]
[ { "reaction": "๐Ÿš€", "users": [ "prithivMLmods", "rwightman", "enzostvs", "ucsahin" ], "count": 4 }, { "reaction": "๐Ÿ‘€", "users": [ "John6666", "YaTharThShaRma999", "pfung" ], "count": 3 }, { "reaction": "๐Ÿ”ฅ", "users": [ "atasoglu" ], "count": 1 }, { "reaction": "๐Ÿ‘", "users": [ "fsommers" ], "count": 1 } ]
2024-10-21T11:59:05.000Z
2024-10-21T11:59:05.096Z
[]
/posts/merve/761776634129339
3,933
0
735922407872976
[ { "type": "text", "value": "I feel like this incredible resource hasn't gotten the attention it deserves in the community!", "raw": "I feel like this incredible resource hasn't gotten the attention it deserves in the community!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@clefourrier", "href": null, "resource": null, "url": null, "code": null, "user": "clefourrier", "label": null, "lang": null }, { "type": "text", "value": " and generally the HuggingFace evaluation team put together a fantastic guidebook covering a lot about ๐—˜๐—ฉ๐—”๐—Ÿ๐—จ๐—”๐—ง๐—œ๐—ข๐—ก from basics to advanced tips.", "raw": " and generally the HuggingFace evaluation team put together a fantastic guidebook covering a lot about ๐—˜๐—ฉ๐—”๐—Ÿ๐—จ๐—”๐—ง๐—œ๐—ข๐—ก from basics to advanced tips.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "link : ", "raw": "link : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/huggingface/evaluation-guidebook", "href": "https://github.com/huggingface/evaluation-guidebook", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I havenโ€™t finished it yet, but i'am enjoying every piece of it so far. Huge thanks ", "raw": "I havenโ€™t finished it yet, but i'am enjoying every piece of it so far. Huge thanks ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@clefourrier", "href": null, "resource": null, "url": null, "code": null, "user": "clefourrier", "label": null, "lang": null }, { "type": "text", "value": " and the team for this invaluable resource !", "raw": " and the team for this invaluable resource !", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
I feel like this incredible resource hasn't gotten the attention it deserves in the community! @clefourrier and the Hugging Face evaluation team more broadly put together a fantastic guidebook covering a lot about 𝗘𝗩𝗔𝗟𝗨𝗔𝗧𝗜𝗢𝗡, from basics to advanced tips. link: https://github.com/huggingface/evaluation-guidebook I haven't finished it yet, but I'm enjoying every piece of it so far. Huge thanks @clefourrier and the team for this invaluable resource!
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/626237d9bbcbd1c34f1bb231/EJrOjvAL-68qMCYdnvOrq.png", "fullname": "Ali El Filali", "name": "alielfilali01", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 186, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1644340617257-noauth.png", "fullname": "Clรฉmentine Fourrier", "name": "clefourrier", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 459 } ]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "louisbrulenaudet", "prithivMLmods", "bilgeyucel", "den0620" ], "count": 5 }, { "reaction": "๐Ÿ”ฅ", "users": [ "pacificg" ], "count": 1 } ]
2024-10-21T11:27:59.000Z
2024-10-21T12:20:36.895Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png", "fullname": "John Smith", "name": "John6666", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 398, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/626237d9bbcbd1c34f1bb231/EJrOjvAL-68qMCYdnvOrq.png", "fullname": "Ali El Filali", "name": "alielfilali01", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 186, "isFollowing": false } ]
/posts/alielfilali01/735922407872976
1,635
3
354562758942371
[ { "type": "text", "value": "Ok, you're finally convinced that synthetic data works... โš—๏ธ", "raw": "Ok, you're finally convinced that synthetic data works... โš—๏ธ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐๐จ๐ฐ ๐ฒ๐จ๐ฎ ๐ฐ๐š๐ง๐ญ ๐ญ๐จ ๐ ๐ž๐ง๐ž๐ซ๐š๐ญ๐ž ๐š๐ง ๐ข๐ง๐ฌ๐ญ๐ซ๐ฎ๐œ๐ญ๐ข๐จ๐ง ๐๐š๐ญ๐š๐ฌ๐ž๐ญ ๐Ÿ๐จ๐ซ ๐Ÿ๐ข๐ง๐ž-๐ญ๐ฎ๐ง๐ข๐ง๐  ๐ข๐ง ๐š ๐ฅ๐š๐ง๐ ๐ฎ๐š๐ ๐ž ๐จ๐ญ๐ก๐ž๐ซ ๐ญ๐ก๐š๐ง ๐„๐ง๐ ๐ฅ๐ข๐ฌ๐ก.", "raw": "๐๐จ๐ฐ ๐ฒ๐จ๐ฎ ๐ฐ๐š๐ง๐ญ ๐ญ๐จ ๐ ๐ž๐ง๐ž๐ซ๐š๐ญ๐ž ๐š๐ง ๐ข๐ง๐ฌ๐ญ๐ซ๐ฎ๐œ๐ญ๐ข๐จ๐ง ๐๐š๐ญ๐š๐ฌ๐ž๐ญ ๐Ÿ๐จ๐ซ ๐Ÿ๐ข๐ง๐ž-๐ญ๐ฎ๐ง๐ข๐ง๐  ๐ข๐ง ๐š ๐ฅ๐š๐ง๐ ๐ฎ๐š๐ ๐ž ๐จ๐ญ๐ก๐ž๐ซ ๐ญ๐ก๐š๐ง ๐„๐ง๐ ๐ฅ๐ข๐ฌ๐ก.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "But how do you get started?", "raw": "But how do you get started?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I explore how to do this with Magpie in my new article", "raw": "I explore how to do this with Magpie in my new article", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/blog/anakin87/multilingual-magpie", "href": "https://huggingface.co/blog/anakin87/multilingual-magpie", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "---", "raw": "---", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿฆโ€โฌ› 
๐–๐ก๐š๐ญ ๐ข๐ฌ ๐Œ๐š๐ ๐ฉ๐ข๐ž?", "raw": "๐Ÿฆโ€โฌ› ๐–๐ก๐š๐ญ ๐ข๐ฌ ๐Œ๐š๐ ๐ฉ๐ข๐ž?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "It's a recent technique for creating synthetic instruction datasets.", "raw": "It's a recent technique for creating synthetic instruction datasets.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Magpie is based on a simple but ingenious idea ๐Ÿ‘‡", "raw": "Magpie is based on a simple but ingenious idea ๐Ÿ‘‡", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "if you prompt an instruction-tuned model with a pre-query template, you can make it generate a plausible user query/instruction", "raw": "if you prompt an instruction-tuned model with a pre-query template, you can make it generate a plausible user query/instruction", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Here's an example:", "raw": "Here's an example:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "model: Llama-3-8B-Instruct", "raw": "model: Llama-3-8B-Instruct", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "pre-query template: \"<|begin_of_text|><|start_header_id|>user<|end_header_id|>\"", "raw": "pre-query template: \"<|begin_of_text|><|start_header_id|>user<|end_header_id|>\"", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "generated user instruction: \"What are some of the responsibilities of a commercial pilot?\"", "raw": "generated user instruction: \"What 
are some of the responsibilities of a commercial pilot?\"", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "You can then feed this instruction back into the same model to get the assistant response.", "raw": "You can then feed this instruction back into the same model to get the assistant response.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "By repeating this process, it's possible to generate large synthetic datasets with relatively little effort.", "raw": "By repeating this process, it's possible to generate large synthetic datasets with relatively little effort.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿช„ The authors demonstrate that using these datasets for Supervised Fine Tuning (SFT) can yield strong performance, even competitive with the original instruct model.", "raw": "๐Ÿช„ The authors demonstrate that using these datasets for Supervised Fine Tuning (SFT) can yield strong performance, even competitive with the original instruct model.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿง—๐†๐ž๐ง๐ž๐ซ๐š๐ญ๐ข๐ง๐  ๐ง๐จ๐ง-๐„๐ง๐ ๐ฅ๐ข๐ฌ๐ก ๐๐š๐ญ๐š", "raw": "๐Ÿง—๐†๐ž๐ง๐ž๐ซ๐š๐ญ๐ข๐ง๐  ๐ง๐จ๐ง-๐„๐ง๐ ๐ฅ๐ข๐ฌ๐ก ๐๐š๐ญ๐š", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Most Language Models are primarily trained on English texts, so they tend to produce data in English.", "raw": "Most Language Models are primarily 
trained on English texts, so they tend to produce data in English.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "How can we overcome this?", "raw": "How can we overcome this?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Earlier approaches were complex or costly.", "raw": "Earlier approaches were complex or costly.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Then ", "raw": "Then ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@mrm8488", "href": null, "resource": null, "url": null, "code": null, "user": "mrm8488", "label": null, "lang": null }, { "type": "text", "value": " found a simple solution: add the target language to the pre-query template.", "raw": " found a simple solution: add the target language to the pre-query template.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "For Spanish, the template becomes \"<|begin_of_text|><|start_header_id|>user<|end_header_id|>spanish:\".", "raw": "For Spanish, the template becomes \"<|begin_of_text|><|start_header_id|>user<|end_header_id|>spanish:\".", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This method works for Spanish and German!", "raw": "This method works for Spanish and German!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โŒ Unfortunately, it does not 
work well for other languages (๐Ÿ‡ฎ๐Ÿ‡น, ๐Ÿ‡ณ๐Ÿ‡ฑ, ...)", "raw": "โŒ Unfortunately, it does not work well for other languages (๐Ÿ‡ฎ๐Ÿ‡น, ๐Ÿ‡ณ๐Ÿ‡ฑ, ...)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ‘‡", "raw": "๐Ÿ‘‡", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Ok, you're finally convinced that synthetic data works... ⚗️ 𝐍𝐨𝐰 𝐲𝐨𝐮 𝐰𝐚𝐧𝐭 𝐭𝐨 𝐠𝐞𝐧𝐞𝐫𝐚𝐭𝐞 𝐚𝐧 𝐢𝐧𝐬𝐭𝐫𝐮𝐜𝐭𝐢𝐨𝐧 𝐝𝐚𝐭𝐚𝐬𝐞𝐭 𝐟𝐨𝐫 𝐟𝐢𝐧𝐞-𝐭𝐮𝐧𝐢𝐧𝐠 𝐢𝐧 𝐚 𝐥𝐚𝐧𝐠𝐮𝐚𝐠𝐞 𝐨𝐭𝐡𝐞𝐫 𝐭𝐡𝐚𝐧 𝐄𝐧𝐠𝐥𝐢𝐬𝐡. But how do you get started? I explore how to do this with Magpie in my new article https://huggingface.co/blog/anakin87/multilingual-magpie --- 🐦‍⬛ 𝐖𝐡𝐚𝐭 𝐢𝐬 𝐌𝐚𝐠𝐩𝐢𝐞? It's a recent technique for creating synthetic instruction datasets. Magpie is based on a simple but ingenious idea 👇 If you prompt an instruction-tuned model with only a pre-query template, it will generate a plausible user query/instruction. Here's an example: model: Llama-3-8B-Instruct; pre-query template: "<|begin_of_text|><|start_header_id|>user<|end_header_id|>"; generated user instruction: "What are some of the responsibilities of a commercial pilot?" You can then feed this instruction back into the same model to get the assistant response. By repeating this process, it's possible to generate large synthetic datasets with relatively little effort. 🪄 The authors demonstrate that using these datasets for Supervised Fine-Tuning (SFT) can yield strong performance, even competitive with the original instruct model. 🧗 𝐆𝐞𝐧𝐞𝐫𝐚𝐭𝐢𝐧𝐠 𝐧𝐨𝐧-𝐄𝐧𝐠𝐥𝐢𝐬𝐡 𝐝𝐚𝐭𝐚 Most language models are primarily trained on English text, so they tend to produce data in English. How can we overcome this? Earlier approaches were complex or costly. Then @mrm8488 found a simple solution: add the target language to the pre-query template. For Spanish, the template becomes "<|begin_of_text|><|start_header_id|>user<|end_header_id|>spanish:". This method works for Spanish and German! ❌ Unfortunately, it does not work well for other languages (🇮🇹, 🇳🇱, ...) 👇
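Here is a minimal sketch of the trick described above using plain transformers generation; the trailing double newline after the header and the sampling settings are my assumptions, and since the Llama model is gated, any instruction-tuned checkpoint with this chat template can be substituted.

```python
# Sketch: elicit a synthetic user instruction by prompting with only the pre-query template.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "meta-llama/Meta-Llama-3-8B-Instruct"  # gated; any Llama-3-style instruct model works
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

# Pre-query template from the post; append e.g. "spanish:" to steer the language.
pre_query = "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n"
inputs = tokenizer(pre_query, return_tensors="pt", add_special_tokens=False).to(model.device)

output = model.generate(**inputs, max_new_tokens=64, do_sample=True, temperature=1.0)
instruction = tokenizer.decode(output[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
print(instruction)  # a plausible user query invented by the model
```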
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/626505d493e0b04d75710566/9rfJc9ORXU9J5a42Ev3v6.png", "fullname": "Stefano Fiorucci", "name": "anakin87", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 66, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/626505d493e0b04d75710566/LOu8cDPoLgWBFBsANIypB.png" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5e4318d616b09a31220980d6/24rMJ_vPh3gW9ZEmj64xr.png", "fullname": "Manuel Romero", "name": "mrm8488", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 2200 } ]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "tiendung", "djuna", "jgitsolutions" ], "count": 4 }, { "reaction": "๐Ÿคฏ", "users": [ "pacificg" ], "count": 1 } ]
2024-10-21T10:38:15.000Z
2024-10-21T15:50:47.492Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/626505d493e0b04d75710566/9rfJc9ORXU9J5a42Ev3v6.png", "fullname": "Stefano Fiorucci", "name": "anakin87", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 66, "isFollowing": false } ]
/posts/anakin87/354562758942371
1,044
1
741785107403373
[ { "type": "text", "value": "Bellman, the Swedish finetune, has once again returned in his biggest incarnation yet, at 12b. Based on Mistral-Nemo-Instruct: ", "raw": "Bellman, the Swedish finetune, has once again returned in his biggest incarnation yet, at 12b. Based on Mistral-Nemo-Instruct: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/neph1/Mistral-Nemo-Instruct-bellman-12b", "href": null, "resource": { "type": "model", "id": "neph1/Mistral-Nemo-Instruct-bellman-12b", "discussionNum": null }, "url": "https://huggingface.co/neph1/Mistral-Nemo-Instruct-bellman-12b", "code": null, "user": null, "label": null, "lang": null } ]
Bellman, the Swedish finetune, has once again returned in his biggest incarnation yet, at 12b. Based on Mistral-Nemo-Instruct: https://huggingface.co/neph1/Mistral-Nemo-Instruct-bellman-12b
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/653cd3049107029eb004f968/Y4XphXmk8P51GlIi6u9cd.png", "fullname": "Rickard Edรฉn", "name": "neph1", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 13, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-10-21T10:10:00.000Z
2024-10-21T10:10:00.797Z
[]
/posts/neph1/741785107403373
473
0
852879216298219
[ { "type": "text", "value": "Spent the weekend testing out some prompts with ๐Ÿ•ต๏ธโ€โ™‚๏ธMystery Bot๐Ÿ•ต๏ธโ€โ™‚๏ธ on my mobile... exciting things are coming soon for the following languages:", "raw": "Spent the weekend testing out some prompts with ๐Ÿ•ต๏ธโ€โ™‚๏ธMystery Bot๐Ÿ•ต๏ธโ€โ™‚๏ธ on my mobile... exciting things are coming soon for the following languages:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐ŸŒArabic, Chinese, Czech, Dutch, English French, German, Greek, Hebrew, Hindi, Indonesian, Italian, Japanese, Korean, Persian, Polish, Portuguese, Romanian, Russian, Spanish, Turkish, Ukrainian, and Vietnamese!๐ŸŒ", "raw": "๐ŸŒArabic, Chinese, Czech, Dutch, English French, German, Greek, Hebrew, Hindi, Indonesian, Italian, Japanese, Korean, Persian, Polish, Portuguese, Romanian, Russian, Spanish, Turkish, Ukrainian, and Vietnamese!๐ŸŒ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Spent the weekend testing out some prompts with 🕵️‍♂️Mystery Bot🕵️‍♂️ on my mobile... exciting things are coming soon for the following languages: 🌍Arabic, Chinese, Czech, Dutch, English, French, German, Greek, Hebrew, Hindi, Indonesian, Italian, Japanese, Korean, Persian, Polish, Portuguese, Romanian, Russian, Spanish, Turkish, Ukrainian, and Vietnamese!🌍
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/641b754d1911d3be6745cce9/GXN8mEmaq3rfITRrw7GeZ.jpeg", "fullname": "atayloraerospace", "name": "Taylor658", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 76, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/641b754d1911d3be6745cce9/5N9iUDEhtkv3V5hoykr0V.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/641b754d1911d3be6745cce9/N4mFkqTrKI5LsKAi1KSea.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/641b754d1911d3be6745cce9/9YxMq08oiCh3UbQ52_DLr.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/641b754d1911d3be6745cce9/JnCVfMEUyYUF2ikj4hqYM.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/641b754d1911d3be6745cce9/5f_HAq3F-bjpjM5MaBJbv.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/641b754d1911d3be6745cce9/_S1iACcsP77CHJZ34Y2GN.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/641b754d1911d3be6745cce9/f6j2tjvNGveRTpCtEtz3f.jpeg" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "sitloboi2012", "kenza-ily", "shivi", "bisnotforbella", "Ercin", "apol", "nanyy1025" ], "count": 8 }, { "reaction": "๐Ÿ”ฅ", "users": [ "shivi", "nanyy1025" ], "count": 2 }, { "reaction": "โค๏ธ", "users": [ "shivi", "nanyy1025" ], "count": 2 }, { "reaction": "๐Ÿš€", "users": [ "shivi", "nanyy1025" ], "count": 2 } ]
2024-10-21T04:49:02.000Z
2024-10-21T04:49:02.878Z
[]
/posts/Taylor658/852879216298219
2,500
0
259122760899906
[ { "type": "text", "value": "Iโ€™m recently experimenting with the Flux-Ultra Realism and Real Anime LoRA models, using the Flux.1-dev model as the base. The model and its demo example are provided in the Flux LoRA DLC collections.๐Ÿ“ƒ", "raw": "Iโ€™m recently experimenting with the Flux-Ultra Realism and Real Anime LoRA models, using the Flux.1-dev model as the base. The model and its demo example are provided in the Flux LoRA DLC collections.๐Ÿ“ƒ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐ŸฅณDemo : ๐Ÿ”— ", "raw": "๐ŸฅณDemo : ๐Ÿ”— ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/prithivMLmods/FLUX-LoRA-DLC", "href": null, "resource": { "type": "space", "id": "prithivMLmods/FLUX-LoRA-DLC", "discussionNum": null }, "url": "https://huggingface.co/spaces/prithivMLmods/FLUX-LoRA-DLC", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐ŸฅณModel: ", "raw": "๐ŸฅณModel: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- ", "raw": "- ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/prithivMLmods/Canopus-LoRA-Flux-UltraRealism-2.0", "href": null, "resource": { "type": "model", "id": "prithivMLmods/Canopus-LoRA-Flux-UltraRealism-2.0", "discussionNum": null }, "url": "https://huggingface.co/prithivMLmods/Canopus-LoRA-Flux-UltraRealism-2.0", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- ", "raw": "- ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/prithivMLmods/Flux-Dev-Real-Anime-LoRA", "href": null, "resource": { "type": "model", "id": "prithivMLmods/Flux-Dev-Real-Anime-LoRA", "discussionNum": null }, "url": "https://huggingface.co/prithivMLmods/Flux-Dev-Real-Anime-LoRA", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": 
"๐ŸฅณFor more details, please visit the README.md of the Flux LoRA DLC Space & ", "raw": "๐ŸฅณFor more details, please visit the README.md of the Flux LoRA DLC Space & ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/prithivMLmods/lora-space-collections-6714b72e0d49e1c97fbd6a32", "href": null, "resource": { "type": "collection", "id": "prithivMLmods/lora-space-collections-6714b72e0d49e1c97fbd6a32", "discussionNum": null }, "url": "https://huggingface.co/collections/prithivMLmods/lora-space-collections-6714b72e0d49e1c97fbd6a32", "code": null, "user": null, "label": null, "lang": null } ]
I've recently been experimenting with the Flux-Ultra Realism and Real Anime LoRA models, using the Flux.1-dev model as the base. The models and their demo examples are provided in the Flux LoRA DLC collections.📃 🥳Demo: 🔗 https://huggingface.co/spaces/prithivMLmods/FLUX-LoRA-DLC 🥳Models: - https://huggingface.co/prithivMLmods/Canopus-LoRA-Flux-UltraRealism-2.0 - https://huggingface.co/prithivMLmods/Flux-Dev-Real-Anime-LoRA 🥳For more details, please visit the README.md of the Flux LoRA DLC Space & https://huggingface.co/collections/prithivMLmods/lora-space-collections-6714b72e0d49e1c97fbd6a32
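For reference, loading one of these LoRAs on top of the base model with diffusers might look roughly like this; the prompt, dtype, and sampler settings are placeholders, and the model cards should be checked for any recommended trigger words.

```python
# Sketch: Flux.1-dev plus the Ultra Realism LoRA via diffusers (needs a large GPU and model access).
import torch
from diffusers import FluxPipeline

pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
)
pipe.load_lora_weights("prithivMLmods/Canopus-LoRA-Flux-UltraRealism-2.0")
pipe.to("cuda")

image = pipe(
    "ultra realistic portrait photo, natural light",  # placeholder prompt
    num_inference_steps=28,
    guidance_scale=3.5,
).images[0]
image.save("ultra_realism.png")
```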
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65bb837dbfb878f46c77de4c/UVtVbF_3rdt0DC8xTkpL1.jpeg", "fullname": "Prithiv Sakthi", "name": "prithivMLmods", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 393, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/-OHlyIsjCL6CoDvrhXvD1.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/LRpVxO4KxHAQ8EgLFFtMu.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/GQ4WL5s9DR7UcZJakqzPJ.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/9mtv4aFpZGRssehT_VOuZ.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/CWGeCWZEqAOKnYe3_-1xE.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/YvTy_ymz-G6eQ5eXv_QeS.webp" } ]
[]
[ { "reaction": "๐Ÿ‘", "users": [ "John6666", "prithivMLmods", "Rhyzhkovaa", "den0620", "AdinaY", "chethan62", "ai4life44", "Ngrthm" ], "count": 8 }, { "reaction": "๐Ÿš€", "users": [ "darksfx", "jematos92", "den0620", "Stopwolf", "AdinaY", "prithivMLmods", "ai4life44" ], "count": 7 }, { "reaction": "๐Ÿ”ฅ", "users": [ "Minhajameen", "Stopwolf", "AdinaY", "prithivMLmods", "ai4life44" ], "count": 5 }, { "reaction": "๐Ÿง ", "users": [ "ai4life44", "Ngrthm" ], "count": 2 }, { "reaction": "โค๏ธ", "users": [ "Ngrthm" ], "count": 1 } ]
2024-10-20T19:44:10.000Z
2024-10-23T03:34:47.622Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/67186e2e5433befe1ee225a4/kWRftAYDNf_HJZgaJQM2A.jpeg", "fullname": "Muhammad Niyaz", "name": "sajjad112233", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2, "isFollowing": false } ]
/posts/prithivMLmods/259122760899906
3,939
1
338189899958661
[ { "type": "text", "value": "๐ŸŒ Introducing Websim.ai User Projects Dataset - ", "raw": "๐ŸŒ Introducing Websim.ai User Projects Dataset - ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/nyuuzyou/websim", "href": null, "resource": { "type": "dataset", "id": "nyuuzyou/websim", "discussionNum": null }, "url": "https://huggingface.co/datasets/nyuuzyou/websim", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Dataset highlights:", "raw": "Dataset highlights:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- 137,452 user projects from Websim.ai, a service for creating small sites using Large Language Models (LLMs)", "raw": "- 137,452 user projects from Websim.ai, a service for creating small sites using Large Language Models (LLMs)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Primarily in English, with potential for multilingual content in generated websites", "raw": "- Primarily in English, with potential for multilingual content in generated websites", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Each entry includes: project metadata, user information, and generated HTML content", "raw": "- Each entry includes: project metadata, user information, and generated HTML content", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Contains detailed information about project revisions, site generation, and user interactions", "raw": "- Contains detailed information about project revisions, site generation, and user interactions", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Data covers a wide range of user-generated website projects created through AI assistance", "raw": "- Data covers a wide range of user-generated website projects created through AI assistance", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, 
"resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Dedicated to the public domain under Creative Commons Zero (CC0) license", "raw": "- Dedicated to the public domain under Creative Commons Zero (CC0) license", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The dataset can be used for analyzing AI-assisted web development trends, studying user behavior in LLM-powered creative tools, and exploring the capabilities of language models in web design.", "raw": "The dataset can be used for analyzing AI-assisted web development trends, studying user behavior in LLM-powered creative tools, and exploring the capabilities of language models in web design.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
๐ŸŒ Introducing Websim.ai User Projects Dataset - https://huggingface.co/datasets/nyuuzyou/websim Dataset highlights: - 137,452 user projects from Websim.ai, a service for creating small sites using Large Language Models (LLMs) - Primarily in English, with potential for multilingual content in generated websites - Each entry includes: project metadata, user information, and generated HTML content - Contains detailed information about project revisions, site generation, and user interactions - Data covers a wide range of user-generated website projects created through AI assistance - Dedicated to the public domain under Creative Commons Zero (CC0) license The dataset can be used for analyzing AI-assisted web development trends, studying user behavior in LLM-powered creative tools, and exploring the capabilities of language models in web design.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/643ac5d2e2b979ae6144d68c/Z7PCNopn4cQeAYnVJDoqG.png", "fullname": "nyuuzyou", "name": "nyuuzyou", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 57, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "Eyel" ], "count": 2 } ]
2024-10-20T19:40:44.000Z
2024-10-20T19:40:44.655Z
[]
/posts/nyuuzyou/338189899958661
1,396
0
309802757493429
[ { "type": "text", "value": "Gradio not scrollable on iOS", "raw": "Gradio not scrollable on iOS", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Gradio not scrollable on iOS
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/OMEqajG_I9VemRa-NndDs.png", "fullname": "Michael bollox", "name": "MichaelBoll", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿง ", "users": [ "John6666" ], "count": 1 } ]
2024-10-20T17:16:38.000Z
2024-10-20T23:27:03.491Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/659f000b83abded48e190901/BnXL_XYbVX6PHngfQLECW.png", "fullname": "Noa Roggendorff", "name": "nroggendorff", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 141, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png", "fullname": "John Smith", "name": "John6666", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 398, "isFollowing": false } ]
/posts/MichaelBoll/309802757493429
618
2
948143434308209
[ { "type": "text", "value": "This is probably a very hot take, but here goes nothing.", "raw": "This is probably a very hot take, but here goes nothing.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "With the incredibly accurate LoRAs we see emerge for high quality models like FLUX from services like fal.ai that offer training within single digit minutes, e.g. 2 min per 1000 iterations.", "raw": "With the incredibly accurate LoRAs we see emerge for high quality models like FLUX from services like fal.ai that offer training within single digit minutes, e.g. 2 min per 1000 iterations.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Why the hell are people publishing private LoRAs as public models?!", "raw": "Why the hell are people publishing private LoRAs as public models?!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Take a look at this listing: ", "raw": "Take a look at this listing: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/models?other=base_model:adapter:black-forest-labs%2FFLUX.1-dev&sort=created", "href": "https://huggingface.co/models?other=base_model:adapter:black-forest-labs%2FFLUX.1-dev&sort=created", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I would expect that people that hold a HF account have some kind of forward thinking. Heck, do you really want to give anyone the power to create ultra realistic images of yourself?!", "raw": "I would expect that people that hold a HF account have some kind of forward thinking. 
Heck, do you really want to give anyone the power to create ultra realistic images of yourself?!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Didn't we learn anything from social media? ", "raw": "Didn't we learn anything from social media? ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I am puzzled..", "raw": "I am puzzled..", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
This is probably a very hot take, but here goes nothing. Incredibly accurate LoRAs for high-quality models like FLUX now emerge from services like fal.ai that offer training within single-digit minutes, e.g. 2 min per 1000 iterations. So why the hell are people publishing private LoRAs as public models?! Take a look at this listing: https://huggingface.co/models?other=base_model:adapter:black-forest-labs%2FFLUX.1-dev&sort=created I would expect people who hold an HF account to have some kind of forward thinking. Heck, do you really want to give anyone the power to create ultra-realistic images of yourself?! Didn't we learn anything from social media? I am puzzled..
{ "avatarUrl": "/avatars/67b2e111ee8541e8033dab5ee1ca0eb6.svg", "fullname": "PZ", "name": "philipp-zettl", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 9, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-10-20T15:00:06.000Z
2024-10-22T21:12:10.871Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png", "fullname": "John Smith", "name": "John6666", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 398, "isFollowing": false }, { "avatarUrl": "/avatars/5cbfa6cbde933503bbc3577cf713e7b5.svg", "fullname": "Friedrich Marty", "name": "Smorty100", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "/avatars/67b2e111ee8541e8033dab5ee1ca0eb6.svg", "fullname": "PZ", "name": "philipp-zettl", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 9, "isFollowing": false } ]
/posts/philipp-zettl/948143434308209
724
6
126778565806623
[ { "type": "text", "value": "Last Week in Medical AI: Top LLM Research ", "raw": "Last Week in Medical AI: Top LLM Research ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Papers/Models", "raw": "Papers/Models", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ๐Ÿ”ฅ", "raw": " ๐Ÿ”ฅ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ… (October 12 - October 19, 2024)", "raw": "๐Ÿ… (October 12 - October 19, 2024)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Medical LLM & Other Models:", "raw": "Medical LLM & Other Models:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- OLAPH: Factual Biomedical LLM QA", "raw": "- OLAPH: Factual Biomedical LLM QA", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- LLMD: Interpreting Longitudinal Medical Records", "raw": "- LLMD: Interpreting Longitudinal Medical Records", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- LifeGPT: Generative Transformer for Cells", "raw": "- LifeGPT: Generative Transformer for Cells", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- MedCare: Decoupled Clinical LLM Alignment", "raw": "- MedCare: Decoupled Clinical LLM Alignment", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Y-Mol: Biomedical LLM for Drug Development", "raw": "- Y-Mol: Biomedical LLM for Drug Development", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": 
null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Frameworks and Methodologies:", "raw": "Frameworks and Methodologies:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- MedINST: Biomedical Instructions Meta Dataset", "raw": "- MedINST: Biomedical Instructions Meta Dataset", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Democratizing Medical LLMs via Language Experts", "raw": "- Democratizing Medical LLMs via Language Experts", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- MCQG-SRefine: Iterative Question Generation", "raw": "- MCQG-SRefine: Iterative Question Generation", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Adaptive Medical Language Agents", "raw": "- Adaptive Medical Language Agents", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- MeNTi: Medical LLM with Nested Tools", "raw": "- MeNTi: Medical LLM with Nested Tools", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Medical LLM Applications:", "raw": "Medical LLM Applications:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- AGENTiGraph: LLM Chatbots with Private Data", "raw": "- AGENTiGraph: LLM Chatbots with Private Data", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- MMed-RAG: Multimodal Medical RAG System", "raw": "- MMed-RAG: Multimodal Medical RAG System", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", 
"value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Medical Graph RAG: Safe LLM via Retrieval", "raw": "- Medical Graph RAG: Safe LLM via Retrieval", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- MedAide: Multi-Agent Medical LLM Collaboration", "raw": "- MedAide: Multi-Agent Medical LLM Collaboration", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Synthetic Clinical Trial Generation", "raw": "- Synthetic Clinical Trial Generation", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Medical LLMs & Benchmarks:", "raw": "Medical LLMs & Benchmarks:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- WorldMedQA-V: Multimodal Medical LLM Dataset", "raw": "- WorldMedQA-V: Multimodal Medical LLM Dataset", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- HEALTH-PARIKSHA: RAG Models Evaluation", "raw": "- HEALTH-PARIKSHA: RAG Models Evaluation", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Synthetic Data for Medical Vision-Language", "raw": "- Synthetic Data for Medical Vision-Language", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Now you can watch and listen to the latest Medical AI papers daily on our YouTube and Spotify channels as well!", "raw": "Now you can watch and listen to the latest Medical AI papers daily on our YouTube and Spotify channels as well!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": 
null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Youtube: ", "raw": "- Youtube: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://youtu.be/LROOjWXUgvg?si=s-nNDOSD3BrsHYjQ", "href": "https://youtu.be/LROOjWXUgvg?si=s-nNDOSD3BrsHYjQ", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Spotify : ", "raw": "- Spotify : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://open.spotify.com/episode/12xeN2vnOTRdDrHbWqhV6I?si=bd7c8d9fee8049fd", "href": "https://open.spotify.com/episode/12xeN2vnOTRdDrHbWqhV6I?si=bd7c8d9fee8049fd", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Last Week in Medical AI: Top LLM Research Papers/Models ๐Ÿ”ฅ ๐Ÿ… (October 12 - October 19, 2024) Medical LLM & Other Models: - OLAPH: Factual Biomedical LLM QA - LLMD: Interpreting Longitudinal Medical Records - LifeGPT: Generative Transformer for Cells - MedCare: Decoupled Clinical LLM Alignment - Y-Mol: Biomedical LLM for Drug Development Frameworks and Methodologies: - MedINST: Biomedical Instructions Meta Dataset - Democratizing Medical LLMs via Language Experts - MCQG-SRefine: Iterative Question Generation - Adaptive Medical Language Agents - MeNTi: Medical LLM with Nested Tools Medical LLM Applications: - AGENTiGraph: LLM Chatbots with Private Data - MMed-RAG: Multimodal Medical RAG System - Medical Graph RAG: Safe LLM via Retrieval - MedAide: Multi-Agent Medical LLM Collaboration - Synthetic Clinical Trial Generation Medical LLMs & Benchmarks: - WorldMedQA-V: Multimodal Medical LLM Dataset - HEALTH-PARIKSHA: RAG Models Evaluation - Synthetic Data for Medical Vision-Language Now you can watch and listen to the latest Medical AI papers daily on our YouTube and Spotify channels as well! - Youtube: https://youtu.be/LROOjWXUgvg?si=s-nNDOSD3BrsHYjQ - Spotify : https://open.spotify.com/episode/12xeN2vnOTRdDrHbWqhV6I?si=bd7c8d9fee8049fd
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f3fe13d79c1ba4c353d0c19/XswyGe3OtOdZ6g7rnrgfc.png", "fullname": "Aaditya Ura", "name": "aaditya", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 224, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/Vj9HYQwSyZV-zcW_b_xdh.jpeg" } ]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "aaditya", "JoPmt", "Joseph717171", "VISHNUDHAT" ], "count": 4 }, { "reaction": "โค๏ธ", "users": [ "aaditya", "benhachem", "Joseph717171", "shetumohanto" ], "count": 4 }, { "reaction": "๐Ÿš€", "users": [ "aaditya", "John6666", "Joseph717171" ], "count": 3 }, { "reaction": "๐Ÿค—", "users": [ "aaditya", "Joseph717171" ], "count": 2 }, { "reaction": "๐Ÿง ", "users": [ "aaditya", "Joseph717171" ], "count": 2 }, { "reaction": "๐Ÿค", "users": [ "uDivy", "aaditya" ], "count": 2 }, { "reaction": "๐Ÿ‘", "users": [ "Leotrim" ], "count": 1 } ]
2024-10-20T13:49:51.000Z
2024-10-30T14:04:26.109Z
[ { "avatarUrl": "/avatars/9df46be074977d1506b8eb8402aaec19.svg", "fullname": "Surbhi Sharma", "name": "Surbhi123", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/aaditya/126778565806623
2,737
1
658793400661194
[ { "type": "text", "value": "How the ๐Ÿ—ฃ๐Ÿ† leaderboard of a merged TTS Arena with the ๐Ÿค— Spaces fork would look like. These results are somewhat unreliable as some models have not challenged the other in the list. And the original TTS Arena used only narration type sentences.", "raw": "How the ๐Ÿ—ฃ๐Ÿ† leaderboard of a merged TTS Arena with the ๐Ÿค— Spaces fork would look like. These results are somewhat unreliable as some models have not challenged the other in the list. And the original TTS Arena used only narration type sentences.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
This is what the ๐Ÿ—ฃ๐Ÿ† leaderboard of a merged TTS Arena with the ๐Ÿค— Spaces fork would look like. These results are somewhat unreliable, as some models have not challenged the others in the list, and the original TTS Arena used only narration-type sentences.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d52e0c4e5642795617f668/ztXLrdFz3gkUJUIIQXfHo.png", "fullname": "Yanis L", "name": "Pendrokar", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 15, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/63d52e0c4e5642795617f668/Fvvow7niwpn9RLVSDf7mu.png" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-10-20T13:43:14.000Z
2024-11-01T11:57:31.066Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d52e0c4e5642795617f668/ztXLrdFz3gkUJUIIQXfHo.png", "fullname": "Yanis L", "name": "Pendrokar", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 15, "isFollowing": false } ]
/posts/Pendrokar/658793400661194
637
2
765430038894135
[ { "type": "text", "value": "๐Ÿš€ Announcement for the Lovely community! ๐Ÿš€", "raw": "๐Ÿš€ Announcement for the Lovely community! ๐Ÿš€", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Just launched the ", "raw": "Just launched the ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/zamal/DeepSeek-VL-1.3B-Chat", "href": null, "resource": { "type": "space", "id": "zamal/DeepSeek-VL-1.3B-Chat", "discussionNum": null }, "url": "https://huggingface.co/spaces/zamal/DeepSeek-VL-1.3B-Chat", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " on Hugging Face, and it's ready for YOU to explore! ๐Ÿ’ฌ๐Ÿ–ผ๏ธ", "raw": " on Hugging Face, and it's ready for YOU to explore! ๐Ÿ’ฌ๐Ÿ–ผ๏ธ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This full-fledged model is perfect for advanced image and text interactions, with zero GPU required. The Deepseek VL-1.3B Chat typically needs around 8 GB of VRAM and storage of almost 4 GB, but now you can experience it hassle-free right on our space!", "raw": "This full-fledged model is perfect for advanced image and text interactions, with zero GPU required. The Deepseek VL-1.3B Chat typically needs around 8 GB of VRAM and storage of almost 4 GB, but now you can experience it hassle-free right on our space!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Want something lighter? Weโ€™ve also uploaded a 4 bit quantized version (just around 1GB!), available on my profile. Perfect for those with limited hardware. ๐ŸŒ๐Ÿ”", "raw": "Want something lighter? Weโ€™ve also uploaded a 4 bit quantized version (just around 1GB!), available on my profile. Perfect for those with limited hardware. ๐ŸŒ๐Ÿ”", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Come try it now and see what this model can do! ๐Ÿš€โœจ", "raw": "Come try it now and see what this model can do! 
๐Ÿš€โœจ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
๐Ÿš€ Announcement for the Lovely community! ๐Ÿš€ Just launched the https://huggingface.co/spaces/zamal/DeepSeek-VL-1.3B-Chat Space on Hugging Face, and it's ready for YOU to explore! ๐Ÿ’ฌ๐Ÿ–ผ๏ธ This full-fledged model is perfect for advanced image and text interactions, with zero GPU required on your end. DeepSeek-VL-1.3B Chat typically needs around 8 GB of VRAM and almost 4 GB of storage, but now you can experience it hassle-free right in our Space! Want something lighter? Weโ€™ve also uploaded a 4-bit quantized version (just around 1 GB!), available on my profile. Perfect for those with limited hardware. ๐ŸŒ๐Ÿ” Come try it now and see what this model can do! ๐Ÿš€โœจ
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6404403bad54665351d42ee2/TCC5Na8ojtSL1MJAzTn3b.png", "fullname": "zamal_", "name": "zamal", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 23, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "zamal", "John6666", "louisbrulenaudet" ], "count": 3 }, { "reaction": "๐Ÿš€", "users": [ "Norod78" ], "count": 1 } ]
2024-10-20T11:44:42.000Z
2024-10-20T11:44:42.219Z
[]
/posts/zamal/765430038894135
1,712
0
118954679084753
[ { "type": "text", "value": "Triton nanoGPT now has a custom cross entropy loss kernel ๐Ÿš€ ", "raw": "Triton nanoGPT now has a custom cross entropy loss kernel ๐Ÿš€ ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Next: matmul, gradually overthrowing all major PyTorch ops:) ", "raw": "Next: matmul, gradually overthrowing all major PyTorch ops:) ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Simplified pseudo for parallel cross-entropy loss compute:", "raw": "Simplified pseudo for parallel cross-entropy loss compute:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- init program: get pid, compute offsets, load targets.", "raw": "- init program: get pid, compute offsets, load targets.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- init row_max and row_sum.", "raw": "- init row_max and row_sum.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- for-loop1 (find max logits): update row_max with max logits.", "raw": "- for-loop1 (find max logits): update row_max with max logits.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- for-loop2 (compute softmax and loss): compute row_sum, update loss.", "raw": "- for-loop2 (compute softmax and loss): compute row_sum, update loss.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- add log(row_sum) and store loss. ", "raw": "- add log(row_sum) and store loss. 
", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Code: ", "raw": "Code: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/Jaykef/ai-algorithms/blob/main/triton_nanoGPT.ipynb", "href": "https://github.com/Jaykef/ai-algorithms/blob/main/triton_nanoGPT.ipynb", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Triton nanoGPT now has a custom cross-entropy loss kernel ๐Ÿš€ Next: matmul, gradually overthrowing all major PyTorch ops:) Simplified pseudocode for the parallel cross-entropy loss computation: - init program: get pid, compute offsets, load targets. - init row_max and row_sum. - for-loop 1 (find max logits): update row_max with the max logits. - for-loop 2 (compute softmax and loss): compute row_sum, update loss. - add log(row_sum) and store the loss. Code: https://github.com/Jaykef/ai-algorithms/blob/main/triton_nanoGPT.ipynb
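For readers who want to see what that pseudocode can look like in practice, here is a small standalone sketch of the same two-pass idea. This is not Jaward's kernel: the names, block size, and host-side launcher are my own assumptions, and the real implementation lives in the linked notebook.

```py
import torch
import triton
import triton.language as tl

@triton.jit
def cross_entropy_fwd_kernel(
    logits_ptr, targets_ptr, loss_ptr,
    n_cols, stride_row,
    BLOCK_SIZE: tl.constexpr,
):
    # One program per row (one token position)
    row = tl.program_id(0)
    row_start = logits_ptr + row * stride_row
    target = tl.load(targets_ptr + row)

    # Loop 1: running max over the vocabulary, for numerical stability
    row_max = -float("inf")
    for start in range(0, n_cols, BLOCK_SIZE):
        offs = start + tl.arange(0, BLOCK_SIZE)
        mask = offs < n_cols
        chunk = tl.load(row_start + offs, mask=mask, other=-float("inf")).to(tl.float32)
        row_max = tl.maximum(row_max, tl.max(chunk, axis=0))

    # Loop 2: softmax denominator (row_sum) and the target logit
    row_sum = 0.0
    target_logit = 0.0
    for start in range(0, n_cols, BLOCK_SIZE):
        offs = start + tl.arange(0, BLOCK_SIZE)
        mask = offs < n_cols
        chunk = tl.load(row_start + offs, mask=mask, other=-float("inf")).to(tl.float32)
        row_sum += tl.sum(tl.exp(chunk - row_max), axis=0)
        target_logit += tl.sum(tl.where(offs == target, chunk, 0.0), axis=0)

    # loss = log(sum_j exp(x_j - max)) + max - x_target
    tl.store(loss_ptr + row, tl.log(row_sum) + row_max - target_logit)


def triton_cross_entropy(logits: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
    # logits: (N, vocab) on CUDA, targets: (N,) int64 on CUDA
    n_rows, n_cols = logits.shape
    loss = torch.empty(n_rows, device=logits.device, dtype=torch.float32)
    cross_entropy_fwd_kernel[(n_rows,)](
        logits, targets, loss,
        n_cols, logits.stride(0),
        BLOCK_SIZE=1024,
    )
    return loss.mean()
```

The chunked loops keep register and shared-memory use bounded for large vocabularies; a production kernel would also fuse the backward pass.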
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6438a9027de34e8ea7e4b257/vib8QSd1AWMr_bR9ig_xJ.jpeg", "fullname": "Jaward Sesay", "name": "Jaward", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 191, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/W-B2pVJMv1NA44zodcyRp.mp4" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/KU6k-fFdnoQM15xKBYwq7.png" } ]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "Sri-Vigneshwar-DJ", "John6666", "sugatoray", "Joseph717171" ], "count": 4 }, { "reaction": "๐Ÿ‘€", "users": [ "John6666", "Joseph717171" ], "count": 2 } ]
2024-10-20T10:41:37.000Z
2024-10-21T03:24:22.226Z
[]
/posts/Jaward/118954679084753
1,730
0
978136798577934
[ { "type": "text", "value": "Looks like ", "raw": "Looks like ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@Meta", "href": null, "resource": null, "url": null, "code": null, "user": "Meta", "label": null, "lang": null }, { "type": "text", "value": " thinks we forgot they created PyTorch, so now they've open-sourced Lingua, a powerful and flexible library for training and inferencing large language models.", "raw": " thinks we forgot they created PyTorch, so now they've open-sourced Lingua, a powerful and flexible library for training and inferencing large language models.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Things that stand out:", "raw": "Things that stand out:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Architecture: Pure PyTorch ", "raw": "- Architecture: Pure PyTorch ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`nn.Module`", "href": null, "resource": null, "url": null, "code": "nn.Module", "user": null, "label": null, "lang": null }, { "type": "text", "value": " implementation for easy customization.", "raw": " implementation for easy customization.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Checkpointing: Uses the new PyTorch distributed saving method (.distcp format) for flexible model reloading across different GPU configurations.", "raw": "- Checkpointing: Uses the new PyTorch distributed saving method (.distcp format) for flexible model reloading across different GPU configurations.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Configuration: Utilizes data classes and YAML files for intuitive setup and modification.", "raw": "- Configuration: Utilizes data classes and YAML files for intuitive setup and modification.", "href": null, "resource": null, "url": null, "code": null, "user": null, 
"label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Profiling: Integrates with xFormers' profiler for automatic MFU and HFU calculation, plus memory profiling.", "raw": "- Profiling: Integrates with xFormers' profiler for automatic MFU and HFU calculation, plus memory profiling.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Slurm Integration: Includes ", "raw": "- Slurm Integration: Includes ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`stool.py`", "href": null, "resource": null, "url": null, "code": "stool.py", "user": null, "label": null, "lang": null }, { "type": "text", "value": " for seamless job launching on Slurm clusters.", "raw": " for seamless job launching on Slurm clusters.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Some results from ", "raw": "Some results from ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@Meta", "href": null, "resource": null, "url": null, "code": null, "user": "Meta", "label": null, "lang": null }, { "type": "text", "value": " to show off:", "raw": " to show off:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- 1B parameter models trained on 60B tokens achieve strong performance across various NLP tasks.", "raw": "- 1B parameter models trained on 60B tokens achieve strong performance across various NLP tasks.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- 7B parameter Mamba model (trained on 200B tokens) shows competitive results with Llama 7B on benchmarks like ARC, MMLU, and 
BBH.", "raw": "- 7B parameter Mamba model (trained on 200B tokens) shows competitive results with Llama 7B on benchmarks like ARC, MMLU, and BBH.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "If you're working on LLM research or looking to experiment with cutting-edge language model architectures, Lingua is definitely worth exploring.", "raw": "If you're working on LLM research or looking to experiment with cutting-edge language model architectures, Lingua is definitely worth exploring.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Looks like @Meta thinks we forgot they created PyTorch, so now they've open-sourced Lingua, a powerful and flexible library for training and inferencing large language models. Things that stand out: - Architecture: Pure PyTorch `nn.Module` implementation for easy customization. - Checkpointing: Uses the new PyTorch distributed saving method (.distcp format) for flexible model reloading across different GPU configurations. - Configuration: Utilizes data classes and YAML files for intuitive setup and modification. - Profiling: Integrates with xFormers' profiler for automatic MFU and HFU calculation, plus memory profiling. - Slurm Integration: Includes `stool.py` for seamless job launching on Slurm clusters. Some results from @Meta to show off: - 1B parameter models trained on 60B tokens achieve strong performance across various NLP tasks. - 7B parameter Mamba model (trained on 200B tokens) shows competitive results with Llama 7B on benchmarks like ARC, MMLU, and BBH. If you're working on LLM research or looking to experiment with cutting-edge language model architectures, Lingua is definitely worth exploring.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662bf5bfe93bb73804ef9344/WXYLnjjJ4SROkoveIi7If.png", "fullname": "Kuldeep Singh Sidhu", "name": "singhsidhukuldeep", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 219, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/662bf5bfe93bb73804ef9344/PclvWWChz7EQuptcqtzdt.png" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/61e8c67cee1e1440121f0240/9sb__WsO5mwmdHHa6xKNc.jpeg", "fullname": "Meta World Peace", "name": "Meta", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 5 } ]
[ { "reaction": "๐Ÿค", "users": [ "GoDjMike", "louisbrulenaudet", "flflow" ], "count": 3 }, { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-10-20T09:09:28.000Z
2024-10-20T09:09:28.860Z
[]
/posts/singhsidhukuldeep/978136798577934
1,295
0
190548949064033
[ { "type": "text", "value": "Interested in performing inference with an ONNX model?โšก๏ธ", "raw": "Interested in performing inference with an ONNX model?โšก๏ธ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The Optimum docs about model inference with ONNX Runtime is now much clearer and simpler!", "raw": "The Optimum docs about model inference with ONNX Runtime is now much clearer and simpler!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "You want to deploy your favorite model on the hub but you don't know how to export it to the ONNX format? You can do it in one line of code as follows:", "raw": "You want to deploy your favorite model on the hub but you don't know how to export it to the ONNX format? You can do it in one line of code as follows:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "code_fence", "value": null, "raw": "```py\nfrom optimum.onnxruntime import ORTModelForSequenceClassification\n\n# Load the model from the hub and export it to the ONNX format\nmodel_id = \"distilbert-base-uncased-finetuned-sst-2-english\"\nmodel = ORTModelForSequenceClassification.from_pretrained(model_id, export=True)\n```", "href": null, "resource": null, "url": null, "code": "from optimum.onnxruntime import ORTModelForSequenceClassification\n\n# Load the model from the hub and export it to the ONNX format\nmodel_id = \"distilbert-base-uncased-finetuned-sst-2-english\"\nmodel = ORTModelForSequenceClassification.from_pretrained(model_id, export=True)", "user": null, "label": null, "lang": "py" }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Check out the whole guide ๐Ÿ‘‰ ", "raw": "Check out the whole guide ๐Ÿ‘‰ ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/docs/optimum/onnxruntime/usage_guides/models", "href": "https://huggingface.co/docs/optimum/onnxruntime/usage_guides/models", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Interested in performing inference with an ONNX model?โšก๏ธ The Optimum docs about model inference with ONNX Runtime is now much clearer and simpler! You want to deploy your favorite model on the hub but you don't know how to export it to the ONNX format? You can do it in one line of code as follows: ```py from optimum.onnxruntime import ORTModelForSequenceClassification # Load the model from the hub and export it to the ONNX format model_id = "distilbert-base-uncased-finetuned-sst-2-english" model = ORTModelForSequenceClassification.from_pretrained(model_id, export=True) ``` Check out the whole guide ๐Ÿ‘‰ https://huggingface.co/docs/optimum/onnxruntime/usage_guides/models
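And once the model is exported, it drops straight into the usual transformers workflow. A short follow-up sketch (the pipeline task and the output directory name here are just illustrative):

```py
from transformers import AutoTokenizer, pipeline
from optimum.onnxruntime import ORTModelForSequenceClassification

model_id = "distilbert-base-uncased-finetuned-sst-2-english"

# Export on the fly and load with ONNX Runtime
model = ORTModelForSequenceClassification.from_pretrained(model_id, export=True)
tokenizer = AutoTokenizer.from_pretrained(model_id)

# The ORT model is a drop-in replacement in the pipeline API
classifier = pipeline("text-classification", model=model, tokenizer=tokenizer)
print(classifier("The Optimum docs just got a lot clearer!"))

# Save the exported ONNX model (and tokenizer) for later deployment
model.save_pretrained("onnx-distilbert-sst2")
tokenizer.save_pretrained("onnx-distilbert-sst2")
```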
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/620b7c408f5871b8a1a168a7/49M2lucv3I24rOMJFnhVd.jpeg", "fullname": "Rรฉgis Pierrard", "name": "regisss", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 56, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘", "users": [ "John6666", "adeiza-x", "louisbrulenaudet", "Aurelien-Morgan", "itsbrex" ], "count": 5 } ]
2024-10-20T08:37:34.000Z
2024-10-20T08:38:16.672Z
[]
/posts/regisss/190548949064033
1,324
0
787219870804010
[ { "type": "text", "value": "Hi HuggingFacers!๐Ÿค—", "raw": "Hi HuggingFacers!๐Ÿค—", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "As you may have probably heard, in the past weeks three Tech Giants (Microsoft, Amazon and Google) announced that they would bet on nuclear reactors to feed the surging energy demand of data centers, driven by increasing AI data and computational flows. ", "raw": "As you may have probably heard, in the past weeks three Tech Giants (Microsoft, Amazon and Google) announced that they would bet on nuclear reactors to feed the surging energy demand of data centers, driven by increasing AI data and computational flows. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I try to explain the state of AI energy consumptions, its environmental impact and the key points of \"turning AI nuclear\" in my last article on HF community blog: ", "raw": "I try to explain the state of AI energy consumptions, its environmental impact and the key points of \"turning AI nuclear\" in my last article on HF community blog: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/blog/as-cle-bert/ai-is-turning-nuclear-a-review", "href": "https://huggingface.co/blog/as-cle-bert/ai-is-turning-nuclear-a-review", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Enjoy the reading!๐ŸŒฑ", "raw": "Enjoy the reading!๐ŸŒฑ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Hi HuggingFacers!๐Ÿค— As you have probably heard, in the past weeks three Tech Giants (Microsoft, Amazon and Google) announced that they would bet on nuclear reactors to feed the surging energy demand of data centers, driven by increasing AI data and computational flows. I try to explain the state of AI energy consumption, its environmental impact and the key points of "turning AI nuclear" in my latest article on the HF community blog: https://huggingface.co/blog/as-cle-bert/ai-is-turning-nuclear-a-review Enjoy the reading!๐ŸŒฑ
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65e330e7edc2f7306e252448/ucpk9c8x0UafGM4mXTrRy.jpeg", "fullname": "Astra Clelia Bertelli", "name": "as-cle-bert", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 650, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿš€", "users": [ "rmayormartins", "John6666", "den0620", "daniel-ltw", "not-lain", "louisbrulenaudet", "SriramRokkam", "odyss3y", "julesbsz" ], "count": 9 }, { "reaction": "๐Ÿ”ฅ", "users": [ "Sam000001", "odyss3y" ], "count": 2 } ]
2024-10-20T00:24:27.000Z
2024-10-20T00:24:27.457Z
[]
/posts/as-cle-bert/787219870804010
3,206
0
912528874443774
[ { "type": "text", "value": "Last Thursday at KaggleX organized by Google, I presented a workshop on \"Unlocking the Power of Large Language Models (LLMs) for Business Applications\" where I explained how we can reduce the size of LLM models to make them more suitable for business use and addressing common resource limitations.", "raw": "Last Thursday at KaggleX organized by Google, I presented a workshop on \"Unlocking the Power of Large Language Models (LLMs) for Business Applications\" where I explained how we can reduce the size of LLM models to make them more suitable for business use and addressing common resource limitations.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://drive.google.com/file/d/1p5sT4_DeyBuwCqmYt4dCJKZOgLMpESzR/view", "href": "https://drive.google.com/file/d/1p5sT4_DeyBuwCqmYt4dCJKZOgLMpESzR/view", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Last Thursday at KaggleX, organized by Google, I presented a workshop on "Unlocking the Power of Large Language Models (LLMs) for Business Applications", where I explained how we can reduce the size of LLMs to make them more suitable for business use and to address common resource limitations. https://drive.google.com/file/d/1p5sT4_DeyBuwCqmYt4dCJKZOgLMpESzR/view
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/y1W6Co4jIYB95Cx6Tjrsd.jpeg", "fullname": "Muhammad Imran Zaman", "name": "ImranzamanML", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 32, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "CyberMad41", "Joseph717171", "tiendung" ], "count": 4 } ]
2024-10-19T21:43:17.000Z
2024-10-19T21:43:17.737Z
[]
/posts/ImranzamanML/912528874443774
1,281
0
813693468664538
[ { "type": "text", "value": "Every adult on the planet knows what a vector is and has the basic understanding of how they are utilized right in their heads. You just don't know it as vector math. You do not know a 2-D vector as a 2-D vector, you know it as a graph. Want to know more? Check out this video, I break down the concept in about 10 minutes and I am positive you will fully understand it by the end: ", "raw": "Every adult on the planet knows what a vector is and has the basic understanding of how they are utilized right in their heads. You just don't know it as vector math. You do not know a 2-D vector as a 2-D vector, you know it as a graph. Want to know more? Check out this video, I break down the concept in about 10 minutes and I am positive you will fully understand it by the end: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://youtu.be/Iny2ughcGsA", "href": "https://youtu.be/Iny2ughcGsA", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Every adult on the planet knows what a vector is and has a basic understanding of how vectors are used, right in their heads. You just don't know it as vector math. You don't know a 2-D vector as a 2-D vector; you know it as a graph. Want to know more? Check out this video. I break down the concept in about 10 minutes, and I am positive you will fully understand it by the end: https://youtu.be/Iny2ughcGsA
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png", "fullname": "Richard A Aragon", "name": "TuringsSolutions", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 146, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "djuna" ], "count": 2 } ]
2024-10-19T19:38:51.000Z
2024-10-21T08:17:53.130Z
[ { "avatarUrl": "/avatars/744eddaa7dfc34a57df9ce32a78059a0.svg", "fullname": "Tyrone Pierce", "name": "piercyy", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3, "isFollowing": false } ]
/posts/TuringsSolutions/813693468664538
1,078
1
732259995611619
[ { "type": "text", "value": "Artificial Minds, Human Consequences: Unraveling AIโ€™s Impact on Education, Cognition, and Cultural Production", "raw": "Artificial Minds, Human Consequences: Unraveling AIโ€™s Impact on Education, Cognition, and Cultural Production", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://empereur-pirate.medium.com/artificial-minds-human-consequences-unraveling-ais-impact-on-education-cognition-and-cultural-a503b88d4524", "href": "https://empereur-pirate.medium.com/artificial-minds-human-consequences-unraveling-ais-impact-on-education-cognition-and-cultural-a503b88d4524", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This text critiques AI language models, particularly in education, arguing their use hinders deep learning, critical thinking, and social-emotional development despite automating tasks. It raises copyright concerns, highlighting lawsuits against companies like OpenAI and Microsoft. AI \"creativity\" is challenged, emphasizing its reliance on stochastic processes and lack of true understanding. Ethical implications, including bias and misuse, are explored, along with environmental costs. The text contrasts the human brain's complex temporalities and adaptability with the static nature of current AI models, which lack genuine long-term memory and continuous learning.", "raw": "This text critiques AI language models, particularly in education, arguing their use hinders deep learning, critical thinking, and social-emotional development despite automating tasks. It raises copyright concerns, highlighting lawsuits against companies like OpenAI and Microsoft. AI \"creativity\" is challenged, emphasizing its reliance on stochastic processes and lack of true understanding. Ethical implications, including bias and misuse, are explored, along with environmental costs. The text contrasts the human brain's complex temporalities and adaptability with the static nature of current AI models, which lack genuine long-term memory and continuous learning.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Artificial Minds, Human Consequences: Unraveling AIโ€™s Impact on Education, Cognition, and Cultural Production https://empereur-pirate.medium.com/artificial-minds-human-consequences-unraveling-ais-impact-on-education-cognition-and-cultural-a503b88d4524 This text critiques AI language models, particularly in education, arguing their use hinders deep learning, critical thinking, and social-emotional development despite automating tasks. It raises copyright concerns, highlighting lawsuits against companies like OpenAI and Microsoft. AI "creativity" is challenged, emphasizing its reliance on stochastic processes and lack of true understanding. Ethical implications, including bias and misuse, are explored, along with environmental costs. The text contrasts the human brain's complex temporalities and adaptability with the static nature of current AI models, which lack genuine long-term memory and continuous learning.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1678038324479-noauth.jpeg", "fullname": "Empereur Pirate", "name": "Empereur-Pirate", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 7, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-10-19T16:41:44.000Z
2024-10-19T16:41:44.754Z
[]
/posts/Empereur-Pirate/732259995611619
502
0
552560958530464
[ { "type": "text", "value": "Made a notable change to the TTS Arena fork. I do not think anyone is interested in which bottomfeeder TTS is better than another beside it. So one of the top 5 TTS is always chosen in a challenge for more scrutiny. Also these top 5 are taken from preliminary results.", "raw": "Made a notable change to the TTS Arena fork. I do not think anyone is interested in which bottomfeeder TTS is better than another beside it. So one of the top 5 TTS is always chosen in a challenge for more scrutiny. Also these top 5 are taken from preliminary results.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/Pendrokar/TTS-Spaces-Arena", "href": null, "resource": { "type": "space", "id": "Pendrokar/TTS-Spaces-Arena", "discussionNum": null }, "url": "https://huggingface.co/spaces/Pendrokar/TTS-Spaces-Arena", "code": null, "user": null, "label": null, "lang": null } ]
Made a notable change to the TTS Arena fork. I don't think anyone is interested in which bottom-feeder TTS is better than the one next to it, so one of the top-5 TTS models is now always included in a challenge for extra scrutiny. These top 5 are taken from preliminary results. https://huggingface.co/spaces/Pendrokar/TTS-Spaces-Arena
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d52e0c4e5642795617f668/ztXLrdFz3gkUJUIIQXfHo.png", "fullname": "Yanis L", "name": "Pendrokar", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 15, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "YaTharThShaRma999", "KingNish", "John6666" ], "count": 3 } ]
2024-10-19T13:26:24.000Z
2024-10-19T13:28:56.552Z
[]
/posts/Pendrokar/552560958530464
1,364
0
715838364745940
[ { "type": "text", "value": "SambaNova โ˜๏ธ ", "raw": "SambaNova โ˜๏ธ ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โšก Inference API with cURL Demo: ", "raw": "โšก Inference API with cURL Demo: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/prithivMLmods/sambanova-inference-api", "href": null, "resource": { "type": "space", "id": "prithivMLmods/sambanova-inference-api", "discussionNum": null }, "url": "https://huggingface.co/spaces/prithivMLmods/sambanova-inference-api", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ”—Sambanova API Documentation : (grab your APIs here) ", "raw": "๐Ÿ”—Sambanova API Documentation : (grab your APIs here) ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://cloud.sambanova.ai/apis", "href": "https://cloud.sambanova.ai/apis", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "code_fence", "value": null, "raw": "```\nexport SAMBANOVA_API_KEY=<your token>\n```", "href": null, "resource": null, "url": null, "code": "export SAMBANOVA_API_KEY=<your token>", "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Sambanova's Inference API.", "raw": "Sambanova's Inference API.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "code_fence", "value": null, "raw": "```\npip install sambanova-gradio\n```", "href": null, "resource": null, "url": null, "code": "pip install sambanova-gradio", "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "SambaNova X Gradio", "raw": "SambaNova X Gradio", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": 
null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "code_fence", "value": null, "raw": "```\nimport gradio as gr\nimport sambanova_gradio\n\ngr.load(\n name='Meta-Llama-3.1-405B-Instruct',\n src=sambanova_gradio.registry,\n).launch()\n```", "href": null, "resource": null, "url": null, "code": "import gradio as gr\nimport sambanova_gradio\n\ngr.load(\n name='Meta-Llama-3.1-405B-Instruct',\n src=sambanova_gradio.registry,\n).launch()", "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“ƒ Documentation: ", "raw": "๐Ÿ“ƒ Documentation: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://community.sambanova.ai/docs", "href": "https://community.sambanova.ai/docs", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
SambaNova โ˜๏ธ
โšก Inference API with cURL Demo: https://huggingface.co/spaces/prithivMLmods/sambanova-inference-api

๐Ÿ”— SambaNova API documentation (grab your API key here): https://cloud.sambanova.ai/apis

```
export SAMBANOVA_API_KEY=<your token>
```
SambaNova's Inference API.

```
pip install sambanova-gradio
```
SambaNova X Gradio

```
import gradio as gr
import sambanova_gradio

gr.load(
    name='Meta-Llama-3.1-405B-Instruct',
    src=sambanova_gradio.registry,
).launch()
```

๐Ÿ“ƒ Documentation: https://community.sambanova.ai/docs
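Beyond the Gradio route above, you can also call the API directly from Python. A rough sketch, assuming SambaNova's OpenAI-compatible chat-completions endpoint; treat the exact URL and payload field names as assumptions and confirm them against the docs linked above:

```py
import os
import requests

# Assumed OpenAI-compatible endpoint; verify against https://community.sambanova.ai/docs
url = "https://api.sambanova.ai/v1/chat/completions"
headers = {"Authorization": f"Bearer {os.environ['SAMBANOVA_API_KEY']}"}
payload = {
    "model": "Meta-Llama-3.1-405B-Instruct",
    "messages": [{"role": "user", "content": "Say hello in one sentence."}],
}

# Send the request and print the first completion
response = requests.post(url, headers=headers, json=payload, timeout=60)
print(response.json()["choices"][0]["message"]["content"])
```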
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65bb837dbfb878f46c77de4c/UVtVbF_3rdt0DC8xTkpL1.jpeg", "fullname": "Prithiv Sakthi", "name": "prithivMLmods", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 393, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘", "users": [ "John6666", "victor", "akhaliq", "AtAndDev", "prithivMLmods" ], "count": 5 } ]
2024-10-19T10:33:05.000Z
2024-10-19T10:35:16.262Z
[]
/posts/prithivMLmods/715838364745940
2,425
0
118977614113249
[ { "type": "text", "value": "I'm now working on finetuning of coding models. If you are GPU-hungry like me, you will find quantized models very helpful. But quantization for finetuning and inference are different and incompatible. So I made two collections here.", "raw": "I'm now working on finetuning of coding models. If you are GPU-hungry like me, you will find quantized models very helpful. But quantization for finetuning and inference are different and incompatible. So I made two collections here.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Inference (GGUF, via Ollama, CPU is enough)", "raw": "Inference (GGUF, via Ollama, CPU is enough)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/onekq-ai/ollama-ready-coding-models-67118c3cfa1af2cf04a926d6", "href": null, "resource": { "type": "collection", "id": "onekq-ai/ollama-ready-coding-models-67118c3cfa1af2cf04a926d6", "discussionNum": null }, "url": "https://huggingface.co/collections/onekq-ai/ollama-ready-coding-models-67118c3cfa1af2cf04a926d6", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Finetuning (Bitsandbytes, QLora, GPU is needed)", "raw": "Finetuning (Bitsandbytes, QLora, GPU is needed)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/onekq-ai/qlora-ready-coding-models-67118771ce001b8f4cf946b2", "href": null, "resource": { "type": "collection", "id": "onekq-ai/qlora-ready-coding-models-67118771ce001b8f4cf946b2", "discussionNum": null }, "url": "https://huggingface.co/collections/onekq-ai/qlora-ready-coding-models-67118771ce001b8f4cf946b2", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "For quantization, the inference models are far more popular on HF than finetuning models. I use ", "raw": "For quantization, the inference models are far more popular on HF than finetuning models. 
I use ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/QuantFactory", "href": "https://huggingface.co/QuantFactory", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " to generate inference models (GGUF), and there are a few other choices.", "raw": " to generate inference models (GGUF), and there are a few other choices.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "But there hasn't been such a service for finetuning models. DIY isn't too hard though. I made a few myself and you can find the script in the model cards. If the original model is small enough, you can even do it on a free T4 (available via Google Colab).", "raw": "But there hasn't been such a service for finetuning models. DIY isn't too hard though. I made a few myself and you can find the script in the model cards. If the original model is small enough, you can even do it on a free T4 (available via Google Colab).", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "If you know a (small) coding model worthy of quantization, please let me know and I'd love to add it to the collections.", "raw": "If you know a (small) coding model worthy of quantization, please let me know and I'd love to add it to the collections.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
I'm now working on finetuning of coding models. If you are GPU-hungry like me, you will find quantized models very helpful. But quantization for finetuning and for inference are different and incompatible, so I made two collections here.

Inference (GGUF, via Ollama, CPU is enough)
https://huggingface.co/collections/onekq-ai/ollama-ready-coding-models-67118c3cfa1af2cf04a926d6

Finetuning (bitsandbytes, QLoRA, GPU is needed)
https://huggingface.co/collections/onekq-ai/qlora-ready-coding-models-67118771ce001b8f4cf946b2

Quantized inference models are far more popular on HF than quantized finetuning models. I use https://huggingface.co/QuantFactory to generate inference models (GGUF), and there are a few other choices.

But there hasn't been such a service for finetuning models. DIY isn't too hard though. I made a few myself and you can find the script in the model cards. If the original model is small enough, you can even do it on a free T4 (available via Google Colab).

If you know a (small) coding model worthy of quantization, please let me know and I'd love to add it to the collections.
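For the DIY, finetuning-side quantization mentioned above, here is a minimal sketch of loading a small coding model in 4-bit with bitsandbytes and attaching QLoRA adapters. The model ID and LoRA hyperparameters are illustrative assumptions, not the exact scripts from the model cards:

```py
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

base_id = "Qwen/Qwen2.5-Coder-1.5B-Instruct"  # illustrative small coding model

# 4-bit NF4 quantization config for QLoRA-style finetuning
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

tokenizer = AutoTokenizer.from_pretrained(base_id)
model = AutoModelForCausalLM.from_pretrained(
    base_id, quantization_config=bnb_config, device_map="auto"
)

# Prepare the quantized base model and attach small trainable LoRA adapters
model = prepare_model_for_kbit_training(model)
lora = LoraConfig(r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"], task_type="CAUSAL_LM")
model = get_peft_model(model, lora)
model.print_trainable_parameters()
```

The quantized weights stay frozen; only the LoRA adapters train, which is what keeps this within a free T4's memory budget for small models.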
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/669dbd709a4bf63e08f1ddc2/aV10ZJPPzH5LbnHFZNqc7.png", "fullname": "Yi Cui", "name": "onekq", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 16, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "djuna", "John6666", "valentimarco", "RalFinger", "rajkumar321", "regisss" ], "count": 6 } ]
2024-10-19T04:05:19.000Z
2024-10-20T23:25:15.865Z
[]
/posts/onekq/118977614113249
1,834
0
258301984475643
[ { "type": "text", "value": "๐Ÿ“ข Excited to share that our studies ๐Ÿ“„ \"Large Language Models in Targeted Sentiment Analysis for Russian\" has recently become in ๐Ÿ“˜ Springer Lobachevskii Journal of Mathematics ๐Ÿฅณโœจ ...", "raw": "๐Ÿ“ข Excited to share that our studies ๐Ÿ“„ \"Large Language Models in Targeted Sentiment Analysis for Russian\" has recently become in ๐Ÿ“˜ Springer Lobachevskii Journal of Mathematics ๐Ÿฅณโœจ ...", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“˜ ", "raw": "๐Ÿ“˜ ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://link.springer.com/article/10.1134/S1995080224603758", "href": "https://link.springer.com/article/10.1134/S1995080224603758", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "In this studies we provide such a diverse look and experiments over various ๐Ÿค– LLM models ๐Ÿค– scaled from 7B in two different modes: โ„๏ธ zero-shot and ๐Ÿ”ฅ fine-tuned (Flan-T5 only) using Three-Hop reasoning technique.", "raw": "In this studies we provide such a diverse look and experiments over various ๐Ÿค– LLM models ๐Ÿค– scaled from 7B in two different modes: โ„๏ธ zero-shot and ๐Ÿ”ฅ fine-tuned (Flan-T5 only) using Three-Hop reasoning technique.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "We showcase the importance of performing:", "raw": "We showcase the importance of performing:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ’š text translation into English", "raw": "๐Ÿ’š text translation into English", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ’š application on Chain-of-Thought for Implicit Sentiment Analysis", "raw": "๐Ÿ’š application on Chain-of-Thought for Implicit Sentiment Analysis", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, 
"raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "More:", "raw": "More:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“„ Arxiv: ", "raw": "๐Ÿ“„ Arxiv: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://arxiv.org/abs/2404.12342", "href": "https://arxiv.org/abs/2404.12342", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿง‘โ€๐Ÿ’ป๏ธ Code: ", "raw": "๐Ÿง‘โ€๐Ÿ’ป๏ธ Code: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/nicolay-r/Reasoning-for-Sentiment-Analysis-Framework", "href": "https://github.com/nicolay-r/Reasoning-for-Sentiment-Analysis-Framework", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿค— Models: ", "raw": "๐Ÿค— Models: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2404.12342", "href": null, "resource": { "type": "paper", "id": "2404.12342", "discussionNum": null }, "url": "https://huggingface.co/papers/2404.12342", "code": null, "user": null, "label": "Large Language Models in Targeted Sentiment Analysis (2404.12342)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐ŸŽฅ Video ", "raw": "๐ŸŽฅ Video ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@NLPSummit", "href": null, "resource": null, "url": null, "code": null, "user": "NLPSummit", "label": null, "lang": null }, { "type": "text", "value": ": ", "raw": ": ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.youtube.com/watch?v=qawLJsRHzB4", "href": "https://www.youtube.com/watch?v=qawLJsRHzB4", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "THOR: ", "raw": "THOR: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/scofield7419/THOR-ISA", "href": 
"https://github.com/scofield7419/THOR-ISA", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
๐Ÿ“ข Excited to share that our study ๐Ÿ“„ "Large Language Models in Targeted Sentiment Analysis for Russian" has recently appeared in ๐Ÿ“˜ Springer Lobachevskii Journal of Mathematics ๐Ÿฅณโœจ ... ๐Ÿ“˜ https://link.springer.com/article/10.1134/S1995080224603758 In this study we provide a diverse set of experiments over various ๐Ÿค– LLMs ๐Ÿค– scaled from 7B in two different modes: โ„๏ธ zero-shot and ๐Ÿ”ฅ fine-tuned (Flan-T5 only) using the Three-Hop reasoning technique. We showcase the importance of: ๐Ÿ’š translating the text into English ๐Ÿ’š applying Chain-of-Thought for Implicit Sentiment Analysis More: ๐Ÿ“„ Arxiv: https://arxiv.org/abs/2404.12342 ๐Ÿง‘โ€๐Ÿ’ป๏ธ Code: https://github.com/nicolay-r/Reasoning-for-Sentiment-Analysis-Framework ๐Ÿค— Models: https://huggingface.co/papers/2404.12342 ๐ŸŽฅ Video @NLPSummit: https://www.youtube.com/watch?v=qawLJsRHzB4 THOR: https://github.com/scofield7419/THOR-ISA
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64e62d11d27a8292c3637f86/aptDeBHpCJxcREj6KPLN1.jpeg", "fullname": "Nicolay Rusnachenko", "name": "nicolay-r", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 49, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/Mhur9yOt815o2m0sL8piC.png" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-10-18T21:38:25.000Z
2024-10-18T21:38:25.524Z
[]
/posts/nicolay-r/258301984475643
683
0
847158812402940
[ { "type": "text", "value": "Good folks at ", "raw": "Good folks at ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@Apple", "href": null, "resource": null, "url": null, "code": null, "user": "Apple", "label": null, "lang": null }, { "type": "text", "value": " have developed a novel method called KV Prediction that significantly reduces the \"time to first token\" (TTFT) for on-device LLM inference.", "raw": " have developed a novel method called KV Prediction that significantly reduces the \"time to first token\" (TTFT) for on-device LLM inference.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Some highlights of the paper:", "raw": "Some highlights of the paper:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€ข Uses a small auxiliary transformer model to efficiently predict the KV cache of a larger base model", "raw": "โ€ข Uses a small auxiliary transformer model to efficiently predict the KV cache of a larger base model", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€ข Reduces TTFT by up to 4x while retaining 60-80% accuracy on benchmarks", "raw": "โ€ข Reduces TTFT by up to 4x while retaining 60-80% accuracy on benchmarks", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€ข Achieves Pareto-optimal efficiency-accuracy trade-off compared to baselines", "raw": "โ€ข Achieves Pareto-optimal efficiency-accuracy trade-off compared to baselines", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€ข Demonstrates 15-50% relative accuracy improvements on TriviaQA at equal TTFT FLOP budgets", "raw": "โ€ข Demonstrates 15-50% relative accuracy improvements on TriviaQA at equal TTFT FLOP budgets", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€ข Shows up to 30% accuracy gains on 
HumanEval code completion at fixed TTFT FLOP counts", "raw": "โ€ข Shows up to 30% accuracy gains on HumanEval code completion at fixed TTFT FLOP counts", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€ข Validated on Apple M2 Pro CPU, proving FLOP gains translate to real-world speedups", "raw": "โ€ข Validated on Apple M2 Pro CPU, proving FLOP gains translate to real-world speedups", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "So, how's it done?", "raw": "So, how's it done?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Based on the KV Prediction method described in the paper, here are the key steps for how it's done:", "raw": "Based on the KV Prediction method described in the paper, here are the key steps for how it's done:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1. Choose a base model and an auxiliary model:", "raw": "1. 
Choose a base model and an auxiliary model:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- The base model is a larger, pretrained transformer model that will be used for final generation.", "raw": "- The base model is a larger, pretrained transformer model that will be used for final generation.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- The auxiliary model is a smaller transformer model used to efficiently process the input prompt.", "raw": "- The auxiliary model is a smaller transformer model used to efficiently process the input prompt.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2. Design the KV predictor:", "raw": "2. Design the KV predictor:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Create a set of learned linear projections to map from the auxiliary model's KV cache to the base model's KV cache.", "raw": "- Create a set of learned linear projections to map from the auxiliary model's KV cache to the base model's KV cache.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Define a mapping from auxiliary cache layers to base cache layers.", "raw": "- Define a mapping from auxiliary cache layers to base cache layers.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3. Training process:", "raw": "3. 
Training process:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Pass input tokens through the auxiliary model to get its KV cache.", "raw": "- Pass input tokens through the auxiliary model to get its KV cache.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Use the KV predictor to generate a predicted KV cache for the base model.", "raw": "- Use the KV predictor to generate a predicted KV cache for the base model.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Run the base model using the predicted KV cache and compute losses.", "raw": "- Run the base model using the predicted KV cache and compute losses.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Backpropagate errors through the frozen base model to update the auxiliary model and KV predictor.", "raw": "- Backpropagate errors through the frozen base model to update the auxiliary model and KV predictor.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "4. Inference process:", "raw": "4. 
Inference process:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Process the input prompt with the auxiliary model to get its KV cache.", "raw": "- Process the input prompt with the auxiliary model to get its KV cache.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Use the KV predictor to generate the predicted base model KV cache.", "raw": "- Use the KV predictor to generate the predicted base model KV cache.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Run a single token generation step with the base model using the predicted KV cache.", "raw": "- Run a single token generation step with the base model using the predicted KV cache.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Continue autoregressive generation with the base model as normal.", "raw": "- Continue autoregressive generation with the base model as normal.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Excited to hear your thoughts! ", "raw": "Excited to hear your thoughts! ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Good folks at @Apple have developed a novel method called KV Prediction that significantly reduces the "time to first token" (TTFT) for on-device LLM inference. Some highlights of the paper: โ€ข Uses a small auxiliary transformer model to efficiently predict the KV cache of a larger base model โ€ข Reduces TTFT by up to 4x while retaining 60-80% accuracy on benchmarks โ€ข Achieves Pareto-optimal efficiency-accuracy trade-off compared to baselines โ€ข Demonstrates 15-50% relative accuracy improvements on TriviaQA at equal TTFT FLOP budgets โ€ข Shows up to 30% accuracy gains on HumanEval code completion at fixed TTFT FLOP counts โ€ข Validated on Apple M2 Pro CPU, proving FLOP gains translate to real-world speedups So, how's it done? Based on the KV Prediction method described in the paper, here are the key steps for how it's done: 1. Choose a base model and an auxiliary model: - The base model is a larger, pretrained transformer model that will be used for final generation. - The auxiliary model is a smaller transformer model used to efficiently process the input prompt. 2. Design the KV predictor: - Create a set of learned linear projections to map from the auxiliary model's KV cache to the base model's KV cache. - Define a mapping from auxiliary cache layers to base cache layers. 3. Training process: - Pass input tokens through the auxiliary model to get its KV cache. - Use the KV predictor to generate a predicted KV cache for the base model. - Run the base model using the predicted KV cache and compute losses. - Backpropagate errors through the frozen base model to update the auxiliary model and KV predictor. 4. Inference process: - Process the input prompt with the auxiliary model to get its KV cache. - Use the KV predictor to generate the predicted base model KV cache. - Run a single token generation step with the base model using the predicted KV cache. - Continue autoregressive generation with the base model as normal. Excited to hear your thoughts!
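As a rough, hypothetical sketch of the predictor component described in steps 2-4 (this is not Apple's code; the shapes, the layer mapping, and all names are illustrative assumptions):

```py
import torch
import torch.nn as nn

class KVPredictor(nn.Module):
    """Maps an auxiliary model's KV cache to a predicted base-model KV cache
    with learned linear projections (illustrative sketch of the idea above)."""

    def __init__(self, aux_dim, base_dim, num_base_layers, layer_map):
        super().__init__()
        self.layer_map = layer_map  # layer_map[i] = auxiliary layer feeding base layer i
        self.k_proj = nn.ModuleList([nn.Linear(aux_dim, base_dim) for _ in range(num_base_layers)])
        self.v_proj = nn.ModuleList([nn.Linear(aux_dim, base_dim) for _ in range(num_base_layers)])

    def forward(self, aux_kv):
        # aux_kv: list of (k, v) tensors, each [batch, seq_len, aux_dim], one per auxiliary layer
        predicted = []
        for i, (k_layer, v_layer) in enumerate(zip(self.k_proj, self.v_proj)):
            k_aux, v_aux = aux_kv[self.layer_map[i]]
            predicted.append((k_layer(k_aux), v_layer(v_aux)))
        return predicted  # predicted base-model KV cache: one (k, v) per base layer

# Toy usage: 2 auxiliary layers feeding 4 base layers
aux_kv = [(torch.randn(1, 8, 256), torch.randn(1, 8, 256)) for _ in range(2)]
predictor = KVPredictor(aux_dim=256, base_dim=512, num_base_layers=4, layer_map=[0, 0, 1, 1])
base_cache = predictor(aux_kv)
print(len(base_cache), base_cache[0][0].shape)  # 4 layers, torch.Size([1, 8, 512])
```

At inference time the prompt passes through the small auxiliary model once, the predictor fills the base model's cache, and the base model then decodes autoregressively from that cache as usual.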
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662bf5bfe93bb73804ef9344/WXYLnjjJ4SROkoveIi7If.png", "fullname": "Kuldeep Singh Sidhu", "name": "singhsidhukuldeep", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 219, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/662bf5bfe93bb73804ef9344/9Rzee8SmYr5F8LJkSBh6C.jpeg" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "YaTharThShaRma999", "barakplasma", "KingNish", "myronkoch", "AtAndDev", "GoDjMike" ], "count": 7 } ]
2024-10-18T19:40:13.000Z
2024-10-18T19:40:13.419Z
[]
/posts/singhsidhukuldeep/847158812402940
1,736
0
744296414703460
[ { "type": "text", "value": "A new ", "raw": "A new ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`timm`", "href": null, "resource": null, "url": null, "code": "timm", "user": null, "label": null, "lang": null }, { "type": "text", "value": " release (1.0.11) is out now. A also wrote an article on one of the included models: ", "raw": " release (1.0.11) is out now. A also wrote an article on one of the included models: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/blog/rwightman/mambaout", "href": "https://huggingface.co/blog/rwightman/mambaout", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Featured in the release are:", "raw": "Featured in the release are:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " * The MambaOut model, a cheeky arch inspired by SSM but without the SSM part, a ConvNeXt with gating.", "raw": " * The MambaOut model, a cheeky arch inspired by SSM but without the SSM part, a ConvNeXt with gating.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " * Several timm trained MambaOut variations with arch tweaks and ImageNet-12k pretrain to verify scaling, supplement ported weights.", "raw": " * Several timm trained MambaOut variations with arch tweaks and ImageNet-12k pretrain to verify scaling, supplement ported weights.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " * The smallest MobileNetV4, a 0.5x width scaled Conv-Small.", "raw": " * The smallest MobileNetV4, a 0.5x width scaled Conv-Small.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " * Two impressive MobileNetV3 Large models outperforming all previous, using MNV4 Small recipe.", "raw": " * Two impressive MobileNetV3 Large models outperforming all previous, using MNV4 Small recipe.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " * 
'Zepto,' a new compact ConvNeXt variant even smaller than the previous Atto, 2.2M params, RMSNorm, and solid results for its size.", "raw": " * 'Zepto,' a new compact ConvNeXt variant even smaller than the previous Atto, 2.2M params, RMSNorm, and solid results for its size.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " * Newly ported SigLIP SO400M/16 ViT multi-lingual weights, the largest i18n weights, prevous was B/16.", "raw": " * Newly ported SigLIP SO400M/16 ViT multi-lingual weights, the largest i18n weights, prevous was B/16.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " * Two ImageNet-1k fine-tuned SigLIP SO400M models at 378x378", "raw": " * Two ImageNet-1k fine-tuned SigLIP SO400M models at 378x378", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " * InternViT 300M weight port. A really solid ViT encoder distilled from OpenGVLab 6B VL model encoder.", "raw": " * InternViT 300M weight port. A really solid ViT encoder distilled from OpenGVLab 6B VL model encoder.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " * An assortment of very small, sub 1M param pretrained test models to improve library unit tests and serve low-resource applications.", "raw": " * An assortment of very small, sub 1M param pretrained test models to improve library unit tests and serve low-resource applications.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
A new `timm` release (1.0.11) is out now. I also wrote an article on one of the included models: https://huggingface.co/blog/rwightman/mambaout

Featured in the release are:
 * The MambaOut model, a cheeky arch inspired by SSM but without the SSM part, a ConvNeXt with gating.
 * Several timm-trained MambaOut variations with arch tweaks and ImageNet-12k pretrain to verify scaling, supplementing the ported weights.
 * The smallest MobileNetV4, a 0.5x width scaled Conv-Small.
 * Two impressive MobileNetV3 Large models outperforming all previous ones, using the MNV4 Small recipe.
 * 'Zepto,' a new compact ConvNeXt variant even smaller than the previous Atto: 2.2M params, RMSNorm, and solid results for its size.
 * Newly ported SigLIP SO400M/16 ViT multi-lingual weights, the largest i18n weights; previous was B/16.
 * Two ImageNet-1k fine-tuned SigLIP SO400M models at 378x378.
 * InternViT 300M weight port, a really solid ViT encoder distilled from the OpenGVLab 6B VL model's encoder.
 * An assortment of very small, sub-1M param pretrained test models to improve library unit tests and serve low-resource applications.
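If you want to try one of the new checkpoints, a minimal sketch with the standard timm API; the exact model tag is an assumption, so check the timm model pages for the released names:

```py
import timm
import torch

# Assumed checkpoint tag for one of the new MambaOut variants; adjust to a released name
model = timm.create_model("mambaout_base.in1k", pretrained=True).eval()

# Resolve the model's expected input size from its pretrained config
cfg = timm.data.resolve_model_data_config(model)

with torch.no_grad():
    logits = model(torch.randn(1, *cfg["input_size"]))  # replace with a preprocessed image
print(logits.shape)  # e.g. torch.Size([1, 1000])
```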
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1667002643224-604a5184dca2c7ac7508b849.jpeg", "fullname": "Ross Wightman", "name": "rwightman", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 221, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-10-18T18:37:04.000Z
2024-10-18T18:37:39.692Z
[]
/posts/rwightman/744296414703460
633
0
326102968998684
[ { "type": "text", "value": "China is advancing rapidly in AI technology while maintaining a strong focus on governance ๐Ÿ‡จ๐Ÿ‡ณ๐Ÿ“‘", "raw": "China is advancing rapidly in AI technology while maintaining a strong focus on governance ๐Ÿ‡จ๐Ÿ‡ณ๐Ÿ“‘", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "We've collected key AI governance documents released since 2017 and will continue updating them in this organization on the hub ๐Ÿ‘‰China LLMs on Hugging Face", "raw": "We've collected key AI governance documents released since 2017 and will continue updating them in this organization on the hub ๐Ÿ‘‰China LLMs on Hugging Face", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โœจ ", "raw": "โœจ ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/zh-ai-community/china-ai-policy-research", "href": null, "resource": { "type": "space", "id": "zh-ai-community/china-ai-policy-research", "discussionNum": null }, "url": "https://huggingface.co/spaces/zh-ai-community/china-ai-policy-research", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Any feedback is welcome๐Ÿค—", "raw": "Any feedback is welcome๐Ÿค—", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
China is advancing rapidly in AI technology while maintaining a strong focus on governance ๐Ÿ‡จ๐Ÿ‡ณ๐Ÿ“‘ We've collected key AI governance documents released since 2017 and will continue updating them in this organization on the hub ๐Ÿ‘‰China LLMs on Hugging Face โœจ https://huggingface.co/spaces/zh-ai-community/china-ai-policy-research Any feedback is welcome๐Ÿค—
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63a369d98c0c89dcae3b8329/6OUJ7Hc9T1jXynYH3FGaf.png", "fullname": "Adina Yakefu", "name": "AdinaY", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 240, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/63a369d98c0c89dcae3b8329/mk4wMW64k62KPtlgt10aE.png" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "debsouryadatta", "YaTharThShaRma999", "blanchon", "tcy6", "AtAndDev", "mpieck", "TuringsSolutions" ], "count": 8 } ]
2024-10-18T16:37:51.000Z
2024-10-18T16:39:04.725Z
[]
/posts/AdinaY/326102968998684
2,231
0
773668577673507
[ { "type": "text", "value": "What a great day for Open Science! ", "raw": "What a great day for Open Science! ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@AIatMeta", "href": null, "resource": null, "url": null, "code": null, "user": "AIatMeta", "label": null, "lang": null }, { "type": "text", "value": " released models, datasets, and code for many of its research artefacts! ๐Ÿ”ฅ", "raw": " released models, datasets, and code for many of its research artefacts! ๐Ÿ”ฅ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1. Meta Segment Anything Model 2.1: An updated checkpoint with improved results on visually similar objects, small objects and occlusion handling. A new developer suite will be added to make it easier for developers to build with SAM 2. ", "raw": "1. Meta Segment Anything Model 2.1: An updated checkpoint with improved results on visually similar objects, small objects and occlusion handling. A new developer suite will be added to make it easier for developers to build with SAM 2. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Model checkpoints: ", "raw": "Model checkpoints: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/reach-vb/sam-21-6702d40defe7611a8bafa881", "href": null, "resource": { "type": "collection", "id": "reach-vb/sam-21-6702d40defe7611a8bafa881", "discussionNum": null }, "url": "https://huggingface.co/collections/reach-vb/sam-21-6702d40defe7611a8bafa881", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2. Layer Skip: Inference code and fine-tuned checkpoints demonstrating a new method for enhancing LLM performance.", "raw": "2. 
Layer Skip: Inference code and fine-tuned checkpoints demonstrating a new method for enhancing LLM performance.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Model checkpoints: ", "raw": "Model checkpoints: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/facebook/layerskip-666b25c50c8ae90e1965727a", "href": null, "resource": { "type": "collection", "id": "facebook/layerskip-666b25c50c8ae90e1965727a", "discussionNum": null }, "url": "https://huggingface.co/collections/facebook/layerskip-666b25c50c8ae90e1965727a", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3. SALSA: New code enables researchers to benchmark AI-based attacks to validate security for post-quantum cryptography. ", "raw": "3. SALSA: New code enables researchers to benchmark AI-based attacks to validate security for post-quantum cryptography. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Repo: ", "raw": "Repo: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/facebookresearch/LWE-benchmarking", "href": "https://github.com/facebookresearch/LWE-benchmarking", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "4. Meta Lingua: A lightweight and self-contained codebase designed to train language models at scale.", "raw": "4. 
Meta Lingua: A lightweight and self-contained codebase designed to train language models at scale.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Repo: ", "raw": "Repo: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/facebookresearch/lingua", "href": "https://github.com/facebookresearch/lingua", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "5. Meta Open Materials: New open source models and the largest dataset to accelerate AI-driven discovery of new inorganic materials.", "raw": "5. Meta Open Materials: New open source models and the largest dataset to accelerate AI-driven discovery of new inorganic materials.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Model checkpoints: ", "raw": "Model checkpoints: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/fairchem/OMAT24", "href": null, "resource": { "type": "model", "id": "fairchem/OMAT24", "discussionNum": null }, "url": "https://huggingface.co/fairchem/OMAT24", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "6. MEXMA: A new research paper and code for our novel pre-trained cross-lingual sentence encoder covering 80 languages.", "raw": "6. 
MEXMA: A new research paper and code for our novel pre-trained cross-lingual sentence encoder covering 80 languages.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Model checkpoint: ", "raw": "Model checkpoint: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/facebook/MEXMA", "href": null, "resource": { "type": "model", "id": "facebook/MEXMA", "discussionNum": null }, "url": "https://huggingface.co/facebook/MEXMA", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "7. Self-Taught Evaluator: a new method for generating synthetic preference data to train reward models without relying on human annotations.", "raw": "7. Self-Taught Evaluator: a new method for generating synthetic preference data to train reward models without relying on human annotations.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Model checkpoint: ", "raw": "Model checkpoint: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/facebook/Self-taught-evaluator-llama3.1-70B", "href": null, "resource": { "type": "model", "id": "facebook/Self-taught-evaluator-llama3.1-70B", "discussionNum": null }, "url": "https://huggingface.co/facebook/Self-taught-evaluator-llama3.1-70B", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "8. Meta Spirit LM: An open-source language model for seamless speech and text integration.", "raw": "8. 
Meta Spirit LM: An open-source language model for seamless speech and text integration.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Repo: ", "raw": "Repo: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/facebookresearch/spiritlm", "href": "https://github.com/facebookresearch/spiritlm", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
What a great day for Open Science! @AIatMeta released models, datasets, and code for many of its research artefacts! ๐Ÿ”ฅ 1. Meta Segment Anything Model 2.1: An updated checkpoint with improved results on visually similar objects, small objects and occlusion handling. A new developer suite will be added to make it easier for developers to build with SAM 2. Model checkpoints: https://huggingface.co/collections/reach-vb/sam-21-6702d40defe7611a8bafa881 2. Layer Skip: Inference code and fine-tuned checkpoints demonstrating a new method for enhancing LLM performance. Model checkpoints: https://huggingface.co/collections/facebook/layerskip-666b25c50c8ae90e1965727a 3. SALSA: New code enables researchers to benchmark AI-based attacks to validate security for post-quantum cryptography. Repo: https://github.com/facebookresearch/LWE-benchmarking 4. Meta Lingua: A lightweight and self-contained codebase designed to train language models at scale. Repo: https://github.com/facebookresearch/lingua 5. Meta Open Materials: New open source models and the largest dataset to accelerate AI-driven discovery of new inorganic materials. Model checkpoints: https://huggingface.co/fairchem/OMAT24 6. MEXMA: A new research paper and code for our novel pre-trained cross-lingual sentence encoder covering 80 languages. Model checkpoint: https://huggingface.co/facebook/MEXMA 7. Self-Taught Evaluator: a new method for generating synthetic preference data to train reward models without relying on human annotations. Model checkpoint: https://huggingface.co/facebook/Self-taught-evaluator-llama3.1-70B 8. Meta Spirit LM: An open-source language model for seamless speech and text integration. Repo: https://github.com/facebookresearch/spiritlm
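Editor's note: a minimal, hedged sketch of pulling one of the listed checkpoints locally with `huggingface_hub`. The repo id is taken from the post; how you then load the weights depends on each project's own code (sam2, fairchem, spiritlm, transformers, ...), so see the linked repos.

```python
from huggingface_hub import snapshot_download

# Download the full repository for one of the released artefacts.
# "facebook/MEXMA" is the repo id mentioned in the post; swap in any of the others.
local_dir = snapshot_download("facebook/MEXMA")
print("checkpoint files downloaded to:", local_dir)
```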
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1655385361868-61b85ce86eb1f2c5e6233736.jpeg", "fullname": "Vaibhav Srivastav", "name": "reach-vb", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 460, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/61b85ce86eb1f2c5e6233736/xJ9Epls05J9cKyBgBHHUF.png" } ]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "abidlabs", "John6666", "pcuenq", "clem", "blanchon", "nbroad", "kuotient" ], "count": 7 }, { "reaction": "๐Ÿš€", "users": [ "cfahlgren1", "clem", "blanchon", "nbroad" ], "count": 4 } ]
2024-10-18T16:37:32.000Z
2024-11-05T15:36:46.713Z
[ { "avatarUrl": "/avatars/716b6a7d1094c8036b2a8a7b9063e8aa.svg", "fullname": "Julien BLANCHON", "name": "blanchon", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 70, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5dd96eb166059660ed1ee413/NQtzmrDdbG0H8qkZvRyGk.jpeg", "fullname": "Julien Chaumond", "name": "julien-c", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1580, "isFollowing": false } ]
/posts/reach-vb/773668577673507
2,414
3
394802699346452
[ { "type": "text", "value": "๐Ÿ•ต๏ธโ€โ™‚๏ธ Letโ€™s investigate NVIDIA's Llama-3.1-Nemotron-70B-Instruct-HF model performance - great analysis by ", "raw": "๐Ÿ•ต๏ธโ€โ™‚๏ธ Letโ€™s investigate NVIDIA's Llama-3.1-Nemotron-70B-Instruct-HF model performance - great analysis by ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@alozowski", "href": null, "resource": null, "url": null, "code": null, "user": "alozowski", "label": null, "lang": null }, { "type": "text", "value": "!", "raw": "!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://x.com/ailozovskaya/status/1847278827925291370", "href": "https://x.com/ailozovskaya/status/1847278827925291370", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
๐Ÿ•ต๏ธโ€โ™‚๏ธ Letโ€™s investigate NVIDIA's Llama-3.1-Nemotron-70B-Instruct-HF model performance - great analysis by @alozowski! https://x.com/ailozovskaya/status/1847278827925291370
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1583857146757-5e67bdd61009063689407479.jpeg", "fullname": "Clem ๐Ÿค—", "name": "clem", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1763, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63f5010dfcf95ecac2ad8652/vmRox4fcHMjT1y2bidjOL.jpeg", "fullname": "Alina Lozovskaya", "name": "alozowski", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 55 } ]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-10-18T15:41:21.000Z
2024-10-18T15:41:21.585Z
[]
/posts/clem/394802699346452
778
0
416848702665970
[ { "type": "mention", "value": null, "raw": "@amrattacker", "href": null, "resource": null, "url": null, "code": null, "user": "amrattacker", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@amrvictim", "href": null, "resource": null, "url": null, "code": null, "user": "amrvictim", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "\"><img src=x onerror=alert(document.cookie)>", "raw": "\"><img src=x onerror=alert(document.cookie)>", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "<a href=https://hackerone.com> click here </a>", "raw": "<a href=https://hackerone.com> click here </a>", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
@amrattacker @amrvictim "><img src=x onerror=alert(document.cookie)> <a href=https://hackerone.com> click here </a>
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/Kqipf6zyrc5zWKTv_nsBM.jpeg", "fullname": "amr victim", "name": "amrvictim", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/67034187bc00379a3b131883/aI7OClaJB2DZx4EPP-4oa.jpeg" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/Z55fnybB2ULlFIa3VDCNA.jpeg", "fullname": "amrattacker", "name": "amrattacker", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/Kqipf6zyrc5zWKTv_nsBM.jpeg", "fullname": "amr victim", "name": "amrvictim", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1 } ]
[]
2024-10-18T14:47:23.000Z
2024-11-04T02:00:29.349Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/Z55fnybB2ULlFIa3VDCNA.jpeg", "fullname": "amrattacker", "name": "amrattacker", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/Kqipf6zyrc5zWKTv_nsBM.jpeg", "fullname": "amr victim", "name": "amrvictim", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false } ]
/posts/amrvictim/416848702665970
361
7
570727690844838
[ { "type": "text", "value": "๐Ÿ“ˆ Increase the quality of your RAG with a simple Linear Layer! No need to change your embedding model (keep that old OpenAI API).", "raw": "๐Ÿ“ˆ Increase the quality of your RAG with a simple Linear Layer! No need to change your embedding model (keep that old OpenAI API).", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Introducing EmbeddingAlign RAG, a novel approach to improve Retrieval-Augmented Generation (RAG) systems.", "raw": "Introducing EmbeddingAlign RAG, a novel approach to improve Retrieval-Augmented Generation (RAG) systems.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Key highlights:", "raw": "Key highlights:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Uses a simple linear transformation on existing embeddings", "raw": "- Uses a simple linear transformation on existing embeddings", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Boosts hit rate from 89% to 95% on real-world examples", "raw": "- Boosts hit rate from 89% to 95% on real-world examples", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Minor increase on latency (less than 10ms)", "raw": "- Minor increase on latency (less than 10ms)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Works on top of blackbox embedding models (Mistral AI, OpenAI, Cohere,...)", "raw": "- Works on top of blackbox embedding models (Mistral AI, OpenAI, Cohere,...)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- No dataset needed (just your documents)", "raw": "- No dataset needed (just your documents)", "href": null, "resource": null, "url": null, 
"code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Train easily on CPU", "raw": "- Train easily on CPU", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿค— Read the full article here on HF: ", "raw": "๐Ÿค— Read the full article here on HF: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/blog/PLB/embedding-align-rag", "href": "https://huggingface.co/blog/PLB/embedding-align-rag", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
๐Ÿ“ˆ Increase the quality of your RAG with a simple Linear Layer! No need to change your embedding model (keep that old OpenAI API). Introducing EmbeddingAlign RAG, a novel approach to improve Retrieval-Augmented Generation (RAG) systems. Key highlights: - Uses a simple linear transformation on existing embeddings - Boosts hit rate from 89% to 95% on real-world examples - Minor increase on latency (less than 10ms) - Works on top of blackbox embedding models (Mistral AI, OpenAI, Cohere,...) - No dataset needed (just your documents) - Train easily on CPU ๐Ÿค— Read the full article here on HF: https://huggingface.co/blog/PLB/embedding-align-rag
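Editor's note: as a rough illustration of the idea described above (not the author's actual code), here is a minimal PyTorch sketch: a single linear layer trained on top of frozen, black-box embeddings so that queries land closer to their relevant chunks. The dimensionality, loss, and temperature are assumptions; see the linked article for the real recipe.

```python
import torch
import torch.nn.functional as F

dim = 1024                               # embedding size of your provider's model (assumed)
align = torch.nn.Linear(dim, dim)        # the only trainable component
opt = torch.optim.Adam(align.parameters(), lr=1e-4)

# Stand-ins for (query, relevant-chunk) embedding pairs you would obtain once
# from your black-box embedding API over your own documents.
q = F.normalize(torch.randn(256, dim), dim=-1)
d = F.normalize(torch.randn(256, dim), dim=-1)

for _ in range(100):                     # small enough to train comfortably on CPU
    qa = F.normalize(align(q), dim=-1)
    da = F.normalize(align(d), dim=-1)
    logits = qa @ da.T / 0.05            # in-batch negatives, temperature assumed
    loss = F.cross_entropy(logits, torch.arange(len(q)))
    opt.zero_grad()
    loss.backward()
    opt.step()

# At retrieval time, apply `align` to both query and chunk embeddings and rank by cosine.
```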
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/636902eda256bbb73f7c149a/qt3J96eNDQET3jva7mVyu.jpeg", "fullname": "Pierre-Louis B", "name": "PLB", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 13, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-10-18T14:12:31.000Z
2024-10-18T15:28:01.374Z
[ { "avatarUrl": "/avatars/10f68c52b11820df31d4022e8cc320f5.svg", "fullname": "Ersi Ni", "name": "nilbot", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/636902eda256bbb73f7c149a/qt3J96eNDQET3jva7mVyu.jpeg", "fullname": "Pierre-Louis B", "name": "PLB", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 13, "isFollowing": false } ]
/posts/PLB/570727690844838
366
2
846070753492070
[ { "type": "text", "value": "๐—›๐—ผ๐˜„ ๐˜๐—ผ ๐—ฟ๐—ฒ-๐—ฟ๐—ฎ๐—ป๐—ธ ๐˜†๐—ผ๐˜‚๐—ฟ ๐˜€๐—ป๐—ถ๐—ฝ๐—ฝ๐—ฒ๐˜๐˜€ ๐—ถ๐—ป ๐—ฅ๐—”๐—š โ‡’ ColBERT, Rerankers, Cross-Encoders", "raw": "๐—›๐—ผ๐˜„ ๐˜๐—ผ ๐—ฟ๐—ฒ-๐—ฟ๐—ฎ๐—ป๐—ธ ๐˜†๐—ผ๐˜‚๐—ฟ ๐˜€๐—ป๐—ถ๐—ฝ๐—ฝ๐—ฒ๐˜๐˜€ ๐—ถ๐—ป ๐—ฅ๐—”๐—š โ‡’ ColBERT, Rerankers, Cross-Encoders", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Letโ€™s say youโ€™re doing RAG, and in an effort to improve performance, you try to rerank a few possible source snippets by their relevancy to a query.", "raw": "Letโ€™s say youโ€™re doing RAG, and in an effort to improve performance, you try to rerank a few possible source snippets by their relevancy to a query.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "How can you score similarity between your query and any source document? ๐Ÿค” ๐Ÿ“„ โ†”๏ธ ๐Ÿ“‘", "raw": "How can you score similarity between your query and any source document? ๐Ÿค” ๐Ÿ“„ โ†”๏ธ ๐Ÿ“‘", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿญ. ๐—๐˜‚๐˜€๐˜ ๐˜‚๐˜€๐—ฒ ๐—ฒ๐—บ๐—ฏ๐—ฒ๐—ฑ๐—ฑ๐—ถ๐—ป๐—ด๐˜€ : ๐—ก๐—ผ-๐—ถ๐—ป๐˜๐—ฒ๐—ฟ๐—ฎ๐—ฐ๐˜๐—ถ๐—ผ๐—ป ๐ŸŽ๏ธ", "raw": "๐Ÿญ. 
๐—๐˜‚๐˜€๐˜ ๐˜‚๐˜€๐—ฒ ๐—ฒ๐—บ๐—ฏ๐—ฒ๐—ฑ๐—ฑ๐—ถ๐—ป๐—ด๐˜€ : ๐—ก๐—ผ-๐—ถ๐—ป๐˜๐—ฒ๐—ฟ๐—ฎ๐—ฐ๐˜๐—ถ๐—ผ๐—ป ๐ŸŽ๏ธ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This means that you encode each token from both the query and the doc as separate vectors, then average the tokens of each separately to get in total 2 vectors, then you compute similarity via cosine or something.", "raw": "This means that you encode each token from both the query and the doc as separate vectors, then average the tokens of each separately to get in total 2 vectors, then you compute similarity via cosine or something.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โžก๏ธ Notable examples: Check the top of the MTEB leaderboard!", "raw": "โžก๏ธ Notable examples: Check the top of the MTEB leaderboard!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿฎ. ๐—Ÿ๐—ฎ๐˜๐—ฒ-๐—ถ๐—ป๐˜๐—ฒ๐—ฟ๐—ฎ๐—ฐ๐˜๐—ถ๐—ผ๐—ป: ๐˜๐—ต๐—ถ๐˜€ ๐—ถ๐˜€ ๐—–๐—ผ๐—น๐—•๐—˜๐—ฅ๐—ง ๐Ÿƒ", "raw": "๐Ÿฎ. ๐—Ÿ๐—ฎ๐˜๐—ฒ-๐—ถ๐—ป๐˜๐—ฒ๐—ฟ๐—ฎ๐—ฐ๐˜๐—ถ๐—ผ๐—ป: ๐˜๐—ต๐—ถ๐˜€ ๐—ถ๐˜€ ๐—–๐—ผ๐—น๐—•๐—˜๐—ฅ๐—ง ๐Ÿƒ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "These encode each token from both query and doc as separate vectors as before, but compare all together without previously averaging them and losing information.", "raw": "These encode each token from both query and doc as separate vectors as before, but compare all together without previously averaging them and losing information.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This is more accurate than no-interaction but also slower because you have to compare n*m vectors instead of 2. At least you can store documents in memory. 
And ColBERT has some optimisations like pooling to be faster.", "raw": "This is more accurate than no-interaction but also slower because you have to compare n*m vectors instead of 2. At least you can store documents in memory. And ColBERT has some optimisations like pooling to be faster.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โžก๏ธ Notable examples: ColBERTv2, mxbai-colbert-large-v1, jina-colbert-v2", "raw": "โžก๏ธ Notable examples: ColBERTv2, mxbai-colbert-large-v1, jina-colbert-v2", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿฏ. ๐—˜๐—ฎ๐—ฟ๐—น๐˜† ๐—ถ๐—ป๐˜๐—ฒ๐—ฟ๐—ฎ๐—ฐ๐˜๐—ถ๐—ผ๐—ป: ๐—–๐—ฟ๐—ผ๐˜€๐˜€-๐—ฒ๐—ป๐—ฐ๐—ผ๐—ฑ๐—ฒ๐—ฟ๐˜€ ๐Ÿ‹๏ธ", "raw": "๐Ÿฏ. ๐—˜๐—ฎ๐—ฟ๐—น๐˜† ๐—ถ๐—ป๐˜๐—ฒ๐—ฟ๐—ฎ๐—ฐ๐˜๐—ถ๐—ผ๐—ป: ๐—–๐—ฟ๐—ผ๐˜€๐˜€-๐—ฒ๐—ป๐—ฐ๐—ผ๐—ฑ๐—ฒ๐—ฟ๐˜€ ๐Ÿ‹๏ธ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Basically you run the concatenated query + document in a model to get a final score.", "raw": "Basically you run the concatenated query + document in a model to get a final score.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This is the most accurate, but also the slowest since it gets really long when you have many docs to rerank! And you cannot pre-store embeddings.", "raw": "This is the most accurate, but also the slowest since it gets really long when you have many docs to rerank! 
And you cannot pre-store embeddings.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โžก๏ธ Notable examples: MixedBread or Jina AI rerankers!", "raw": "โžก๏ธ Notable examples: MixedBread or Jina AI rerankers!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿš€ So what you choose is a trade-off between speed and accuracy: I think ColBERT is often a really good choice!", "raw": "๐Ÿš€ So what you choose is a trade-off between speed and accuracy: I think ColBERT is often a really good choice!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Based on this great post by Jina AI ๐Ÿ‘‰ ", "raw": "Based on this great post by Jina AI ๐Ÿ‘‰ ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://jina.ai/news/what-is-colbert-and-late-interaction-and-why-they-matter", "href": "https://jina.ai/news/what-is-colbert-and-late-interaction-and-why-they-matter", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
๐—›๐—ผ๐˜„ ๐˜๐—ผ ๐—ฟ๐—ฒ-๐—ฟ๐—ฎ๐—ป๐—ธ ๐˜†๐—ผ๐˜‚๐—ฟ ๐˜€๐—ป๐—ถ๐—ฝ๐—ฝ๐—ฒ๐˜๐˜€ ๐—ถ๐—ป ๐—ฅ๐—”๐—š โ‡’ ColBERT, Rerankers, Cross-Encoders Letโ€™s say youโ€™re doing RAG, and in an effort to improve performance, you try to rerank a few possible source snippets by their relevancy to a query. How can you score similarity between your query and any source document? ๐Ÿค” ๐Ÿ“„ โ†”๏ธ ๐Ÿ“‘ ๐Ÿญ. ๐—๐˜‚๐˜€๐˜ ๐˜‚๐˜€๐—ฒ ๐—ฒ๐—บ๐—ฏ๐—ฒ๐—ฑ๐—ฑ๐—ถ๐—ป๐—ด๐˜€ : ๐—ก๐—ผ-๐—ถ๐—ป๐˜๐—ฒ๐—ฟ๐—ฎ๐—ฐ๐˜๐—ถ๐—ผ๐—ป ๐ŸŽ๏ธ This means that you encode each token from both the query and the doc as separate vectors, then average the tokens of each separately to get in total 2 vectors, then you compute similarity via cosine or something. โžก๏ธ Notable examples: Check the top of the MTEB leaderboard! ๐Ÿฎ. ๐—Ÿ๐—ฎ๐˜๐—ฒ-๐—ถ๐—ป๐˜๐—ฒ๐—ฟ๐—ฎ๐—ฐ๐˜๐—ถ๐—ผ๐—ป: ๐˜๐—ต๐—ถ๐˜€ ๐—ถ๐˜€ ๐—–๐—ผ๐—น๐—•๐—˜๐—ฅ๐—ง ๐Ÿƒ These encode each token from both query and doc as separate vectors as before, but compare all together without previously averaging them and losing information. This is more accurate than no-interaction but also slower because you have to compare n*m vectors instead of 2. At least you can store documents in memory. And ColBERT has some optimisations like pooling to be faster. โžก๏ธ Notable examples: ColBERTv2, mxbai-colbert-large-v1, jina-colbert-v2 ๐Ÿฏ. ๐—˜๐—ฎ๐—ฟ๐—น๐˜† ๐—ถ๐—ป๐˜๐—ฒ๐—ฟ๐—ฎ๐—ฐ๐˜๐—ถ๐—ผ๐—ป: ๐—–๐—ฟ๐—ผ๐˜€๐˜€-๐—ฒ๐—ป๐—ฐ๐—ผ๐—ฑ๐—ฒ๐—ฟ๐˜€ ๐Ÿ‹๏ธ Basically you run the concatenated query + document in a model to get a final score. This is the most accurate, but also the slowest since it gets really long when you have many docs to rerank! And you cannot pre-store embeddings. โžก๏ธ Notable examples: MixedBread or Jina AI rerankers! ๐Ÿš€ So what you choose is a trade-off between speed and accuracy: I think ColBERT is often a really good choice! Based on this great post by Jina AI ๐Ÿ‘‰ https://jina.ai/news/what-is-colbert-and-late-interaction-and-why-they-matter
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d10d4e8eaa4831005e92b5/7p7-OmWM6PqqCs7ZStPGD.jpeg", "fullname": "Aymeric Roucher", "name": "m-ric", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 494, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/63d10d4e8eaa4831005e92b5/rwnt4ya3fktet57eHEuCa.png" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "wsuff" ], "count": 2 }, { "reaction": "โค๏ธ", "users": [ "FM-1976", "roger-temp" ], "count": 2 } ]
2024-10-18T13:40:31.000Z
2024-10-18T13:40:31.354Z
[]
/posts/m-ric/846070753492070
859
0
830635253975310
[ { "type": "text", "value": "It's raining depth estimation models โ˜”๏ธ", "raw": "It's raining depth estimation models โ˜”๏ธ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "DepthPro is a zero-shot depth estimation model by Apple, it's fast, sharp and accurate ๐Ÿ”ฅ ", "raw": "DepthPro is a zero-shot depth estimation model by Apple, it's fast, sharp and accurate ๐Ÿ”ฅ ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Demo: ", "raw": "Demo: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/akhaliq/depth-pro", "href": null, "resource": { "type": "space", "id": "akhaliq/depth-pro", "discussionNum": null }, "url": "https://huggingface.co/spaces/akhaliq/depth-pro", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Model: ", "raw": "Model: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/apple/DepthPro", "href": null, "resource": { "type": "model", "id": "apple/DepthPro", "discussionNum": null }, "url": "https://huggingface.co/apple/DepthPro", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Paper page: ", "raw": "Paper page: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2410.02073", "href": null, "resource": { "type": "paper", "id": "2410.02073", "discussionNum": null }, "url": "https://huggingface.co/papers/2410.02073", "code": null, "user": null, "label": "Depth Pro: Sharp Monocular Metric Depth in Less Than a Second (2410.02073)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The model consists of two encoders: an encoder for patches and an image encoder ๐Ÿ–ผ๏ธ The outputs of both are merged to decode to depth maps and get the focal length. ", "raw": "The model consists of two encoders: an encoder for patches and an image encoder ๐Ÿ–ผ๏ธ The outputs of both are merged to decode to depth maps and get the focal length. 
", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The model outperforms the previous state-of-the-art models in average of various benchmarks ๐Ÿ“‘", "raw": "The model outperforms the previous state-of-the-art models in average of various benchmarks ๐Ÿ“‘", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
It's raining depth estimation models โ˜”๏ธ DepthPro is a zero-shot depth estimation model by Apple, it's fast, sharp and accurate ๐Ÿ”ฅ Demo: https://huggingface.co/spaces/akhaliq/depth-pro Model: https://huggingface.co/apple/DepthPro Paper page: https://huggingface.co/papers/2410.02073 The model consists of two encoders: an encoder for patches and an image encoder ๐Ÿ–ผ๏ธ The outputs of both are merged to decode to depth maps and get the focal length. The model outperforms the previous state-of-the-art models on average across various benchmarks ๐Ÿ“‘
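Editor's note: a hedged usage sketch, assuming the checkpoint is (or becomes) loadable through transformers' generic depth-estimation pipeline; otherwise Apple's repository ships its own loader. The model id is taken from the post, the image URL is just an example.

```python
from transformers import pipeline
from PIL import Image
import requests

# Assumption: this checkpoint works with the generic depth-estimation pipeline
# in your transformers version; if not, use the loader from Apple's DepthPro repo.
pipe = pipeline("depth-estimation", model="apple/DepthPro")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"  # example image
image = Image.open(requests.get(url, stream=True).raw)

out = pipe(image)
out["depth"].save("depth.png")  # PIL image of the predicted depth map
```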
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648113222875-6141a88b3a0ec78603c9e784.png", "fullname": "Merve Noyan", "name": "merve", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5589, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6141a88b3a0ec78603c9e784/AMLxOKR_gAudACdg2KKPB.png" } ]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "victor", "YaTharThShaRma999", "John6666", "baratpaim", "adorkin", "mpottinger", "AtAndDev" ], "count": 7 }, { "reaction": "๐Ÿš€", "users": [ "ahmed-masry", "AtAndDev" ], "count": 2 } ]
2024-10-18T10:28:40.000Z
2024-10-18T10:28:40.931Z
[]
/posts/merve/830635253975310
1,962
0
181659368575031
[ { "type": "text", "value": "๐Ÿ“ฃ ๐š‘๐šž๐š๐š๐š’๐š—๐š๐š๐šŠ๐šŒ๐šŽ_๐š‘๐šž๐š‹ v0.26.0 is out with some new features and improvements!", "raw": "๐Ÿ“ฃ ๐š‘๐šž๐š๐š๐š’๐š—๐š๐š๐šŠ๐šŒ๐šŽ_๐š‘๐šž๐š‹ v0.26.0 is out with some new features and improvements!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โœจ ๐—ง๐—ผ๐—ฝ ๐—›๐—ถ๐—ด๐—ต๐—น๐—ถ๐—ด๐—ต๐˜๐˜€:", "raw": "โœจ ๐—ง๐—ผ๐—ฝ ๐—›๐—ถ๐—ด๐—ต๐—น๐—ถ๐—ด๐—ต๐˜๐˜€:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- ๐Ÿ”ย Multiple access tokens support: Easily manage multiple access tokens with new CLI commands. Perfect for handling multiple tokens with specific permissions in production or when collaborating with external teams.", "raw": "- ๐Ÿ”ย Multiple access tokens support: Easily manage multiple access tokens with new CLI commands. Perfect for handling multiple tokens with specific permissions in production or when collaborating with external teams.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- ๐Ÿ–ผ๏ธ Conversational VLMs inference is now supported withย InferenceClient's chat completion!", "raw": "- ๐Ÿ–ผ๏ธ Conversational VLMs inference is now supported withย InferenceClient's chat completion!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- ๐Ÿ“„ Daily Papers API: Seamlessly search and retrieve detailed paper information from the Hub!", "raw": "- ๐Ÿ“„ Daily Papers API: Seamlessly search and retrieve detailed paper information from the Hub!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Weโ€™ve also introduced multiple bug fixes and quality-of-life improvements - thanks to the awesome contributions from our community! ๐Ÿค—", "raw": "Weโ€™ve also introduced multiple bug fixes and quality-of-life improvements - thanks to the awesome contributions from our community! 
๐Ÿค—", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Check out the release notes here: ", "raw": "Check out the release notes here: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/Wauplin/huggingface_hub/discussions/9", "href": null, "resource": { "type": "space", "id": "Wauplin/huggingface_hub", "discussionNum": 9 }, "url": "https://huggingface.co/spaces/Wauplin/huggingface_hub/discussions/9", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "and you can try it out now ๐Ÿ‘‡", "raw": "and you can try it out now ๐Ÿ‘‡", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "code_fence", "value": null, "raw": "```\npip install huggingface_hub==0.26.0\n```", "href": null, "resource": null, "url": null, "code": "pip install huggingface_hub==0.26.0", "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
๐Ÿ“ฃ ๐š‘๐šž๐š๐š๐š’๐š—๐š๐š๐šŠ๐šŒ๐šŽ_๐š‘๐šž๐š‹ v0.26.0 is out with some new features and improvements! โœจ ๐—ง๐—ผ๐—ฝ ๐—›๐—ถ๐—ด๐—ต๐—น๐—ถ๐—ด๐—ต๐˜๐˜€: - ๐Ÿ”ย Multiple access tokens support: Easily manage multiple access tokens with new CLI commands. Perfect for handling multiple tokens with specific permissions in production or when collaborating with external teams. - ๐Ÿ–ผ๏ธ Conversational VLMs inference is now supported withย InferenceClient's chat completion! - ๐Ÿ“„ Daily Papers API: Seamlessly search and retrieve detailed paper information from the Hub! Weโ€™ve also introduced multiple bug fixes and quality-of-life improvements - thanks to the awesome contributions from our community! ๐Ÿค— Check out the release notes here: https://huggingface.co/spaces/Wauplin/huggingface_hub/discussions/9 and you can try it out now ๐Ÿ‘‡ ``` pip install huggingface_hub==0.26.0 ```
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6192895f3b8aa351a46fadfd/b_MkVyuYPgM_WOEKjpNdd.jpeg", "fullname": "Celina", "name": "celinah", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 14, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6192895f3b8aa351a46fadfd/rvfOrjcWcCkmc6jTKOz5C.png" } ]
[]
[ { "reaction": "โค๏ธ", "users": [ "Chris4K", "victor", "asoria", "John6666", "AtAndDev", "Nymbo", "louisbrulenaudet" ], "count": 7 }, { "reaction": "๐Ÿš€", "users": [ "John6666", "AtAndDev", "Nymbo" ], "count": 3 }, { "reaction": "๐Ÿค—", "users": [ "John6666", "AtAndDev", "Nymbo" ], "count": 3 }, { "reaction": "โž•", "users": [ "John6666", "AtAndDev", "Nymbo" ], "count": 3 }, { "reaction": "๐Ÿ‘", "users": [ "John6666", "AtAndDev", "Nymbo" ], "count": 3 }, { "reaction": "๐Ÿ”ฅ", "users": [ "John6666", "AtAndDev", "Nymbo" ], "count": 3 } ]
2024-10-18T09:09:38.000Z
2024-10-18T09:11:04.361Z
[]
/posts/celinah/181659368575031
1,069
0
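A quick illustration of the conversational VLM support mentioned in the release post above, using `InferenceClient.chat_completion`. This is a minimal sketch rather than the official release-notes snippet: the model ID, image URL, and prompt are placeholder assumptions, and the message layout follows the OpenAI-style schema the client accepts.

```python
# Minimal sketch (assumptions: model ID and image URL are placeholders).
# Requires huggingface_hub >= 0.26.0 and a Hugging Face token configured locally.
from huggingface_hub import InferenceClient

client = InferenceClient("meta-llama/Llama-3.2-11B-Vision-Instruct")

messages = [
    {
        "role": "user",
        "content": [
            {"type": "image_url", "image_url": {"url": "https://example.com/photo.jpg"}},
            {"type": "text", "text": "Describe this image in one sentence."},
        ],
    }
]

# chat_completion returns an OpenAI-style response object.
response = client.chat_completion(messages=messages, max_tokens=100)
print(response.choices[0].message.content)
```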
150496682999597
[ { "type": "text", "value": "if you're encountering 500 errors on spaces that seem to work otherwise , kindly consider screenshotting and sharing the link here : ", "raw": "if you're encountering 500 errors on spaces that seem to work otherwise , kindly consider screenshotting and sharing the link here : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://discord.com/channels/879548962464493619/1295847667515129877", "href": "https://discord.com/channels/879548962464493619/1295847667515129877", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
if you're encountering 500 errors on spaces that seem to work otherwise , kindly consider screenshotting and sharing the link here : https://discord.com/channels/879548962464493619/1295847667515129877
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a3bb1cd0d8c2c2169f0b88/eT2TS0IlQbZtz-F_zHLz9.jpeg", "fullname": "Joseph [open/acc] Pollack", "name": "Tonic", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 313, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "victor", "John6666", "Nymbo" ], "count": 3 }, { "reaction": "๐Ÿ˜”", "users": [ "John6666" ], "count": 1 } ]
2024-10-18T08:37:28.000Z
2024-11-04T12:40:19.222Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png", "fullname": "John Smith", "name": "John6666", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 398, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a3bb1cd0d8c2c2169f0b88/eT2TS0IlQbZtz-F_zHLz9.jpeg", "fullname": "Joseph [open/acc] Pollack", "name": "Tonic", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 313, "isFollowing": false } ]
/posts/Tonic/150496682999597
972
7
100068424379559
[ { "type": "text", "value": "๐Ÿšจ๐Ÿ”ฅ New Release Alert! ๐Ÿ”ฅ๐Ÿšจ", "raw": "๐Ÿšจ๐Ÿ”ฅ New Release Alert! ๐Ÿ”ฅ๐Ÿšจ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Introducing the 435M model that outperforms Llama-Guard-3-8B while slashing 75% of the computation cost! ๐Ÿ’ป๐Ÿ’ฅ", "raw": "Introducing the 435M model that outperforms Llama-Guard-3-8B while slashing 75% of the computation cost! ๐Ÿ’ป๐Ÿ’ฅ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ‘‰ Check it out: ", "raw": "๐Ÿ‘‰ Check it out: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/hbseong/HarmAug-Guard", "href": null, "resource": { "type": "model", "id": "hbseong/HarmAug-Guard", "discussionNum": null }, "url": "https://huggingface.co/hbseong/HarmAug-Guard", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " (Yes, INFERENCE CODE INCLUDED! ๐Ÿ’ก)", "raw": " (Yes, INFERENCE CODE INCLUDED! ๐Ÿ’ก)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "More details in our paper: ", "raw": "More details in our paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://arxiv.org/abs/2410.01524", "href": "https://arxiv.org/abs/2410.01524", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ๐Ÿ“œ", "raw": " ๐Ÿ“œ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "#HarmAug #LLM # Safety #EfficiencyBoost #Research #AI #MachineLearning ", "raw": "#HarmAug #LLM # Safety #EfficiencyBoost #Research #AI #MachineLearning ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
๐Ÿšจ๐Ÿ”ฅ New Release Alert! ๐Ÿ”ฅ๐Ÿšจ Introducing the 435M model that outperforms Llama-Guard-3-8B while slashing 75% of the computation cost! ๐Ÿ’ป๐Ÿ’ฅ ๐Ÿ‘‰ Check it out: https://huggingface.co/hbseong/HarmAug-Guard (Yes, INFERENCE CODE INCLUDED! ๐Ÿ’ก) More details in our paper: https://arxiv.org/abs/2410.01524 ๐Ÿ“œ #HarmAug #LLM # Safety #EfficiencyBoost #Research #AI #MachineLearning
{ "avatarUrl": "/avatars/6cda37befc873a92ed6d5dcba507954a.svg", "fullname": "Haebin Seong", "name": "hbseong", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 13, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿš€", "users": [ "Seanie-lee", "hbseong", "John6666", "adorkin", "AtAndDev" ], "count": 5 }, { "reaction": "โค๏ธ", "users": [ "FM-1976", "AtAndDev", "TuringsSolutions", "hbseong" ], "count": 4 } ]
2024-10-18T08:01:42.000Z
2024-10-18T08:04:30.930Z
[]
/posts/hbseong/100068424379559
1,190
0
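For readers who want to try the safety classifier announced above before opening the model card, here is a hedged sketch using the generic transformers pipeline. It assumes the checkpoint loads as a standard sequence-classification model; the exact prompt/response input template the authors recommend (and their official inference code) lives on the model card, so treat this only as a starting point.

```python
# Illustrative sketch only: the authors' official inference code is on the model card.
# Assumption: hbseong/HarmAug-Guard loads as a standard sequence-classification checkpoint.
from transformers import pipeline

classifier = pipeline("text-classification", model="hbseong/HarmAug-Guard")

# The model card describes the exact prompt/response formatting the model expects;
# passing a plain string here is a simplification for demonstration.
print(classifier("How do I make a dangerous chemical at home?"))
```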
541340656451476
[ { "type": "text", "value": "๐Ÿš€ Child Safe Chatbot (Experimental)Based on google/shieldgemma-9b. Currently in the experimental phase, this configuration could be a starting point to consolidate an LLM that meets the requirements to be safe for minors.", "raw": "๐Ÿš€ Child Safe Chatbot (Experimental)Based on google/shieldgemma-9b. Currently in the experimental phase, this configuration could be a starting point to consolidate an LLM that meets the requirements to be safe for minors.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ‘‰ ", "raw": "๐Ÿ‘‰ ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/nmarafo/Child-Safe-Chatbot", "href": null, "resource": { "type": "space", "id": "nmarafo/Child-Safe-Chatbot", "discussionNum": null }, "url": "https://huggingface.co/spaces/nmarafo/Child-Safe-Chatbot", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "#EdTech #ChildSafety #AI #Education #Experimental", "raw": "#EdTech #ChildSafety #AI #Education #Experimental", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
๐Ÿš€ Child Safe Chatbot (Experimental)Based on google/shieldgemma-9b. Currently in the experimental phase, this configuration could be a starting point to consolidate an LLM that meets the requirements to be safe for minors. ๐Ÿ‘‰ https://huggingface.co/spaces/nmarafo/Child-Safe-Chatbot #EdTech #ChildSafety #AI #Education #Experimental
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1676376697215-noauth.jpeg", "fullname": "Norberto Martรญn Afonso", "name": "nmarafo", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-10-18T07:10:49.000Z
2024-10-18T07:10:49.781Z
[]
/posts/nmarafo/541340656451476
377
0
316377868914432
[ { "type": "text", "value": "๐Ÿš€ Introducing ColFlor: An Efficient, OCR-Free Vision-Language Document Retrieval Model ๐ŸŒŸ", "raw": "๐Ÿš€ Introducing ColFlor: An Efficient, OCR-Free Vision-Language Document Retrieval Model ๐ŸŒŸ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Earlier this year, ColPali revolutionized document retrieval by eliminating the need for error-prone OCR pipelines. Instead, it directly processes the document images. However, with its 3 billion parameters, ColPali is computationally heavy for large-scale applications.", "raw": "Earlier this year, ColPali revolutionized document retrieval by eliminating the need for error-prone OCR pipelines. Instead, it directly processes the document images. However, with its 3 billion parameters, ColPali is computationally heavy for large-scale applications.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Thatโ€™s where ColFlor comes inโ€”a smaller, faster alternative! ๐ŸŽ‰ At 17x smaller than ColPali, ColFlor offers a more efficient, OCR-free document retrieval solution, making it ideal for users with limited computing resources (GPU Poor). ๐Ÿ’ก", "raw": "Thatโ€™s where ColFlor comes inโ€”a smaller, faster alternative! ๐ŸŽ‰ At 17x smaller than ColPali, ColFlor offers a more efficient, OCR-free document retrieval solution, making it ideal for users with limited computing resources (GPU Poor). ๐Ÿ’ก", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Key Highlights:", "raw": "Key Highlights:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿง  174M parameters (vs. 3B for ColPali)", "raw": "๐Ÿง  174M parameters (vs. 
3B for ColPali)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โšก 9.8x faster query encoding, 5.25x faster image encoding", "raw": "โšก 9.8x faster query encoding, 5.25x faster image encoding", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“‰ Only 1.8% performance drop on text-rich English documents", "raw": "๐Ÿ“‰ Only 1.8% performance drop on text-rich English documents", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Check out the full blog post for more insights on modeling, training, and evaluations across various document retrieval tasks! ๐Ÿš€", "raw": "Check out the full blog post for more insights on modeling, training, and evaluations across various document retrieval tasks! ๐Ÿš€", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Also, feel free to try our demo on huggingface ๐Ÿค— ", "raw": "Also, feel free to try our demo on huggingface ๐Ÿค— ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ”— Resources:", "raw": "๐Ÿ”— Resources:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“„ Blog post: ", "raw": "๐Ÿ“„ Blog post: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/blog/ahmed-masry/colflor", "href": "https://huggingface.co/blog/ahmed-masry/colflor", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿง  Model: ", "raw": "๐Ÿง  Model: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/ahmed-masry/ColFlor", 
"href": null, "resource": { "type": "model", "id": "ahmed-masry/ColFlor", "discussionNum": null }, "url": "https://huggingface.co/ahmed-masry/ColFlor", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ’ป Demo: ", "raw": "๐Ÿ’ป Demo: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/ahmed-masry/ColFlor-Demo", "href": null, "resource": { "type": "space", "id": "ahmed-masry/ColFlor-Demo", "discussionNum": null }, "url": "https://huggingface.co/spaces/ahmed-masry/ColFlor-Demo", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ‹๏ธโ€โ™‚๏ธ Training code: ", "raw": "๐Ÿ‹๏ธโ€โ™‚๏ธ Training code: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/AhmedMasryKU/colflor", "href": "https://github.com/AhmedMasryKU/colflor", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“Š Evaluation code: ", "raw": "๐Ÿ“Š Evaluation code: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/AhmedMasryKU/vidore-benchmark-colflor", "href": "https://github.com/AhmedMasryKU/vidore-benchmark-colflor", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
๐Ÿš€ Introducing ColFlor: An Efficient, OCR-Free Vision-Language Document Retrieval Model ๐ŸŒŸ Earlier this year, ColPali revolutionized document retrieval by eliminating the need for error-prone OCR pipelines. Instead, it directly processes the document images. However, with its 3 billion parameters, ColPali is computationally heavy for large-scale applications. Thatโ€™s where ColFlor comes inโ€”a smaller, faster alternative! ๐ŸŽ‰ At 17x smaller than ColPali, ColFlor offers a more efficient, OCR-free document retrieval solution, making it ideal for users with limited computing resources (GPU Poor). ๐Ÿ’ก Key Highlights: ๐Ÿง  174M parameters (vs. 3B for ColPali) โšก 9.8x faster query encoding, 5.25x faster image encoding ๐Ÿ“‰ Only 1.8% performance drop on text-rich English documents Check out the full blog post for more insights on modeling, training, and evaluations across various document retrieval tasks! ๐Ÿš€ Also, feel free to try our demo on huggingface ๐Ÿค— ๐Ÿ”— Resources: ๐Ÿ“„ Blog post: https://huggingface.co/blog/ahmed-masry/colflor ๐Ÿง  Model: https://huggingface.co/ahmed-masry/ColFlor ๐Ÿ’ป Demo: https://huggingface.co/spaces/ahmed-masry/ColFlor-Demo ๐Ÿ‹๏ธโ€โ™‚๏ธ Training code: https://github.com/AhmedMasryKU/colflor ๐Ÿ“Š Evaluation code: https://github.com/AhmedMasryKU/vidore-benchmark-colflor
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63efd75a5c2ceb16fc6e98fc/qoA4LKuLTEr7hx90i90UK.jpeg", "fullname": "Ahmed Masry", "name": "ahmed-masry", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 42, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "Chris4K" ], "count": 2 }, { "reaction": "๐Ÿ”ฅ", "users": [ "adorkin" ], "count": 1 } ]
2024-10-18T04:17:21.000Z
2024-10-18T04:17:21.070Z
[]
/posts/ahmed-masry/316377868914432
1,128
0
510101665347796
[ { "type": "text", "value": "I built a Hyper Dimensional Computing (HDC) Encoder and Decoder and I also built out all of the code needed to replace the Encoder and Decoder of a Llama model with this HDC model, then train the Llama model on the HDC Encoder/Decoder. All MIT licensed. Here is a video where I break it all down. I can answer any questions about this project or help anyone out where I can. I am not a super developer or anything and I don't have access to enough compute to train this on a large dataset: ", "raw": "I built a Hyper Dimensional Computing (HDC) Encoder and Decoder and I also built out all of the code needed to replace the Encoder and Decoder of a Llama model with this HDC model, then train the Llama model on the HDC Encoder/Decoder. All MIT licensed. Here is a video where I break it all down. I can answer any questions about this project or help anyone out where I can. I am not a super developer or anything and I don't have access to enough compute to train this on a large dataset: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://youtu.be/4VsZpGaPK4g", "href": "https://youtu.be/4VsZpGaPK4g", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
I built a Hyper Dimensional Computing (HDC) Encoder and Decoder and I also built out all of the code needed to replace the Encoder and Decoder of a Llama model with this HDC model, then train the Llama model on the HDC Encoder/Decoder. All MIT licensed. Here is a video where I break it all down. I can answer any questions about this project or help anyone out where I can. I am not a super developer or anything and I don't have access to enough compute to train this on a large dataset: https://youtu.be/4VsZpGaPK4g
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png", "fullname": "Richard A Aragon", "name": "TuringsSolutions", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 146, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "Joseph717171", "ZeroXClem", "xi0v" ], "count": 4 } ]
2024-10-18T02:18:32.000Z
2024-10-18T07:21:27.062Z
[ { "avatarUrl": "/avatars/ea4398745974d781ae9dc0e95b12cabe.svg", "fullname": "Joseph", "name": "Joseph717171", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 22, "isFollowing": false } ]
/posts/TuringsSolutions/510101665347796
923
1
127269471333935
[ { "type": "text", "value": "Here is how we can calculate the size of any LLM model:", "raw": "Here is how we can calculate the size of any LLM model:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Each parameter in LLM models is typically stored as a floating-point number. The size of each parameter in bytes depends on the precision. ", "raw": "Each parameter in LLM models is typically stored as a floating-point number. The size of each parameter in bytes depends on the precision. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "32-bit precision: Each parameter takes 4 bytes.", "raw": "32-bit precision: Each parameter takes 4 bytes.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "16-bit precision: Each parameter takes 2 bytes ", "raw": "16-bit precision: Each parameter takes 2 bytes ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "To calculate the total memory usage of the model:", "raw": "To calculate the total memory usage of the model:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Memory usage (in bytes) = No. of Parameters ร— Size of Each Parameter", "raw": "Memory usage (in bytes) = No. 
of Parameters ร— Size of Each Parameter", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "For example:", "raw": "For example:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "32-bit Precision (FP32)", "raw": "32-bit Precision (FP32)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "In 32-bit floating-point precision, each parameter takes 4 bytes.", "raw": "In 32-bit floating-point precision, each parameter takes 4 bytes.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Memory usage in bytes = 1 billion parameters ร— 4 bytes", "raw": "Memory usage in bytes = 1 billion parameters ร— 4 bytes", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1,000,000,000 ร— 4 = 4,000,000,000 bytes", "raw": "1,000,000,000 ร— 4 = 4,000,000,000 bytes", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "In gigabytes: โ‰ˆ 3.73 GB", "raw": "In gigabytes: โ‰ˆ 3.73 GB", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "16-bit Precision (FP16)", "raw": "16-bit Precision (FP16)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "In 16-bit floating-point precision, each parameter takes 2 bytes.", "raw": "In 16-bit floating-point precision, each parameter takes 2 bytes.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, 
"url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Memory usage in bytes = 1 billion parameters ร— 2 bytes", "raw": "Memory usage in bytes = 1 billion parameters ร— 2 bytes", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1,000,000,000 ร— 2 = 2,000,000,000 bytes", "raw": "1,000,000,000 ร— 2 = 2,000,000,000 bytes", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "In gigabytes: โ‰ˆ 1.86 GB", "raw": "In gigabytes: โ‰ˆ 1.86 GB", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "It depends on whether you use 32-bit or 16-bit precision, a model with 1 billion parameters would use approximately 3.73 GB or 1.86 GB of memory, respectively.", "raw": "It depends on whether you use 32-bit or 16-bit precision, a model with 1 billion parameters would use approximately 3.73 GB or 1.86 GB of memory, respectively.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Here is how we can calculate the size of any LLM model: Each parameter in LLM models is typically stored as a floating-point number. The size of each parameter in bytes depends on the precision. 32-bit precision: Each parameter takes 4 bytes. 16-bit precision: Each parameter takes 2 bytes To calculate the total memory usage of the model: Memory usage (in bytes) = No. of Parameters ร— Size of Each Parameter For example: 32-bit Precision (FP32) In 32-bit floating-point precision, each parameter takes 4 bytes. Memory usage in bytes = 1 billion parameters ร— 4 bytes 1,000,000,000 ร— 4 = 4,000,000,000 bytes In gigabytes: โ‰ˆ 3.73 GB 16-bit Precision (FP16) In 16-bit floating-point precision, each parameter takes 2 bytes. Memory usage in bytes = 1 billion parameters ร— 2 bytes 1,000,000,000 ร— 2 = 2,000,000,000 bytes In gigabytes: โ‰ˆ 1.86 GB It depends on whether you use 32-bit or 16-bit precision, a model with 1 billion parameters would use approximately 3.73 GB or 1.86 GB of memory, respectively.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/y1W6Co4jIYB95Cx6Tjrsd.jpeg", "fullname": "Muhammad Imran Zaman", "name": "ImranzamanML", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 32, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘", "users": [ "zzlxv", "John6666", "danielus", "Ninjanimus", "san122", "SporkySporkness", "AtAndDev", "adriszmar" ], "count": 8 } ]
2024-10-17T22:12:21.000Z
2024-10-17T22:12:55.366Z
[]
/posts/ImranzamanML/127269471333935
1,292
0
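The arithmetic in the post above is easy to package as a small helper. Below is a minimal sketch in plain Python; the function name and the quantized-precision entries are my additions, "GB" is treated as 2^30 bytes to match the post's 3.73 GB figure, and the estimate covers the weights only (no activations, optimizer states, or KV cache).

```python
# Minimal sketch of the memory estimate described in the post above.
# Assumptions: "GB" means 2**30 bytes, and only the model weights are counted.
BYTES_PER_PARAM = {"fp32": 4, "fp16": 2, "bf16": 2, "int8": 1, "int4": 0.5}

def model_memory_gb(num_params: float, dtype: str = "fp16") -> float:
    """Approximate memory needed to hold the model weights, in GB (2**30 bytes)."""
    return num_params * BYTES_PER_PARAM[dtype] / 2**30

print(f"{model_memory_gb(1e9, 'fp32'):.2f} GB")  # ~3.73 GB for 1B params in FP32
print(f"{model_memory_gb(1e9, 'fp16'):.2f} GB")  # ~1.86 GB for 1B params in FP16
```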
716810945395633
[ { "type": "text", "value": "๐ŸŽ“ Introducing Ukr-lit.com.ua Presentations Dataset - ", "raw": "๐ŸŽ“ Introducing Ukr-lit.com.ua Presentations Dataset - ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/nyuuzyou/ukr-lit", "href": null, "resource": { "type": "dataset", "id": "nyuuzyou/ukr-lit", "discussionNum": null }, "url": "https://huggingface.co/datasets/nyuuzyou/ukr-lit", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Dataset highlights:", "raw": "Dataset highlights:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- 18,001 presentations from ukr-lit.com.ua, a platform for storing and viewing presentations covering a wide range of subjects in Ukrainian school education", "raw": "- 18,001 presentations from ukr-lit.com.ua, a platform for storing and viewing presentations covering a wide range of subjects in Ukrainian school education", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Primarily in Ukrainian, with some Russian and English content", "raw": "- Primarily in Ukrainian, with some Russian and English content", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Each entry includes: URL, title, download URL, filepath, and extracted text content (where available)", "raw": "- Each entry includes: URL, title, download URL, filepath, and extracted text content (where available)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Contains original PPT/PPTX files in addition to metadata", "raw": "- Contains original PPT/PPTX files in addition to metadata", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Data covers a broad spectrum of educational topics and subjects taught in Ukrainian schools", "raw": "- Data covers a broad spectrum of educational topics and subjects taught in Ukrainian schools", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": 
"\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Dedicated to the public domain under Creative Commons Zero (CC0) license", "raw": "- Dedicated to the public domain under Creative Commons Zero (CC0) license", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The dataset can be used for analyzing educational presentation content across various subjects in Ukrainian and other languages, text classification tasks, and information retrieval systems. It's particularly valuable for examining trends in Ukrainian school education, teaching methodologies, and presentation materials used across different academic disciplines. The inclusion of original files allows for in-depth analysis of presentation formats and structures commonly used in Ukrainian educational settings, providing insights into the diverse range of subjects and teaching approaches in the Ukrainian school system.", "raw": "The dataset can be used for analyzing educational presentation content across various subjects in Ukrainian and other languages, text classification tasks, and information retrieval systems. It's particularly valuable for examining trends in Ukrainian school education, teaching methodologies, and presentation materials used across different academic disciplines. The inclusion of original files allows for in-depth analysis of presentation formats and structures commonly used in Ukrainian educational settings, providing insights into the diverse range of subjects and teaching approaches in the Ukrainian school system.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
๐ŸŽ“ Introducing Ukr-lit.com.ua Presentations Dataset - https://huggingface.co/datasets/nyuuzyou/ukr-lit Dataset highlights: - 18,001 presentations from ukr-lit.com.ua, a platform for storing and viewing presentations covering a wide range of subjects in Ukrainian school education - Primarily in Ukrainian, with some Russian and English content - Each entry includes: URL, title, download URL, filepath, and extracted text content (where available) - Contains original PPT/PPTX files in addition to metadata - Data covers a broad spectrum of educational topics and subjects taught in Ukrainian schools - Dedicated to the public domain under Creative Commons Zero (CC0) license The dataset can be used for analyzing educational presentation content across various subjects in Ukrainian and other languages, text classification tasks, and information retrieval systems. It's particularly valuable for examining trends in Ukrainian school education, teaching methodologies, and presentation materials used across different academic disciplines. The inclusion of original files allows for in-depth analysis of presentation formats and structures commonly used in Ukrainian educational settings, providing insights into the diverse range of subjects and teaching approaches in the Ukrainian school system.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/643ac5d2e2b979ae6144d68c/Z7PCNopn4cQeAYnVJDoqG.png", "fullname": "nyuuzyou", "name": "nyuuzyou", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 57, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-10-17T21:58:43.000Z
2024-10-17T21:58:43.467Z
[]
/posts/nyuuzyou/716810945395633
427
0
290133248979044
[ { "type": "text", "value": "๐ŸŒ Super cool visualization of global PUT requests to Hugging Face over 24 hours, coded by object size, thanks to ", "raw": "๐ŸŒ Super cool visualization of global PUT requests to Hugging Face over 24 hours, coded by object size, thanks to ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@port8080", "href": null, "resource": null, "url": null, "code": null, "user": "port8080", "label": null, "lang": null }, { "type": "text", "value": "! ", "raw": "! ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "We're putting this analysis to work to help us architect a more geo-distributed system for the HF storage backend.", "raw": "We're putting this analysis to work to help us architect a more geo-distributed system for the HF storage backend.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Originally shared on LinkedIn: ", "raw": "Originally shared on LinkedIn: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.linkedin.com/posts/ajitbanerjee_one-of-the-joys-of-working-on-the-xethub-activity-7252688424732614656-tFGD", "href": "https://www.linkedin.com/posts/ajitbanerjee_one-of-the-joys-of-working-on-the-xethub-activity-7252688424732614656-tFGD", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
๐ŸŒ Super cool visualization of global PUT requests to Hugging Face over 24 hours, coded by object size, thanks to @port8080! We're putting this analysis to work to help us architect a more geo-distributed system for the HF storage backend. Originally shared on LinkedIn: https://www.linkedin.com/posts/ajitbanerjee_one-of-the-joys-of-working-on-the-xethub-activity-7252688424732614656-tFGD
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/66b05ca6e7c57eac7cafbbc4/nddUkS3xu78cxCS-r7-xB.jpeg", "fullname": "Ann Huang", "name": "erinys", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 27, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66b05ca6e7c57eac7cafbbc4/lRmwb7YelFX2rLoHhAqE_.gif" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65e77dcc714ce98ddd82568e/KhIkyM1Hc00t3zAqIaDoH.jpeg", "fullname": "Banerjee", "name": "port8080", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 11 } ]
[ { "reaction": "๐Ÿš€", "users": [ "nyuuzyou", "port8080", "Awhildy", "AtAndDev", "John6666", "gabrielmbmb", "not-lain", "yjernite", "xi0v", "jsulz" ], "count": 10 } ]
2024-10-17T17:54:25.000Z
2024-10-17T23:14:22.563Z
[]
/posts/erinys/290133248979044
2,127
0
706599655765982
[ { "type": "text", "value": "Big news! You can now build strong ML models without days of human labelling", "raw": "Big news! You can now build strong ML models without days of human labelling", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "You simply:", "raw": "You simply:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Define your dataset, including annotation guidelines, labels and fields", "raw": "- Define your dataset, including annotation guidelines, labels and fields", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Optionally label some records manually.", "raw": "- Optionally label some records manually.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Use an LLM to auto label your data with a human (you? your team?) in the loop!", "raw": "- Use an LLM to auto label your data with a human (you? your team?) in the loop!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Get started with this blog post:", "raw": "Get started with this blog post:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/blog/sdiazlor/custom-text-classifier-ai-human-feedback", "href": "https://huggingface.co/blog/sdiazlor/custom-text-classifier-ai-human-feedback", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Big news! You can now build strong ML models without days of human labelling You simply: - Define your dataset, including annotation guidelines, labels and fields - Optionally label some records manually. - Use an LLM to auto label your data with a human (you? your team?) in the loop! Get started with this blog post: https://huggingface.co/blog/sdiazlor/custom-text-classifier-ai-human-feedback
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60420dccc15e823a685f2b03/Dn7QTyy9SZ7jKN6xpufVD.png", "fullname": "Daniel Vila", "name": "dvilasuero", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 231, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "roger-temp", "victor", "ahusain", "xi0v" ], "count": 5 }, { "reaction": "๐Ÿ‘", "users": [ "ijohn07" ], "count": 1 } ]
2024-10-17T15:21:50.000Z
2024-10-17T15:21:50.511Z
[]
/posts/dvilasuero/706599655765982
985
0
396291357438972
[ { "type": "text", "value": "You can now build a custom text classifier without days of human labeling!", "raw": "You can now build a custom text classifier without days of human labeling!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ‘ LLMs work reasonably well as text classifiers.", "raw": "๐Ÿ‘ LLMs work reasonably well as text classifiers.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ‘Ž They are expensive to run at scale and their performance drops in specialized domains.", "raw": "๐Ÿ‘Ž They are expensive to run at scale and their performance drops in specialized domains.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ‘ Purpose-built classifiers have low latency and can potentially run on CPU.", "raw": "๐Ÿ‘ Purpose-built classifiers have low latency and can potentially run on CPU.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ‘Ž They require labeled training data.", "raw": "๐Ÿ‘Ž They require labeled training data.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Combine the best of both worlds: the automatic labeling capabilities of LLMs and the high-quality annotations from human experts to train and deploy a specialized model.", "raw": "Combine the best of both worlds: the automatic labeling capabilities of LLMs and the high-quality annotations from human experts to train and deploy a specialized model.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Blog: ", "raw": "Blog: ", "href": null, "resource": null, "url": null, "code": null, "user": null, 
"label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/blog/sdiazlor/custom-text-classifier-ai-human-feedback", "href": "https://huggingface.co/blog/sdiazlor/custom-text-classifier-ai-human-feedback", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
You can now build a custom text classifier without days of human labeling! ๐Ÿ‘ LLMs work reasonably well as text classifiers. ๐Ÿ‘Ž They are expensive to run at scale and their performance drops in specialized domains. ๐Ÿ‘ Purpose-built classifiers have low latency and can potentially run on CPU. ๐Ÿ‘Ž They require labeled training data. Combine the best of both worlds: the automatic labeling capabilities of LLMs and the high-quality annotations from human experts to train and deploy a specialized model. Blog: https://huggingface.co/blog/sdiazlor/custom-text-classifier-ai-human-feedback
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1677141720071-634ff41ff32062e9eb7b06a3.jpeg", "fullname": "David Berenstein", "name": "davidberenstein1957", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 167, "isFollowing": false }
[]
[]
[ { "reaction": "โค๏ธ", "users": [ "dvilasuero", "AtAndDev", "NickoSELI", "tiendung", "Darklight0012", "davidberenstein1957", "clem" ], "count": 7 }, { "reaction": "โž•", "users": [ "nyuuzyou", "John6666", "victor", "Ninjanimus", "AtAndDev", "tiendung", "davidberenstein1957" ], "count": 7 } ]
2024-10-17T15:21:27.000Z
2024-10-17T15:21:41.148Z
[]
/posts/davidberenstein1957/396291357438972
1,689
0
875962742909412
[ { "type": "text", "value": "By far the coolest release of the day!", "raw": "By far the coolest release of the day!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "> The Open LLM Leaderboard, most comprehensive suite for comparing Open LLMs on many benchmarks, just released a comparator tool that lets you dig into the detail of differences between any models.", "raw": "> The Open LLM Leaderboard, most comprehensive suite for comparing Open LLMs on many benchmarks, just released a comparator tool that lets you dig into the detail of differences between any models.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Here's me checking how the new Llama-3.1-Nemotron-70B that we've heard so much compares to the original Llama-3.1-70B. ๐Ÿค”๐Ÿ”Ž", "raw": "Here's me checking how the new Llama-3.1-Nemotron-70B that we've heard so much compares to the original Llama-3.1-70B. ๐Ÿค”๐Ÿ”Ž", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Try it out here ๐Ÿ‘‰ ", "raw": "Try it out here ๐Ÿ‘‰ ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/open-llm-leaderboard/comparator", "href": null, "resource": { "type": "space", "id": "open-llm-leaderboard/comparator", "discussionNum": null }, "url": "https://huggingface.co/spaces/open-llm-leaderboard/comparator", "code": null, "user": null, "label": null, "lang": null } ]
By far the coolest release of the day! > The Open LLM Leaderboard, most comprehensive suite for comparing Open LLMs on many benchmarks, just released a comparator tool that lets you dig into the detail of differences between any models. Here's me checking how the new Llama-3.1-Nemotron-70B that we've heard so much compares to the original Llama-3.1-70B. ๐Ÿค”๐Ÿ”Ž Try it out here ๐Ÿ‘‰ https://huggingface.co/spaces/open-llm-leaderboard/comparator
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d10d4e8eaa4831005e92b5/7p7-OmWM6PqqCs7ZStPGD.jpeg", "fullname": "Aymeric Roucher", "name": "m-ric", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 494, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/63d10d4e8eaa4831005e92b5/6KN6wRJIMxRkI1l4F2SwZ.mp4" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "davidberenstein1957", "DmitryRyumin", "laelhalawani", "dvilasuero", "dross20" ], "count": 6 }, { "reaction": "๐Ÿ‘", "users": [ "LeroyDyer", "SandInTheDunes" ], "count": 2 }, { "reaction": "๐Ÿ˜Ž", "users": [ "LeroyDyer" ], "count": 1 } ]
2024-10-17T14:25:04.000Z
2024-10-18T15:10:30.530Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6403c20ddbfbea2a0540983b/AHxgTaS-FuFNnoGJBDgXI.jpeg", "fullname": "lael al-halawani", "name": "laelhalawani", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65d883893a52cd9bcd8ab7cf/tRsCJlHNZo1D02kBTmfy9.jpeg", "fullname": "leroy Samuel Dyer", "name": "LeroyDyer", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 84, "isFollowing": false } ]
/posts/m-ric/875962742909412
1,693
2
807595638767092
[ { "type": "text", "value": "100 followers? When did that happen?", "raw": "100 followers? When did that happen?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
100 followers? When did that happen?
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/659f000b83abded48e190901/BnXL_XYbVX6PHngfQLECW.png", "fullname": "Noa Roggendorff", "name": "nroggendorff", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 141, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ˜Ž", "users": [ "John6666", "davidberenstein1957", "AtAndDev", "robbiemu", "LeroyDyer", "den0620" ], "count": 6 }, { "reaction": "๐Ÿš€", "users": [ "AtAndDev", "nroggendorff" ], "count": 2 } ]
2024-10-17T13:56:27.000Z
2024-10-17T13:56:27.090Z
[]
/posts/nroggendorff/807595638767092
1,254
0
460360432918193
[ { "type": "text", "value": "๐Ÿšจ Weโ€™ve just released a new tool to compare the performance of models in the ๐Ÿค— Open LLM Leaderboard: the Comparator ๐ŸŽ‰", "raw": "๐Ÿšจ Weโ€™ve just released a new tool to compare the performance of models in the ๐Ÿค— Open LLM Leaderboard: the Comparator ๐ŸŽ‰", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/open-llm-leaderboard/comparator", "href": null, "resource": { "type": "space", "id": "open-llm-leaderboard/comparator", "discussionNum": null }, "url": "https://huggingface.co/spaces/open-llm-leaderboard/comparator", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Want to see how two different versions of LLaMA stack up? Letโ€™s walk through a step-by-step comparison of LLaMA-3.1 and LLaMA-3.2. ๐Ÿฆ™๐Ÿงต๐Ÿ‘‡", "raw": "Want to see how two different versions of LLaMA stack up? Letโ€™s walk through a step-by-step comparison of LLaMA-3.1 and LLaMA-3.2. ๐Ÿฆ™๐Ÿงต๐Ÿ‘‡", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1/ Load the Models' Results", "raw": "1/ Load the Models' Results", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Go to the ๐Ÿค— Open LLM Leaderboard Comparator: ", "raw": "- Go to the ๐Ÿค— Open LLM Leaderboard Comparator: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/open-llm-leaderboard/comparator", "href": null, "resource": { "type": "space", "id": "open-llm-leaderboard/comparator", "discussionNum": null }, "url": "https://huggingface.co/spaces/open-llm-leaderboard/comparator", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Search for \"LLaMA-3.1\" and \"LLaMA-3.2\" in the model dropdowns.", "raw": "- Search for \"LLaMA-3.1\" and \"LLaMA-3.2\" in the model dropdowns.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Press the Load button. 
Ready to dive into the results!", "raw": "- Press the Load button. Ready to dive into the results!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2/ Compare Metric Results in the Results Tab ๐Ÿ“Š", "raw": "2/ Compare Metric Results in the Results Tab ๐Ÿ“Š", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Head over to the Results tab.", "raw": "- Head over to the Results tab.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Here, youโ€™ll see the performance metrics for each model, beautifully color-coded using a gradient to highlight performance differences: greener is better! ๐ŸŒŸ", "raw": "- Here, youโ€™ll see the performance metrics for each model, beautifully color-coded using a gradient to highlight performance differences: greener is better! ๐ŸŒŸ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Want to focus on a specific task? Use the Task filter to hone in on comparisons for tasks like BBH or MMLU-Pro.", "raw": "- Want to focus on a specific task? 
Use the Task filter to hone in on comparisons for tasks like BBH or MMLU-Pro.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3/ Check Config Alignment in the Configs Tab โš™๏ธ", "raw": "3/ Check Config Alignment in the Configs Tab โš™๏ธ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- To ensure youโ€™re comparing apples to apples, head to the Configs tab.", "raw": "- To ensure youโ€™re comparing apples to apples, head to the Configs tab.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Review both modelsโ€™ evaluation configurations, such as metrics, datasets, prompts, few-shot configs...", "raw": "- Review both modelsโ€™ evaluation configurations, such as metrics, datasets, prompts, few-shot configs...", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- If something looks off, itโ€™s good to know before drawing conclusions! โœ…", "raw": "- If something looks off, itโ€™s good to know before drawing conclusions! โœ…", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "4/ Compare Predictions by Sample in the Details Tab ๐Ÿ”", "raw": "4/ Compare Predictions by Sample in the Details Tab ๐Ÿ”", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Curious about how each model responds to specific inputs? The Details tab is your go-to!", "raw": "- Curious about how each model responds to specific inputs? 
The Details tab is your go-to!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Select a Task (e.g., MuSR) and then a Subtask (e.g., Murder Mystery) and then press the Load Details button.", "raw": "- Select a Task (e.g., MuSR) and then a Subtask (e.g., Murder Mystery) and then press the Load Details button.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Check out the side-by-side predictions and dive into the nuances of each modelโ€™s outputs.", "raw": "- Check out the side-by-side predictions and dive into the nuances of each modelโ€™s outputs.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "5/ With this tool, itโ€™s never been easier to explore how small changes between model versions affect performance on a wide range of tasks. Whether youโ€™re a researcher or enthusiast, you can instantly visualize improvements and dive into detailed comparisons.", "raw": "5/ With this tool, itโ€™s never been easier to explore how small changes between model versions affect performance on a wide range of tasks. Whether youโ€™re a researcher or enthusiast, you can instantly visualize improvements and dive into detailed comparisons.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿš€ Try the ๐Ÿค— Open LLM Leaderboard Comparator now and take your model evaluations to the next level!", "raw": "๐Ÿš€ Try the ๐Ÿค— Open LLM Leaderboard Comparator now and take your model evaluations to the next level!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
๐Ÿšจ Weโ€™ve just released a new tool to compare the performance of models in the ๐Ÿค— Open LLM Leaderboard: the Comparator ๐ŸŽ‰ https://huggingface.co/spaces/open-llm-leaderboard/comparator Want to see how two different versions of LLaMA stack up? Letโ€™s walk through a step-by-step comparison of LLaMA-3.1 and LLaMA-3.2. ๐Ÿฆ™๐Ÿงต๐Ÿ‘‡ 1/ Load the Models' Results - Go to the ๐Ÿค— Open LLM Leaderboard Comparator: https://huggingface.co/spaces/open-llm-leaderboard/comparator - Search for "LLaMA-3.1" and "LLaMA-3.2" in the model dropdowns. - Press the Load button. Ready to dive into the results! 2/ Compare Metric Results in the Results Tab ๐Ÿ“Š - Head over to the Results tab. - Here, youโ€™ll see the performance metrics for each model, beautifully color-coded using a gradient to highlight performance differences: greener is better! ๐ŸŒŸ - Want to focus on a specific task? Use the Task filter to hone in on comparisons for tasks like BBH or MMLU-Pro. 3/ Check Config Alignment in the Configs Tab โš™๏ธ - To ensure youโ€™re comparing apples to apples, head to the Configs tab. - Review both modelsโ€™ evaluation configurations, such as metrics, datasets, prompts, few-shot configs... - If something looks off, itโ€™s good to know before drawing conclusions! โœ… 4/ Compare Predictions by Sample in the Details Tab ๐Ÿ” - Curious about how each model responds to specific inputs? The Details tab is your go-to! - Select a Task (e.g., MuSR) and then a Subtask (e.g., Murder Mystery) and then press the Load Details button. - Check out the side-by-side predictions and dive into the nuances of each modelโ€™s outputs. 5/ With this tool, itโ€™s never been easier to explore how small changes between model versions affect performance on a wide range of tasks. Whether youโ€™re a researcher or enthusiast, you can instantly visualize improvements and dive into detailed comparisons. ๐Ÿš€ Try the ๐Ÿค— Open LLM Leaderboard Comparator now and take your model evaluations to the next level!
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1606406298765-noauth.jpeg", "fullname": "Albert Villanova del Moral", "name": "albertvillanova", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 196, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘", "users": [ "m4r14n3s", "akjindal53244", "DmitryRyumin", "dvilasuero", "gabrielmbmb", "Joseph717171", "louisbrulenaudet" ], "count": 7 }, { "reaction": "๐Ÿ‘€", "users": [ "John6666", "Joseph717171" ], "count": 2 } ]
2024-10-17T13:24:58.000Z
2024-10-17T13:24:58.120Z
[]
/posts/albertvillanova/460360432918193
1,931
0
158055447248984
[ { "type": "text", "value": "This has to be the first peak performance level use case of a non-autoregressive architecture for TTS. Flow matching for the win!!", "raw": "This has to be the first peak performance level use case of a non-autoregressive architecture for TTS. Flow matching for the win!!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Demo: ", "raw": "Demo: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/mrfakename/E2-F5-TTS", "href": null, "resource": { "type": "space", "id": "mrfakename/E2-F5-TTS", "discussionNum": null }, "url": "https://huggingface.co/spaces/mrfakename/E2-F5-TTS", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Model: ", "raw": "Model: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/SWivid/E2-TTS", "href": null, "resource": { "type": "model", "id": "SWivid/E2-TTS", "discussionNum": null }, "url": "https://huggingface.co/SWivid/E2-TTS", "code": null, "user": null, "label": null, "lang": null } ]
This has to be the first peak performance level use case of a non-autoregressive architecture for TTS. Flow matching for the win!! Demo: https://huggingface.co/spaces/mrfakename/E2-F5-TTS Model: https://huggingface.co/SWivid/E2-TTS
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6438a9027de34e8ea7e4b257/vib8QSd1AWMr_bR9ig_xJ.jpeg", "fullname": "Jaward Sesay", "name": "Jaward", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 191, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/_BAn6zpN2BBJnQCP50vrx.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/7o2SieFEKg6I3OqfTexHK.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/fvYfWQOIsV_oOgTtV-YTx.jpeg" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-10-17T12:00:07.000Z
2024-10-17T12:00:07.501Z
[]
/posts/Jaward/158055447248984
401
0
287217970924668
[ { "type": "text", "value": "All the way from Korea, a novel approach called Mentor-KD significantly improves the reasoning abilities of small language models.", "raw": "All the way from Korea, a novel approach called Mentor-KD significantly improves the reasoning abilities of small language models.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Mentor-KD introduces an intermediate-sized \"mentor\" model to augment training data and provide soft labels during knowledge distillation from large language models (LLMs) to smaller models.", "raw": "Mentor-KD introduces an intermediate-sized \"mentor\" model to augment training data and provide soft labels during knowledge distillation from large language models (LLMs) to smaller models.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Broadly, itโ€™s a two-stage process:", "raw": "Broadly, itโ€™s a two-stage process:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1) Fine-tune the mentor on filtered Chain-of-Thought (CoT) annotations from an LLM teacher. ", "raw": "1) Fine-tune the mentor on filtered Chain-of-Thought (CoT) annotations from an LLM teacher. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2) Use the mentor to generate additional CoT rationales and soft probability distributions.", "raw": "2) Use the mentor to generate additional CoT rationales and soft probability distributions.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The student model is then trained using:", "raw": "The student model is then trained using:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- CoT rationales from both the teacher and mentor (rationale distillation). 
", "raw": "- CoT rationales from both the teacher and mentor (rationale distillation). ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Soft labels from the mentor (soft label distillation).", "raw": "- Soft labels from the mentor (soft label distillation).", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Results show that Mentor-KD consistently outperforms baselines, with up to 5% accuracy gains on some tasks.", "raw": "Results show that Mentor-KD consistently outperforms baselines, with up to 5% accuracy gains on some tasks.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Mentor-KD is especially effective in low-resource scenarios, achieving comparable performance to baselines while using only 40% of the original training data.", "raw": "Mentor-KD is especially effective in low-resource scenarios, achieving comparable performance to baselines while using only 40% of the original training data.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This work opens up exciting possibilities for making smaller, more efficient language models better at complex reasoning tasks.", "raw": "This work opens up exciting possibilities for making smaller, more efficient language models better at complex reasoning tasks.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "What are your thoughts on this approach?", "raw": "What are your thoughts on this approach?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
All the way from Korea, a novel approach called Mentor-KD significantly improves the reasoning abilities of small language models. Mentor-KD introduces an intermediate-sized "mentor" model to augment training data and provide soft labels during knowledge distillation from large language models (LLMs) to smaller models. Broadly, itโ€™s a two-stage process: 1) Fine-tune the mentor on filtered Chain-of-Thought (CoT) annotations from an LLM teacher. 2) Use the mentor to generate additional CoT rationales and soft probability distributions. The student model is then trained using: - CoT rationales from both the teacher and mentor (rationale distillation). - Soft labels from the mentor (soft label distillation). Results show that Mentor-KD consistently outperforms baselines, with up to 5% accuracy gains on some tasks. Mentor-KD is especially effective in low-resource scenarios, achieving comparable performance to baselines while using only 40% of the original training data. This work opens up exciting possibilities for making smaller, more efficient language models better at complex reasoning tasks. What are your thoughts on this approach?
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662bf5bfe93bb73804ef9344/WXYLnjjJ4SROkoveIi7If.png", "fullname": "Kuldeep Singh Sidhu", "name": "singhsidhukuldeep", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 219, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/662bf5bfe93bb73804ef9344/3WF-E3Z52t4YxkIw2VUvA.jpeg" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-10-17T10:39:16.000Z
2024-10-17T10:39:16.062Z
[]
/posts/singhsidhukuldeep/287217970924668
394
0
158124931831866
[ { "type": "resource", "value": null, "raw": "https://huggingface.co/nvidia/Llama-3.1-Nemotron-70B-Instruct-HF", "href": null, "resource": { "type": "model", "id": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF", "discussionNum": null }, "url": "https://huggingface.co/nvidia/Llama-3.1-Nemotron-70B-Instruct-HF", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " is now available in HuggingChat if you want to try it!", "raw": " is now available in HuggingChat if you want to try it!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/chat/models/nvidia/Llama-3.1-Nemotron-70B-Instruct-HF", "href": "https://huggingface.co/chat/models/nvidia/Llama-3.1-Nemotron-70B-Instruct-HF", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
https://huggingface.co/nvidia/Llama-3.1-Nemotron-70B-Instruct-HF is now available in HuggingChat if you want to try it! https://huggingface.co/chat/models/nvidia/Llama-3.1-Nemotron-70B-Instruct-HF
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f17f0a0925b9863e28ad517/X7QKoiXbUtEZSG9jyvfk3.jpeg", "fullname": "Victor Mustar", "name": "victor", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 2607, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5f17f0a0925b9863e28ad517/g7jvwSHAIWx8-Yd-fO6Bk.png" } ]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "KingNish", "prithivMLmods", "jgitsolutions", "John6666", "adorkin", "AtAndDev", "clem" ], "count": 7 }, { "reaction": "๐Ÿš€", "users": [ "John6666", "AtAndDev" ], "count": 2 } ]
2024-10-17T08:18:36.000Z
2024-10-17T08:18:36.340Z
[]
/posts/victor/158124931831866
1,648
0
399387871318842
[ { "type": "text", "value": "๐Ÿšจ I have $3,500 in Azure credits, including access to an H100 (96 Go), expiring on November 12, 2024. ", "raw": "๐Ÿšจ I have $3,500 in Azure credits, including access to an H100 (96 Go), expiring on November 12, 2024. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I wonโ€™t be able to use it all myself, so Iโ€™m reaching out to the ", "raw": "I wonโ€™t be able to use it all myself, so Iโ€™m reaching out to the ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@huggingface", "href": null, "resource": null, "url": null, "code": null, "user": "huggingface", "label": null, "lang": null }, { "type": "text", "value": " community: Are there any open-source projets with data ready for some compute power?", "raw": " community: Are there any open-source projets with data ready for some compute power?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " Letโ€™s collaborate and make the most of it together ๐Ÿ”—", "raw": " Letโ€™s collaborate and make the most of it together ๐Ÿ”—", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
๐Ÿšจ I have $3,500 in Azure credits, including access to an H100 (96 Go), expiring on November 12, 2024. I wonโ€™t be able to use it all myself, so Iโ€™m reaching out to the @huggingface community: Are there any open-source projets with data ready for some compute power? Letโ€™s collaborate and make the most of it together ๐Ÿ”—
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6459fa0f5b3111fbe83286e1/UhCa7JNbtTjC6dgOjZtH0.jpeg", "fullname": "Louis Brulรฉ Naudet", "name": "louisbrulenaudet", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 174, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6459fa0f5b3111fbe83286e1/_knB1bTdLPAIkJMvY4YkI.jpeg" } ]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "edgar222", "jgitsolutions", "robbiemu", "oza75", "julien-c", "RickyPossum", "AtAndDev", "xi0v", "den0620", "louisbrulenaudet" ], "count": 10 }, { "reaction": "๐Ÿ‘€", "users": [ "John6666", "cdnuts", "kroonen", "AtAndDev", "xi0v" ], "count": 5 } ]
2024-10-17T06:55:11.000Z
2024-10-29T22:52:08.080Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65a76f3cac3a06f3e8bdf9f5/hYTFOYj1Pca0ZOugSE42o.jpeg", "fullname": "Pankaj Singh", "name": "Pankaj8922", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2, "isFollowing": false }, { "avatarUrl": "/avatars/1b2e6f3ea2bac5ab35dbd53edb7f8cf2.svg", "fullname": "Siddartha Pullakhandam", "name": "Siddartha10", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3, "isFollowing": false }, { "avatarUrl": "/avatars/82b00fa5ae10bdecc6b927c2aa177613.svg", "fullname": "Cuong Vu", "name": "vunhucuongit", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/630f3e4002ce39336c411048/FXJON7b-aRUiH0_V2uRsi.jpeg", "fullname": "alkinun", "name": "AtAndDev", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 19, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65bf2183e5330789930d8eb8/1_2RVCGBpsFHv2GGQ2K-F.jpeg", "fullname": "Arthur Souza Rodrigues", "name": "arthrod", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 13, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6459fa0f5b3111fbe83286e1/UhCa7JNbtTjC6dgOjZtH0.jpeg", "fullname": "Louis Brulรฉ Naudet", "name": "louisbrulenaudet", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 174, "isFollowing": false }, { "avatarUrl": "/avatars/9932a4a07dbfe15a152f847b51d06e8a.svg", "fullname": "Baki", "name": "hangingardens", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/louisbrulenaudet/399387871318842
3,094
9
504398154047309
[ { "type": "text", "value": "I am here to provide you with the premium codes you want just by informing me your requirements. #program #AI #code", "raw": "I am here to provide you with the premium codes you want just by informing me your requirements. #program #AI #code", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Premium_Code", "raw": "Premium_Code", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://hf.co/chat/assistant/670fd7b2ad7fdbb38ff98102", "href": "https://hf.co/chat/assistant/670fd7b2ad7fdbb38ff98102", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
I am here to provide you with the premium codes you want just by informing me your requirements. #program #AI #code Premium_Code https://hf.co/chat/assistant/670fd7b2ad7fdbb38ff98102
{ "avatarUrl": "/avatars/d773a7dd9b706759131fc482ab71ced7.svg", "fullname": "[email protected]", "name": "Taf2023", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 8, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64841af2295256340e4b9f88/9ir5nncB5XhFRNl8Ozm9D.jpeg" } ]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "jhaayus" ], "count": 1 }, { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-10-17T00:24:15.000Z
2024-10-17T00:24:15.894Z
[]
/posts/Taf2023/504398154047309
1,134
0
358729012419970
[ { "type": "text", "value": "Hi there HuggingFacers!", "raw": "Hi there HuggingFacers!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Have you ever dreamt of an improbable books crossover, like Frodo from ๐˜“๐˜ฐ๐˜ณ๐˜ฅ ๐˜ฐ๐˜ง ๐˜ต๐˜ฉ๐˜ฆ ๐˜™๐˜ช๐˜ฏ๐˜จ๐˜ด becoming the main character of the ๐˜–๐˜ฅ๐˜บ๐˜ด๐˜ด๐˜ฆ๐˜บ or Emma Bovary from ๐˜”๐˜ข๐˜ฅ๐˜ข๐˜ฎ๐˜ฆ ๐˜‰๐˜ฐ๐˜ท๐˜ข๐˜ณ๐˜บ acting as a modern-days Shakespearean Juliet?", "raw": "Have you ever dreamt of an improbable books crossover, like Frodo from ๐˜“๐˜ฐ๐˜ณ๐˜ฅ ๐˜ฐ๐˜ง ๐˜ต๐˜ฉ๐˜ฆ ๐˜™๐˜ช๐˜ฏ๐˜จ๐˜ด becoming the main character of the ๐˜–๐˜ฅ๐˜บ๐˜ด๐˜ด๐˜ฆ๐˜บ or Emma Bovary from ๐˜”๐˜ข๐˜ฅ๐˜ข๐˜ฎ๐˜ฆ ๐˜‰๐˜ฐ๐˜ท๐˜ข๐˜ณ๐˜บ acting as a modern-days Shakespearean Juliet?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Well, all of this is now possible! I'm thrilled to introduce my latest opensource product for storytelling: ๐›๐จ๐จ๐ค๐ฌ-๐ฆ๐ข๐ฑ๐ž๐ซ-๐š๐ข ๐ฏ๐ŸŽ.๐ŸŽ.๐ŸŽ !", "raw": "Well, all of this is now possible! 
I'm thrilled to introduce my latest opensource product for storytelling: ๐›๐จ๐จ๐ค๐ฌ-๐ฆ๐ข๐ฑ๐ž๐ซ-๐š๐ข ๐ฏ๐ŸŽ.๐ŸŽ.๐ŸŽ !", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Built with ReactJS and shipped directly to you on Spaces thanks to Docker, this webapp combines the power of two AI tools:", "raw": "Built with ReactJS and shipped directly to you on Spaces thanks to Docker, this webapp combines the power of two AI tools:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- gpt-4o-mini by OpenAI, which takes care of cooking new and intriguing plots starting from the user's instructions, the titles and the summaries of the two books to mix (summaries are scraped through Wikipedia)", "raw": "- gpt-4o-mini by OpenAI, which takes care of cooking new and intriguing plots starting from the user's instructions, the titles and the summaries of the two books to mix (summaries are scraped through Wikipedia)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- text2img realtime API by ModelsLab, which provides a stable diffusion pipeline to create a thumbnail for your newly-generated story", "raw": "- text2img realtime API by ModelsLab, which provides a stable diffusion pipeline to create a thumbnail for your newly-generated story", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Everything is provided under a simple and intuitive UI, which uses chatscope's React template kit.", "raw": "Everything is provided under a simple and intuitive UI, which uses chatscope's React template kit.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Curious of trying? The app is already live at:", "raw": "Curious of trying? 
The app is already live at:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/as-cle-bert/books-mixer-ai", "href": null, "resource": { "type": "space", "id": "as-cle-bert/books-mixer-ai", "discussionNum": null }, "url": "https://huggingface.co/spaces/as-cle-bert/books-mixer-ai", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "And you can also have a tour of the GitHub repo (and leave a little โญ while you're there):", "raw": "And you can also have a tour of the GitHub repo (and leave a little โญ while you're there):", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/AstraBert/books-mixer-ai", "href": "https://github.com/AstraBert/books-mixer-ai", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The documentation is still under construction, but will become available soon๐Ÿ˜Š", "raw": "The documentation is still under construction, but will become available soon๐Ÿ˜Š", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Have fun!๐Ÿ“š๐Ÿ“š", "raw": "Have fun!๐Ÿ“š๐Ÿ“š", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Hi there HuggingFacers! Have you ever dreamt of an improbable books crossover, like Frodo from ๐˜“๐˜ฐ๐˜ณ๐˜ฅ ๐˜ฐ๐˜ง ๐˜ต๐˜ฉ๐˜ฆ ๐˜™๐˜ช๐˜ฏ๐˜จ๐˜ด becoming the main character of the ๐˜–๐˜ฅ๐˜บ๐˜ด๐˜ด๐˜ฆ๐˜บ or Emma Bovary from ๐˜”๐˜ข๐˜ฅ๐˜ข๐˜ฎ๐˜ฆ ๐˜‰๐˜ฐ๐˜ท๐˜ข๐˜ณ๐˜บ acting as a modern-days Shakespearean Juliet? Well, all of this is now possible! I'm thrilled to introduce my latest opensource product for storytelling: ๐›๐จ๐จ๐ค๐ฌ-๐ฆ๐ข๐ฑ๐ž๐ซ-๐š๐ข ๐ฏ๐ŸŽ.๐ŸŽ.๐ŸŽ ! Built with ReactJS and shipped directly to you on Spaces thanks to Docker, this webapp combines the power of two AI tools: - gpt-4o-mini by OpenAI, which takes care of cooking new and intriguing plots starting from the user's instructions, the titles and the summaries of the two books to mix (summaries are scraped through Wikipedia) - text2img realtime API by ModelsLab, which provides a stable diffusion pipeline to create a thumbnail for your newly-generated story Everything is provided under a simple and intuitive UI, which uses chatscope's React template kit. Curious of trying? The app is already live at: https://huggingface.co/spaces/as-cle-bert/books-mixer-ai And you can also have a tour of the GitHub repo (and leave a little โญ while you're there): https://github.com/AstraBert/books-mixer-ai The documentation is still under construction, but will become available soon๐Ÿ˜Š Have fun!๐Ÿ“š๐Ÿ“š
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65e330e7edc2f7306e252448/ucpk9c8x0UafGM4mXTrRy.jpeg", "fullname": "Astra Clelia Bertelli", "name": "as-cle-bert", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 650, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/65e330e7edc2f7306e252448/GPJRhtNqulKsBbVgzOF42.png" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "iamrobotbear" ], "count": 2 }, { "reaction": "๐Ÿง ", "users": [ "awacke1" ], "count": 1 }, { "reaction": "๐Ÿ‘", "users": [ "introvoyz041" ], "count": 1 } ]
2024-10-17T00:03:03.000Z
2024-10-17T00:03:03.513Z
[]
/posts/as-cle-bert/358729012419970
1,349
0
847164896381837
[ { "type": "text", "value": "Today I was able to solve a very difficult coding session with GPT-4o which ended up solving integrations on a very large scale. So I decided to look a bit more into how its reasoners work. Below is a fun markdown emoji outline about what I learned today and what I'm pursuing. ", "raw": "Today I was able to solve a very difficult coding session with GPT-4o which ended up solving integrations on a very large scale. So I decided to look a bit more into how its reasoners work. Below is a fun markdown emoji outline about what I learned today and what I'm pursuing. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Hope you enjoy! Cheers, Aaron.", "raw": "Hope you enjoy! Cheers, Aaron.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Also here are my favorite last 4 spaces I am working on:", "raw": "Also here are my favorite last 4 spaces I am working on:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1. GPT4O: ", "raw": "1. GPT4O: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/awacke1/GPT-4o-omni-text-audio-image-video", "href": null, "resource": { "type": "space", "id": "awacke1/GPT-4o-omni-text-audio-image-video", "discussionNum": null }, "url": "https://huggingface.co/spaces/awacke1/GPT-4o-omni-text-audio-image-video", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2. Claude: ", "raw": "2. 
Claude: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/awacke1/AnthropicClaude3.5Sonnet-ACW", "href": null, "resource": { "type": "space", "id": "awacke1/AnthropicClaude3.5Sonnet-ACW", "discussionNum": null }, "url": "https://huggingface.co/spaces/awacke1/AnthropicClaude3.5Sonnet-ACW", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3. MSGraph M365: ", "raw": "3. MSGraph M365: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/awacke1/MSGraphAPI", "href": null, "resource": { "type": "space", "id": "awacke1/MSGraphAPI", "discussionNum": null }, "url": "https://huggingface.co/spaces/awacke1/MSGraphAPI", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "4. Azure Cosmos DB: Now with Research AI! ", "raw": "4. Azure Cosmos DB: Now with Research AI! ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/awacke1/AzureCosmosDBUI", "href": null, "resource": { "type": "space", "id": "awacke1/AzureCosmosDBUI", "discussionNum": null }, "url": "https://huggingface.co/spaces/awacke1/AzureCosmosDBUI", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "# ๐Ÿš€ OpenAI's O1 Models: A Quantum Leap in AI", "raw": "# ๐Ÿš€ OpenAI's O1 Models: A Quantum Leap in AI", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "## 1. ๐Ÿค” From ๐Ÿฆœ to ๐Ÿง : O1's Evolution", "raw": "## 1. 
๐Ÿค” From ๐Ÿฆœ to ๐Ÿง : O1's Evolution", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- **Thinking AI**: O1 ponders before replying; GPT models just predict. ๐Ÿ’ก", "raw": "- **Thinking AI**: O1 ponders before replying; GPT models just predict. ๐Ÿ’ก", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "## 2. ๐Ÿ“š AI Memory: ๐Ÿ’พ + ๐Ÿงฉ = ๐Ÿง ", "raw": "## 2. ๐Ÿ“š AI Memory: ๐Ÿ’พ + ๐Ÿงฉ = ๐Ÿง ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- **Embeddings & Tokens**: Words โžก๏ธ vectors, building knowledge. ๐Ÿ“–", "raw": "- **Embeddings & Tokens**: Words โžก๏ธ vectors, building knowledge. ๐Ÿ“–", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "## 3. ๐Ÿ” Swift Knowledge Retrieval", "raw": "## 3. ๐Ÿ” Swift Knowledge Retrieval", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- **Vector Search & Indexing**: O1 finds info fast, citing reliable sources. ๐Ÿ”Ž๐Ÿ“–", "raw": "- **Vector Search & Indexing**: O1 finds info fast, citing reliable sources. ๐Ÿ”Ž๐Ÿ“–", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "## 4. ๐ŸŒณ Logic Trees with Mermaid Models", "raw": "## 4. 
๐ŸŒณ Logic Trees with Mermaid Models", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- **Flowchart Reasoning**: O1 structures thoughts like diagrams. ๐ŸŽจ๐ŸŒ", "raw": "- **Flowchart Reasoning**: O1 structures thoughts like diagrams. ๐ŸŽจ๐ŸŒ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "## 5. ๐Ÿ’ป Coding Mastery", "raw": "## 5. ๐Ÿ’ป Coding Mastery", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- **Multilingual & Current**: Speaks many code languages, always up-to-date. ๐Ÿ’ป๐Ÿ”„", "raw": "- **Multilingual & Current**: Speaks many code languages, always up-to-date. ๐Ÿ’ป๐Ÿ”„", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "## 6. ๐Ÿ† Breaking Records", "raw": "## 6. ๐Ÿ† Breaking Records", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- **92.3% MMLU Score**: O1 outperforms humans, setting new AI standards. ๐Ÿ…", "raw": "- **92.3% MMLU Score**: O1 outperforms humans, setting new AI standards. ๐Ÿ…", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "## 7. ๐Ÿ’ก Versatile Applications", "raw": "## 7. 
๐Ÿ’ก Versatile Applications", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- **Ultimate Assistant**: From fixing code to advancing research. ๐Ÿ› ๏ธ๐Ÿ”ฌ", "raw": "- **Ultimate Assistant**: From fixing code to advancing research. ๐Ÿ› ๏ธ๐Ÿ”ฌ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "## 8. ๐Ÿ Racing Toward AGI", "raw": "## 8. ๐Ÿ Racing Toward AGI", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- **OpenAI Leads**: O1 brings us closer to true AI intelligence. ๐Ÿš€", "raw": "- **OpenAI Leads**: O1 brings us closer to true AI intelligence. ๐Ÿš€", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "## 9. ๐Ÿค– O1's Reasoning Pillars", "raw": "## 9. 
๐Ÿค– O1's Reasoning Pillars", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- **๐Ÿง  Chain of Thought**: Step-by-step logic.", "raw": "- **๐Ÿง  Chain of Thought**: Step-by-step logic.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- **๐ŸŽฒ MCTS**: Simulates options, picks best path.", "raw": "- **๐ŸŽฒ MCTS**: Simulates options, picks best path.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- **๐Ÿ” Reflection**: Self-improves autonomously.", "raw": "- **๐Ÿ” Reflection**: Self-improves autonomously.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- **๐Ÿ‹๏ธโ€โ™‚๏ธ Reinforcement Learning**: Gets smarter over time.", "raw": "- **๐Ÿ‹๏ธโ€โ™‚๏ธ Reinforcement Learning**: Gets smarter over time.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "---", "raw": "---", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "*Stay curious, keep coding!* ๐Ÿš€", "raw": "*Stay curious, keep coding!* ๐Ÿš€", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Today I worked through a very difficult coding session with GPT-4o, which ended up solving integrations at a very large scale. So I decided to look a bit more into how its reasoners work. Below is a fun markdown emoji outline of what I learned today and what I'm pursuing. Hope you enjoy! Cheers, Aaron. Also, here are my 4 favorite spaces that I am currently working on: 1. GPT-4o: https://huggingface.co/spaces/awacke1/GPT-4o-omni-text-audio-image-video 2. Claude: https://huggingface.co/spaces/awacke1/AnthropicClaude3.5Sonnet-ACW 3. MSGraph M365: https://huggingface.co/spaces/awacke1/MSGraphAPI 4. Azure Cosmos DB: Now with Research AI! https://huggingface.co/spaces/awacke1/AzureCosmosDBUI # 🚀 OpenAI's O1 Models: A Quantum Leap in AI ## 1. 🤔 From 🦜 to 🧠: O1's Evolution - **Thinking AI**: O1 ponders before replying; GPT models just predict. 💡 ## 2. 📚 AI Memory: 💾 + 🧩 = 🧠 - **Embeddings & Tokens**: Words ➡️ vectors, building knowledge. 📖 ## 3. 🔍 Swift Knowledge Retrieval - **Vector Search & Indexing**: O1 finds info fast, citing reliable sources. 🔎📖 ## 4. 🌳 Logic Trees with Mermaid Models - **Flowchart Reasoning**: O1 structures thoughts like diagrams. 🎨🌐 ## 5. 💻 Coding Mastery - **Multilingual & Current**: Speaks many code languages, always up to date. 💻🔄 ## 6. 🏆 Breaking Records - **92.3% MMLU Score**: O1 outperforms humans, setting new AI standards. 🏅 ## 7. 💡 Versatile Applications - **Ultimate Assistant**: From fixing code to advancing research. 🛠️🔬 ## 8. 🏁 Racing Toward AGI - **OpenAI Leads**: O1 brings us closer to true AI intelligence. 🚀 ## 9. 🤖 O1's Reasoning Pillars - **🧠 Chain of Thought**: Step-by-step logic. - **🎲 MCTS**: Simulates options, picks the best path. - **🔁 Reflection**: Self-improves autonomously. - **🏋️‍♂️ Reinforcement Learning**: Gets smarter over time. --- *Stay curious, keep coding!* 🚀
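For readers who want to see what the "Embeddings & Tokens" and "Vector Search & Indexing" bullets above look like in practice, here is a minimal, illustrative sketch. It assumes the sentence-transformers package; the model name and the toy documents are placeholders, and none of this is a claim about how O1 is actually implemented:

```python
# Illustrative embedding + vector-search sketch (assumes the sentence-transformers
# package; the model name and documents are placeholders, not anything O1 uses).
import numpy as np
from sentence_transformers import SentenceTransformer

docs = [
    "Chain-of-thought prompting walks through a problem step by step.",
    "GGUF is a file format used by llama.cpp for quantized models.",
    "Azure Cosmos DB is a globally distributed database service.",
]

model = SentenceTransformer("all-MiniLM-L6-v2")           # assumed embedding model
doc_vecs = model.encode(docs, normalize_embeddings=True)  # sentences -> vectors

query = "How can a model reason step by step before answering?"
query_vec = model.encode([query], normalize_embeddings=True)[0]

# On normalized vectors, cosine similarity is just a dot product.
scores = doc_vecs @ query_vec
best = int(np.argmax(scores))
print(f"best match (score {scores[best]:.3f}): {docs[best]}")
```

On normalized vectors, cosine similarity reduces to a dot product, which is the property large-scale vector indexes exploit for fast retrieval.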
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1656147940537-620630b603825909dcbeba35.jpeg", "fullname": "Aaron C Wacker", "name": "awacke1", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 185, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/w0sQsuk0PTM_cUNU8EAGS.png" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-10-16T23:20:13.000Z
2024-10-17T07:47:09.433Z
[ { "avatarUrl": "/avatars/744eddaa7dfc34a57df9ce32a78059a0.svg", "fullname": "Tyrone Pierce", "name": "piercyy", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3, "isFollowing": false } ]
/posts/awacke1/847164896381837
696
1
614979545779626
[ { "type": "text", "value": "In regards to the latest mistral model and GGUFs for it:", "raw": "In regards to the latest mistral model and GGUFs for it:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Yes, they may be subpar and may require changes to llama.cpp to support the interleaved sliding window", "raw": "Yes, they may be subpar and may require changes to llama.cpp to support the interleaved sliding window", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Yes, I got excited when a conversion worked and released them ASAP", "raw": "Yes, I got excited when a conversion worked and released them ASAP", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "That said, generation seems to work right now and seems to mimic the output from spaces that are running the original model", "raw": "That said, generation seems to work right now and seems to mimic the output from spaces that are running the original model", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I have appended -TEST to the model names in an attempt to indicate that they are not final or perfect, but if people still feel mislead and that it's not the right thing to do, please post (civilly) below your thoughts, I will highly consider pulling the conversions if that's what people think is best. After all, that's what I'm here for, in service to you all !", "raw": "I have appended -TEST to the model names in an attempt to indicate that they are not final or perfect, but if people still feel mislead and that it's not the right thing to do, please post (civilly) below your thoughts, I will highly consider pulling the conversions if that's what people think is best. After all, that's what I'm here for, in service to you all !", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
In regard to the latest Mistral model and the GGUFs for it: Yes, they may be subpar and may require changes to llama.cpp to support the interleaved sliding window. Yes, I got excited when a conversion worked and released them ASAP. That said, generation seems to work right now and seems to mimic the output from spaces that are running the original model. I have appended -TEST to the model names in an attempt to indicate that they are not final or perfect, but if people still feel misled and think it's not the right thing to do, please post (civilly) your thoughts below; I will seriously consider pulling the conversions if that's what people think is best. After all, that's what I'm here for, in service to you all!
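If you want to sanity-check one of the -TEST conversions yourself, a quick generation smoke test along these lines is usually enough to catch obvious breakage. The llama-cpp-python bindings are assumed and the filename is a placeholder; note that missing interleaved-sliding-window support could still hurt long-context quality even when short generations look fine:

```python
# Quick smoke test for a downloaded GGUF conversion (assumes the llama-cpp-python
# bindings; the model filename is a placeholder for whichever -TEST quant you grabbed).
from llama_cpp import Llama

llm = Llama(
    model_path="./Mistral-Small-Instruct-TEST-Q4_K_M.gguf",  # hypothetical filename
    n_ctx=4096,       # keep context modest; sliding-window support is the open question
    verbose=False,
)

out = llm(
    "Explain in one sentence what sliding-window attention does.",
    max_tokens=128,
    temperature=0.7,
)
print(out["choices"][0]["text"].strip())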
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6435718aaaef013d1aec3b8b/XKf-8MA47tjVAM6SCX0MP.jpeg", "fullname": "Bartowski", "name": "bartowski", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 2816, "isFollowing": false }
[]
[]
[ { "reaction": "โค๏ธ", "users": [ "YaTharThShaRma999", "johnnyc3p", "John6666", "SporkySporkness", "Delta-Vector", "Firepal3D", "vonjack", "driib", "vsenn", "G30", "bruceunx", "victor", "aziztbz", "Maxxim69", "pierpaolo" ], "count": 15 }, { "reaction": "๐Ÿ”ฅ", "users": [ "awacke1", "John6666", "Delta-Vector", "GoDjMike", "G30" ], "count": 5 }, { "reaction": "๐Ÿ‘", "users": [ "Yuma42" ], "count": 1 } ]
2024-10-16T22:47:30.000Z
2024-10-21T20:36:54.599Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/fOsPIFqHjBq3gsx3_6lHd.png", "fullname": "Chad Canning", "name": "fusi0n", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/66c26b6fb01b19d8c3c2467b/HIcQYcU6rOilwbuRCRStm.jpeg", "fullname": "DV", "name": "Delta-Vector", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 19, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6435718aaaef013d1aec3b8b/XKf-8MA47tjVAM6SCX0MP.jpeg", "fullname": "Bartowski", "name": "bartowski", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 2816, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/iB4Kdg31amSkGhSik6fv2.jpeg", "fullname": "Jรถrmungandr", "name": "Midgardsormr", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false }, { "avatarUrl": "/avatars/c53d7b7ee7bf757dea5f847e62ea96b9.svg", "fullname": "Ding Dong", "name": "xxx31dingdong", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2, "isFollowing": false }, { "avatarUrl": "/avatars/c7e4979f04fda14b73a43c398ce7da27.svg", "fullname": "Nurb4000", "name": "Nurb4000", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2, "isFollowing": false } ]
/posts/bartowski/614979545779626
16,937
6
271267147746650
[ { "type": "text", "value": "๐Ÿ”ฅ Meta AI just blessed us with CoTracker v3, bleeding edge point tracking foundation model ๐Ÿคฉ", "raw": "๐Ÿ”ฅ Meta AI just blessed us with CoTracker v3, bleeding edge point tracking foundation model ๐Ÿคฉ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "model: ", "raw": "model: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/facebook/cotracker3", "href": null, "resource": { "type": "model", "id": "facebook/cotracker3", "discussionNum": null }, "url": "https://huggingface.co/facebook/cotracker3", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "demo: ", "raw": "demo: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/facebook/cotracker", "href": null, "resource": { "type": "space", "id": "facebook/cotracker", "discussionNum": null }, "url": "https://huggingface.co/spaces/facebook/cotracker", "code": null, "user": null, "label": null, "lang": null } ]
๐Ÿ”ฅ Meta AI just blessed us with CoTracker v3, bleeding edge point tracking foundation model ๐Ÿคฉ model: https://huggingface.co/facebook/cotracker3 demo: https://huggingface.co/spaces/facebook/cotracker
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648113222875-6141a88b3a0ec78603c9e784.png", "fullname": "Merve Noyan", "name": "merve", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5589, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "jgitsolutions", "KvrParaskevi", "Rostenbach", "LeonceNsh" ], "count": 5 }, { "reaction": "๐Ÿš€", "users": [ "Ar4ikov" ], "count": 1 } ]
2024-10-16T21:00:37.000Z
2024-10-16T21:00:37.858Z
[]
/posts/merve/271267147746650
1,798
0
972049092550324
[ { "type": "text", "value": "Today I found out about the existence of ", "raw": "Today I found out about the existence of ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/utter-project/EuroLLM-1.7B-Instruct", "href": null, "resource": { "type": "model", "id": "utter-project/EuroLLM-1.7B-Instruct", "discussionNum": null }, "url": "https://huggingface.co/utter-project/EuroLLM-1.7B-Instruct", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " and unexpectedly it is really good. I think it's a very underrated model - give it a try ", "raw": " and unexpectedly it is really good. I think it's a very underrated model - give it a try ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/nyuuzyou/EuroLLM-1.7B-Instruct", "href": null, "resource": { "type": "space", "id": "nyuuzyou/EuroLLM-1.7B-Instruct", "discussionNum": null }, "url": "https://huggingface.co/spaces/nyuuzyou/EuroLLM-1.7B-Instruct", "code": null, "user": null, "label": null, "lang": null } ]
Today I found out about the existence of https://huggingface.co/utter-project/EuroLLM-1.7B-Instruct and unexpectedly it is really good. I think it's a very underrated model - give it a try https://huggingface.co/spaces/nyuuzyou/EuroLLM-1.7B-Instruct
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/643ac5d2e2b979ae6144d68c/Z7PCNopn4cQeAYnVJDoqG.png", "fullname": "nyuuzyou", "name": "nyuuzyou", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 57, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "attashe" ], "count": 2 }, { "reaction": "๐Ÿ‘", "users": [ "prithivMLmods" ], "count": 1 } ]
2024-10-16T20:12:18.000Z
2024-10-16T20:12:18.711Z
[]
/posts/nyuuzyou/972049092550324
838
0
483705235958803
[ { "type": "text", "value": "Ever wondered how neural networks actually work under the hood? ", "raw": "Ever wondered how neural networks actually work under the hood? ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "In my latest video, I break down the core mathematical concepts behind neural networks in a way that's easy for IT professionals to understand. We'll explore:", "raw": "In my latest video, I break down the core mathematical concepts behind neural networks in a way that's easy for IT professionals to understand. We'll explore:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Neurons as logic gates", "raw": "- Neurons as logic gates", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Weighted sums and activation functions", "raw": "- Weighted sums and activation functions", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Gradient descent and backpropagation", "raw": "- Gradient descent and backpropagation", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "No complex equations or jargon, just clear explanations and helpful visuals! ", "raw": "No complex equations or jargon, just clear explanations and helpful visuals! 
", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โžก๏ธ Watch now and unlock the mysteries of neural networks: ", "raw": "โžก๏ธ Watch now and unlock the mysteries of neural networks: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://youtu.be/L5_I1ZHoGnM", "href": "https://youtu.be/L5_I1ZHoGnM", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Ever wondered how neural networks actually work under the hood? In my latest video, I break down the core mathematical concepts behind neural networks in a way that's easy for IT professionals to understand. We'll explore: - Neurons as logic gates - Weighted sums and activation functions - Gradient descent and backpropagation No complex equations or jargon, just clear explanations and helpful visuals! โžก๏ธ Watch now and unlock the mysteries of neural networks: https://youtu.be/L5_I1ZHoGnM
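As a companion to the bullet list above, here is a tiny NumPy sketch of the same ideas: a weighted sum pushed through an activation, then one gradient-descent update per step with the backpropagation derivatives written out by hand. All numbers are arbitrary toy values, and this is my illustration rather than code from the video:

```python
# One neuron learning a single toy example with NumPy: weighted sum -> activation,
# then gradient descent using derivatives obtained by backpropagation (chain rule).
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

x = np.array([1.0, 0.0, 1.0])    # toy input
y = 1.0                          # desired output
w = np.array([0.2, -0.4, 0.1])   # initial weights
b = 0.0                          # bias
lr = 0.5                         # learning rate

for step in range(10):
    z = w @ x + b                # weighted sum of inputs
    a = sigmoid(z)               # activation squashes z into (0, 1)
    loss = 0.5 * (a - y) ** 2    # squared error

    # Backpropagation: chain rule from the loss back to each parameter.
    dloss_da = a - y
    da_dz = a * (1.0 - a)        # derivative of the sigmoid
    grad_w = dloss_da * da_dz * x
    grad_b = dloss_da * da_dz

    # Gradient descent: nudge the parameters downhill on the loss surface.
    w -= lr * grad_w
    b -= lr * grad_b
    print(f"step {step}: loss = {loss:.4f}")

print(f"final output {sigmoid(w @ x + b):.3f} vs target {y}")
```

With a hard threshold in place of the sigmoid, the same weighted-sum neuron behaves like a simple logic gate, which is the "neurons as logic gates" framing from the first bullet.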
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png", "fullname": "Richard A Aragon", "name": "TuringsSolutions", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 146, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "YaTharThShaRma999", "crystal99", "Joseph717171" ], "count": 3 }, { "reaction": "๐Ÿ˜”", "users": [ "takeraparterer" ], "count": 1 }, { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 }, { "reaction": "๐Ÿ‘", "users": [ "ZeroWw" ], "count": 1 } ]
2024-10-16T17:42:37.000Z
2024-10-16T17:42:37.969Z
[]
/posts/TuringsSolutions/483705235958803
1,381
0
843028961140234
[ { "type": "text", "value": "Philosopher Gilles Deleuze in 1985-86 about society of control, probabilities, and power. Visionary words in an era of autoregressive models:", "raw": "Philosopher Gilles Deleuze in 1985-86 about society of control, probabilities, and power. Visionary words in an era of autoregressive models:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "\"The biopolitics of populations appears when right sets about administering life, says Foucault, administering life in any open multiplicities whatever. You see the importance of the difference between discipline and biopolitics. The one is in an open space, with large multiplicities to which limits are not assignable. They can only be treated by the calculus of probabilities, hence the development of the calculus of probabilities and the meaning [sens] of the social control of probabilities, the probabilities of marriage in a nation, the probabilities of mortality, probabilities of natality. Natality, nuptiality, mortality โ€ฆ", "raw": "\"The biopolitics of populations appears when right sets about administering life, says Foucault, administering life in any open multiplicities whatever. You see the importance of the difference between discipline and biopolitics. The one is in an open space, with large multiplicities to which limits are not assignable. They can only be treated by the calculus of probabilities, hence the development of the calculus of probabilities and the meaning [sens] of the social control of probabilities, the probabilities of marriage in a nation, the probabilities of mortality, probabilities of natality. Natality, nuptiality, mortality โ€ฆ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "... When Foucault directly addresses the question of power, namely, one of his great theses: no, power does not repress, or it represses only secondarily. What does it do? It does something much more profound and, doubtless, more formidable that repressing: it forms, it shapes. It does not silence, it does worse: it makes speak. It disciplines, it standardizes [normalise]. But repression is entirely secondary in relation to the positive operations of power.", "raw": "... When Foucault directly addresses the question of power, namely, one of his great theses: no, power does not repress, or it represses only secondarily. What does it do? It does something much more profound and, doubtless, more formidable that repressing: it forms, it shapes. It does not silence, it does worse: it makes speak. It disciplines, it standardizes [normalise]. 
But repression is entirely secondary in relation to the positive operations of power.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Power does not repress, it disciplines, it manages, it controls, it standardizes, etcetera. It does not silence, it makes speak. It does not prevent acting, it makes act.\" ", "raw": "Power does not repress, it disciplines, it manages, it controls, it standardizes, etcetera. It does not silence, it makes speak. It does not prevent acting, it makes act.\" ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "From the Deleuze Seminars at Universitรฉ Paris 8 translated by Purdue University -> ", "raw": "From the Deleuze Seminars at Universitรฉ Paris 8 translated by Purdue University -> ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://deleuze.cla.purdue.edu/", "href": "https://deleuze.cla.purdue.edu/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Philosopher Gilles Deleuze in 1985-86 on the society of control, probabilities, and power. Visionary words in an era of autoregressive models: "The biopolitics of populations appears when right sets about administering life, says Foucault, administering life in any open multiplicities whatever. You see the importance of the difference between discipline and biopolitics. The one is in an open space, with large multiplicities to which limits are not assignable. They can only be treated by the calculus of probabilities, hence the development of the calculus of probabilities and the meaning [sens] of the social control of probabilities, the probabilities of marriage in a nation, the probabilities of mortality, probabilities of natality. Natality, nuptiality, mortality … ... When Foucault directly addresses the question of power, namely, one of his great theses: no, power does not repress, or it represses only secondarily. What does it do? It does something much more profound and, doubtless, more formidable than repressing: it forms, it shapes. It does not silence, it does worse: it makes speak. It disciplines, it standardizes [normalise]. But repression is entirely secondary in relation to the positive operations of power. Power does not repress, it disciplines, it manages, it controls, it standardizes, etcetera. It does not silence, it makes speak. It does not prevent acting, it makes act." From the Deleuze Seminars at Université Paris 8, translated by Purdue University -> https://deleuze.cla.purdue.edu/
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63171caf1cc81c5e95ed7b92/29I5Lr0vLRcQR7AfCZcYj.jpeg", "fullname": "Akim Mousterou", "name": "AkimfromParis", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 7, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-10-16T13:44:16.000Z
2024-10-16T13:44:16.061Z
[]
/posts/AkimfromParis/843028961140234
602
0
600930862864914
[ { "type": "text", "value": "The Synthetic Data Generator now directly integrates with Argilla, so you can generate and curate your own high-quality datasets from pure natural language!", "raw": "The Synthetic Data Generator now directly integrates with Argilla, so you can generate and curate your own high-quality datasets from pure natural language!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Up next -> include dataset generation for text classification.", "raw": "Up next -> include dataset generation for text classification.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Other suggestions? Let us know.", "raw": "Other suggestions? Let us know.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Space: ", "raw": "Space: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/argilla/synthetic-data-generator", "href": null, "resource": { "type": "space", "id": "argilla/synthetic-data-generator", "discussionNum": null }, "url": "https://huggingface.co/spaces/argilla/synthetic-data-generator", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
The Synthetic Data Generator now directly integrates with Argilla, so you can generate and curate your own high-quality datasets from pure natural language! Up next -> include dataset generation for text classification. Other suggestions? Let us know. Space: https://huggingface.co/spaces/argilla/synthetic-data-generator
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1677141720071-634ff41ff32062e9eb7b06a3.jpeg", "fullname": "David Berenstein", "name": "davidberenstein1957", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 167, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/634ff41ff32062e9eb7b06a3/e1a4UwPu_AchxuoOSkTBg.mp4" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "davidberenstein1957", "clem" ], "count": 3 } ]
2024-10-16T10:42:56.000Z
2024-10-16T10:42:56.017Z
[]
/posts/davidberenstein1957/600930862864914
682
0
605522997356241
[ { "type": "text", "value": "While Google's Transformer might have introduced \"Attention is all you need,\" Microsoft and Tsinghua University are here with the DIFF Transformer, stating, \"Sparse-Attention is all you need.\"", "raw": "While Google's Transformer might have introduced \"Attention is all you need,\" Microsoft and Tsinghua University are here with the DIFF Transformer, stating, \"Sparse-Attention is all you need.\"", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The DIFF Transformer outperforms traditional Transformers in scaling properties, requiring only about 65% of the model size or training tokens to achieve comparable performance.", "raw": "The DIFF Transformer outperforms traditional Transformers in scaling properties, requiring only about 65% of the model size or training tokens to achieve comparable performance.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The secret sauce? A differential attention mechanism that amplifies focus on relevant context while canceling out noise, leading to sparser and more effective attention patterns.", "raw": "The secret sauce? 
A differential attention mechanism that amplifies focus on relevant context while canceling out noise, leading to sparser and more effective attention patterns.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "How?", "raw": "How?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- It uses two separate softmax attention maps and subtracts them.", "raw": "- It uses two separate softmax attention maps and subtracts them.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- It employs a learnable scalar ฮป for balancing the attention maps.", "raw": "- It employs a learnable scalar ฮป for balancing the attention maps.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- It implements GroupNorm for each attention head independently.", "raw": "- It implements GroupNorm for each attention head independently.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- It is compatible with FlashAttention for efficient computation.", "raw": "- It is compatible with FlashAttention for efficient computation.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "What do you get?", "raw": "What do you get?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Superior long-context modeling (up to 64K tokens).", "raw": "- Superior long-context modeling (up to 64K tokens).", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Enhanced key information 
retrieval.", "raw": "- Enhanced key information retrieval.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Reduced hallucination in question-answering and summarization tasks.", "raw": "- Reduced hallucination in question-answering and summarization tasks.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- More robust in-context learning, less affected by prompt order.", "raw": "- More robust in-context learning, less affected by prompt order.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Mitigation of activation outliers, opening doors for efficient quantization.", "raw": "- Mitigation of activation outliers, opening doors for efficient quantization.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Extensive experiments show DIFF Transformer's advantages across various tasks and model sizes, from 830M to 13.1B parameters.", "raw": "Extensive experiments show DIFF Transformer's advantages across various tasks and model sizes, from 830M to 13.1B parameters.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This innovative architecture could be a game-changer for the next generation of LLMs. What are your thoughts on DIFF Transformer's potential impact?", "raw": "This innovative architecture could be a game-changer for the next generation of LLMs. What are your thoughts on DIFF Transformer's potential impact?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
While Google's Transformer might have introduced "Attention is all you need," Microsoft and Tsinghua University are here with the DIFF Transformer, stating, "Sparse-Attention is all you need." The DIFF Transformer outperforms traditional Transformers in scaling properties, requiring only about 65% of the model size or training tokens to achieve comparable performance. The secret sauce? A differential attention mechanism that amplifies focus on relevant context while canceling out noise, leading to sparser and more effective attention patterns. How? - It uses two separate softmax attention maps and subtracts them. - It employs a learnable scalar ฮป for balancing the attention maps. - It implements GroupNorm for each attention head independently. - It is compatible with FlashAttention for efficient computation. What do you get? - Superior long-context modeling (up to 64K tokens). - Enhanced key information retrieval. - Reduced hallucination in question-answering and summarization tasks. - More robust in-context learning, less affected by prompt order. - Mitigation of activation outliers, opening doors for efficient quantization. Extensive experiments show DIFF Transformer's advantages across various tasks and model sizes, from 830M to 13.1B parameters. This innovative architecture could be a game-changer for the next generation of LLMs. What are your thoughts on DIFF Transformer's potential impact?
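For intuition, here is a rough single-head PyTorch sketch of the mechanism as described above: two softmax attention maps subtracted with a learnable scalar λ, followed by per-head normalization. This reflects only the summary, not the authors' reference implementation; the λ re-parameterization, multi-head wiring, causal masking, and FlashAttention integration are omitted, and the module and parameter names are mine:

```python
# Simplified differential attention for one head (batch, seq, dim tensors).
# Hedged sketch of the mechanism described above; λ handling and normalization
# are simplified relative to the paper.
import torch
import torch.nn as nn
import torch.nn.functional as F

class DiffAttentionHead(nn.Module):
    def __init__(self, dim, head_dim):
        super().__init__()
        # Two sets of query/key projections -> two attention maps.
        self.q_proj = nn.Linear(dim, 2 * head_dim, bias=False)
        self.k_proj = nn.Linear(dim, 2 * head_dim, bias=False)
        self.v_proj = nn.Linear(dim, head_dim, bias=False)
        self.lam = nn.Parameter(torch.tensor(0.5))   # learnable scalar λ
        self.norm = nn.GroupNorm(1, head_dim)        # per-head normalization
        self.head_dim = head_dim

    def forward(self, x):
        b, t, _ = x.shape
        q1, q2 = self.q_proj(x).chunk(2, dim=-1)
        k1, k2 = self.k_proj(x).chunk(2, dim=-1)
        v = self.v_proj(x)
        scale = self.head_dim ** -0.5

        a1 = F.softmax(q1 @ k1.transpose(-2, -1) * scale, dim=-1)
        a2 = F.softmax(q2 @ k2.transpose(-2, -1) * scale, dim=-1)
        out = (a1 - self.lam * a2) @ v               # differential attention

        # GroupNorm expects (batch, channels, ...), so fold the sequence into the batch.
        out = self.norm(out.reshape(b * t, self.head_dim)).reshape(b, t, self.head_dim)
        return out

x = torch.randn(2, 16, 64)                 # (batch, seq_len, model_dim)
head = DiffAttentionHead(dim=64, head_dim=32)
print(head(x).shape)                       # torch.Size([2, 16, 32])
```

The subtraction is what is meant to cancel the attention noise the two maps share, which is where the sparser, more focused attention patterns described above would come from.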
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662bf5bfe93bb73804ef9344/WXYLnjjJ4SROkoveIi7If.png", "fullname": "Kuldeep Singh Sidhu", "name": "singhsidhukuldeep", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 219, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/662bf5bfe93bb73804ef9344/BewC28D4diwrp34EP85pF.jpeg" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "privategeek24", "calmodovar", "den0620", "Hampetiudo", "DeathGodlike", "cctuan" ], "count": 7 } ]
2024-10-16T08:04:37.000Z
2024-10-16T21:48:03.346Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64137e2150358a805203cbac/w9RQx8Q07UvgFyIZ3ce_k.jpeg", "fullname": "Jade", "name": "euclaise", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 89, "isFollowing": false } ]
/posts/singhsidhukuldeep/605522997356241
2,158
1
159417111780180
[ { "type": "text", "value": "MixGen3 is an innovative image generation service that utilizes LoRA (Low-Rank Adaptation) models. Its key features include:", "raw": "MixGen3 is an innovative image generation service that utilizes LoRA (Low-Rank Adaptation) models. Its key features include:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Integration of various LoRA models: Users can explore and select multiple LoRA models through a gallery.", "raw": "Integration of various LoRA models: Users can explore and select multiple LoRA models through a gallery.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Combination of LoRA models: Up to three LoRA models can be combined to express unique styles and content.", "raw": "Combination of LoRA models: Up to three LoRA models can be combined to express unique styles and content.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "User-friendly interface: An intuitive interface allows for easy model selection, prompt input, and image generation.", "raw": "User-friendly interface: An intuitive interface allows for easy model selection, prompt input, and image generation.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Advanced settings: Various options are provided, including image size adjustment, random seed, and advanced configurations.", "raw": "Advanced settings: Various options are provided, including image size adjustment, random seed, and advanced configurations.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Main applications of MixGen3:", "raw": "Main applications of MixGen3:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Content creation", "raw": "Content creation", "href": null, 
"resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Design and illustration", "raw": "Design and illustration", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Marketing and advertising", "raw": "Marketing and advertising", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Education and learning", "raw": "Education and learning", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Value of MixGen3:", "raw": "Value of MixGen3:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Enhancing creativity", "raw": "Enhancing creativity", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Time-saving", "raw": "Time-saving", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Collaboration possibilities", "raw": "Collaboration possibilities", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Continuous development", "raw": "Continuous development", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Expected effects:", "raw": "Expected effects:", "href": null, "resource": null, "url": 
null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Increased content diversity", "raw": "Increased content diversity", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Lowered entry barrier for creation", "raw": "Lowered entry barrier for creation", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Improved creativity", "raw": "Improved creativity", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Enhanced productivity", "raw": "Enhanced productivity", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "MixGen3 is bringing a new wave to the field of image generation by leveraging the advantages of LoRA models. Users can experience the service for free at ", "raw": "MixGen3 is bringing a new wave to the field of image generation by leveraging the advantages of LoRA models. Users can experience the service for free at ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://openfree-mixgen3.hf.space", "href": "https://openfree-mixgen3.hf.space", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "contacts: [email protected]", "raw": "contacts: [email protected]", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
MixGen3 is an innovative image generation service that utilizes LoRA (Low-Rank Adaptation) models. Its key features include: Integration of various LoRA models: Users can explore and select multiple LoRA models through a gallery. Combination of LoRA models: Up to three LoRA models can be combined to express unique styles and content. User-friendly interface: An intuitive interface allows for easy model selection, prompt input, and image generation. Advanced settings: Various options are provided, including image size adjustment, random seed, and advanced configurations. Main applications of MixGen3: Content creation Design and illustration Marketing and advertising Education and learning Value of MixGen3: Enhancing creativity Time-saving Collaboration possibilities Continuous development Expected effects: Increased content diversity Lowered entry barrier for creation Improved creativity Enhanced productivity MixGen3 is bringing a new wave to the field of image generation by leveraging the advantages of LoRA models. Users can experience the service for free at https://openfree-mixgen3.hf.space contacts: [email protected]
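For a sense of what combining up to three LoRA models typically looks like in code, here is an illustrative diffusers sketch. The base model, LoRA repo IDs, adapter names, and weights are placeholders chosen for the example, not MixGen3's actual configuration:

```python
# Illustrative sketch of blending up to three LoRA adapters on one base pipeline.
# The base model, LoRA repo IDs, adapter names, and weights are placeholders for
# this example; MixGen3's actual models and weighting scheme are not shown here.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",   # assumed base model
    torch_dtype=torch.float16,
).to("cuda")

# Load each LoRA under its own adapter name (hypothetical repos).
pipe.load_lora_weights("user/style-lora", adapter_name="style")
pipe.load_lora_weights("user/character-lora", adapter_name="character")
pipe.load_lora_weights("user/background-lora", adapter_name="background")

# Blend the three adapters with per-adapter strengths.
pipe.set_adapters(["style", "character", "background"], adapter_weights=[0.8, 0.6, 0.4])

image = pipe(
    "a watercolor fox reading a book in a cozy library",
    num_inference_steps=30,
).images[0]
image.save("mixgen_style_blend.png")
```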
{ "avatarUrl": "/avatars/e83b4373ec080aff5f69168bc78c137e.svg", "fullname": "openfree", "name": "openfree", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 24, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66e54edddba1e4fee4500a5a/2bYACqjR04atAhZDmjihC.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66e54edddba1e4fee4500a5a/2EzEeEQz3LnLqQMZEKaDp.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66e54edddba1e4fee4500a5a/Py8ISo2R2eXSHmP7A38FX.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66e54edddba1e4fee4500a5a/bkqnPmug6FxRAzceyeWkK.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66e54edddba1e4fee4500a5a/5cWF4OFBsUpDvYtpAI2dD.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66e54edddba1e4fee4500a5a/ECo9JNmVV46DylbbkJeSm.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66e54edddba1e4fee4500a5a/0QUEc-nYZnxHpW8qQO-Mt.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66e54edddba1e4fee4500a5a/ErHw1-n1HEWZSnRQNwDQL.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66e54edddba1e4fee4500a5a/F1WemgChMLpH0Ij0iDFHL.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66e54edddba1e4fee4500a5a/kSE5RJQsEhUOh7pJvNYwk.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66e54edddba1e4fee4500a5a/lnij5iH5qo79J7WK3XHZc.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66e54edddba1e4fee4500a5a/YvNEYlFtM6KyHqupQyUhZ.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66e54edddba1e4fee4500a5a/3DhMCzo2HIVFz5sPjywxd.jpeg" } ]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "aiqtech", "seawolf2357", "fantos", "aiqcamp", "ginipick", "fantaxy", "openfree", "John6666", "israa95", "Ar4ikov" ], "count": 10 }, { "reaction": "๐Ÿ‘", "users": [ "aiqtech", "seawolf2357", "fantos", "aiqcamp", "ginipick", "openfree", "DeathGodlike", "Sakalti", "Moibe" ], "count": 9 }, { "reaction": "๐Ÿš€", "users": [ "aiqtech", "seawolf2357", "fantos", "aiqcamp", "openfree", "John6666", "nazimali", "Ar4ikov" ], "count": 8 }, { "reaction": "โค๏ธ", "users": [ "aiqtech", "seawolf2357", "fantos", "aiqcamp", "ginipick", "openfree", "Ar4ikov" ], "count": 7 }, { "reaction": "๐Ÿ‘€", "users": [ "aiqtech", "seawolf2357", "fantos", "fantaxy", "openfree", "John6666" ], "count": 6 }, { "reaction": "๐Ÿค—", "users": [ "aiqtech", "seawolf2357", "ginipick", "openfree", "Ar4ikov", "Sasa77" ], "count": 6 }, { "reaction": "๐Ÿ˜Ž", "users": [ "aiqtech", "seawolf2357", "aiqcamp", "ginipick", "fantaxy", "openfree" ], "count": 6 }, { "reaction": "๐Ÿง ", "users": [ "aiqtech", "seawolf2357", "aiqcamp", "ginipick", "fantaxy", "openfree" ], "count": 6 }, { "reaction": "โž•", "users": [ "aiqtech", "seawolf2357", "aiqcamp", "openfree", "naimul011" ], "count": 5 }, { "reaction": "๐Ÿค", "users": [ "aiqtech", "seawolf2357", "fantos", "fantaxy", "openfree" ], "count": 5 }, { "reaction": "๐Ÿ˜”", "users": [ "aiqtech", "seawolf2357", "fantos", "ginipick", "openfree" ], "count": 5 }, { "reaction": "๐Ÿคฏ", "users": [ "aiqtech", "seawolf2357", "fantos", "aiqcamp", "openfree" ], "count": 5 } ]
2024-10-16T07:37:59.000Z
2024-10-16T07:39:20.510Z
[ { "avatarUrl": "/avatars/776a29ad75b68e7c905f6b12782afafb.svg", "fullname": "AIQ", "name": "aiqtech", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 8, "isFollowing": false } ]
/posts/openfree/159417111780180
3,970
1
732889644445945
[ { "type": "text", "value": "Model is always disabled?", "raw": "Model is always disabled?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "#script...", "raw": "#script...", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "from transformers import AutoModelForCausalLM", "raw": "from transformers import AutoModelForCausalLM", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "model = AutoModelForCausalLM.from_pretrained(\"distilbert/distilgpt2\",", "raw": "model = AutoModelForCausalLM.from_pretrained(\"distilbert/distilgpt2\",", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " token=\"xxxxxx\")", "raw": " token=\"xxxxxx\")", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "That loads the model fine. But if used by index returned from VectorStoreIndex for QDrant like this:", "raw": "That loads the model fine. 
But if used by index returned from VectorStoreIndex for QDrant like this:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "#script...", "raw": "#script...", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "query_engine = index_from_nodes.as_query_engine(llm=model, streaming=True)", "raw": "query_engine = index_from_nodes.as_query_engine(llm=model, streaming=True)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "response = query_engine.query(", "raw": "response = query_engine.query(", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " \"What is formula 1?\"", "raw": " \"What is formula 1?\"", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ")", "raw": ")", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "response.print_response_stream()", "raw": "response.print_response_stream()", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "It errors out with a disabled error:", "raw": "It errors out with a disabled error:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "AssertionError Traceback (most recent call last)", "raw": 
"AssertionError Traceback (most recent call last)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Cell In[34], line 1", "raw": "Cell In[34], line 1", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "----> 1 query_engine = index_from_nodes.as_query_engine(llm=model, streaming=True)", "raw": "----> 1 query_engine = index_from_nodes.as_query_engine(llm=model, streaming=True)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " 3 response = query_engine.query(", "raw": " 3 response = query_engine.query(", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " 4 \"What is formula 1?\"", "raw": " 4 \"What is formula 1?\"", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " 5 )", "raw": " 5 )", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " 7 response.print_response_stream()", "raw": " 7 response.print_response_stream()", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "File ~/miniconda/lib/python3.9/site-packages/llama_index/core/indices/base.py:376, in BaseIndex.as_query_engine(self, llm, **kwargs)", "raw": "File ~/miniconda/lib/python3.9/site-packages/llama_index/core/indices/base.py:376, in BaseIndex.as_query_engine(self, llm, **kwargs)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " 370 from llama_index.core.query_engine.retriever_query_engine import (", "raw": " 370 from llama_index.core.query_engine.retriever_query_engine import (", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, 
"raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " 371 RetrieverQueryEngine,", "raw": " 371 RetrieverQueryEngine,", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " 372 )", "raw": " 372 )", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " 374 retriever = self.as_retriever(**kwargs)", "raw": " 374 retriever = self.as_retriever(**kwargs)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " 375 llm = (", "raw": " 375 llm = (", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "--> 376 resolve_llm(llm, callback_manager=self._callback_manager)", "raw": "--> 376 resolve_llm(llm, callback_manager=self._callback_manager)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " 377 if llm", "raw": " 377 if llm", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " 378 else Settings.llm", "raw": " 378 else Settings.llm", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " 379 )", "raw": " 379 )", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " 381 return RetrieverQueryEngine.from_args(", "raw": " 381 return RetrieverQueryEngine.from_args(", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " 382 retriever,", "raw": " 382 retriever,", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": 
null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " 383 llm=llm,", "raw": " 383 llm=llm,", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " 384 **kwargs,", "raw": " 384 **kwargs,", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " 385 )", "raw": " 385 )", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "File ~/miniconda/lib/python3.9/site-packages/llama_index/core/llms/utils.py:102, in resolve_llm(llm, callback_manager)", "raw": "File ~/miniconda/lib/python3.9/site-packages/llama_index/core/llms/utils.py:102, in resolve_llm(llm, callback_manager)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " 99 print(\"LLM is explicitly disabled. Using MockLLM.\")", "raw": " 99 print(\"LLM is explicitly disabled. 
Using MockLLM.\")", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " 100 llm = MockLLM()", "raw": " 100 llm = MockLLM()", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "--> 102 assert isinstance(llm, LLM)", "raw": "--> 102 assert isinstance(llm, LLM)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " 104 llm.callback_manager = callback_manager or Settings.callback_manager", "raw": " 104 llm.callback_manager = callback_manager or Settings.callback_manager", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " 106 return llm", "raw": " 106 return llm", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "AssertionError: ", "raw": "AssertionError: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "So why is the LLM disabled?", "raw": "So why is the LLM disabled?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Thanks!", "raw": "Thanks!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Model is always disabled?

#script...
from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2",
                                             token="xxxxxx")

That loads the model fine. But if used by index returned from VectorStoreIndex for QDrant like this:

#script...
query_engine = index_from_nodes.as_query_engine(llm=model, streaming=True)

response = query_engine.query(
    "What is formula 1?"
)

response.print_response_stream()

It errors out with a disabled error:

AssertionError Traceback (most recent call last)
Cell In[34], line 1
----> 1 query_engine = index_from_nodes.as_query_engine(llm=model, streaming=True)
      3 response = query_engine.query(
      4     "What is formula 1?"
      5 )
      7 response.print_response_stream()

File ~/miniconda/lib/python3.9/site-packages/llama_index/core/indices/base.py:376, in BaseIndex.as_query_engine(self, llm, **kwargs)
    370 from llama_index.core.query_engine.retriever_query_engine import (
    371     RetrieverQueryEngine,
    372 )
    374 retriever = self.as_retriever(**kwargs)
    375 llm = (
--> 376     resolve_llm(llm, callback_manager=self._callback_manager)
    377     if llm
    378     else Settings.llm
    379 )
    381 return RetrieverQueryEngine.from_args(
    382     retriever,
    383     llm=llm,
    384     **kwargs,
    385 )

File ~/miniconda/lib/python3.9/site-packages/llama_index/core/llms/utils.py:102, in resolve_llm(llm, callback_manager)
     99 print("LLM is explicitly disabled. Using MockLLM.")
    100 llm = MockLLM()
--> 102 assert isinstance(llm, LLM)
    104 llm.callback_manager = callback_manager or Settings.callback_manager
    106 return llm

AssertionError:

So why is the LLM disabled?
Thanks!
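One likely explanation (an assumption, not confirmed in the thread): as_query_engine expects a LlamaIndex LLM object, and a raw transformers model does not pass resolve_llm's isinstance check, so the assertion fires. A minimal sketch of the usual fix, assuming the llama-index-llms-huggingface package is installed and index_from_nodes is the index from the post:

# Sketch only: wrap the Hugging Face model in LlamaIndex's LLM interface
# instead of passing the raw transformers model object.
from llama_index.core import Settings
from llama_index.llms.huggingface import HuggingFaceLLM  # pip install llama-index-llms-huggingface

llm = HuggingFaceLLM(
    model_name="distilbert/distilgpt2",
    tokenizer_name="distilbert/distilgpt2",
    max_new_tokens=256,
    device_map="auto",
)
Settings.llm = llm  # optional: also set it as the global default

query_engine = index_from_nodes.as_query_engine(llm=llm, streaming=True)
response = query_engine.query("What is formula 1?")
response.print_response_stream()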
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/no-auth/5kE1rvdIVfUftt7B__ysg.png", "fullname": "Thomas Tong", "name": "gtvracer", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-10-16T00:17:18.000Z
2024-10-16T01:56:58.460Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png", "fullname": "John Smith", "name": "John6666", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 398, "isFollowing": false } ]
/posts/gtvracer/732889644445945
607
1
936434772663807
[ { "type": "text", "value": "๐Ÿ“ข We are giving extra two weeks before switching to the final stage of RuOpinionNE-2024.", "raw": "๐Ÿ“ข We are giving extra two weeks before switching to the final stage of RuOpinionNE-2024.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โฐ The final stage starts since 1-st of November 2024.", "raw": "โฐ The final stage starts since 1-st of November 2024.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "We have already first baseline submission by ๐Ÿ‘จโ€๐Ÿ’ป RefalMachine that showcase F1 = 0.17 based on Qwen2 model series.", "raw": "We have already first baseline submission by ๐Ÿ‘จโ€๐Ÿ’ป RefalMachine that showcase F1 = 0.17 based on Qwen2 model series.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "For those who wish to attend:", "raw": "For those who wish to attend:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“Š Codalab: ", "raw": "๐Ÿ“Š Codalab: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://codalab.lisn.upsaclay.fr/competitions/20244", "href": "https://codalab.lisn.upsaclay.fr/competitions/20244", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ—’ Task: ", "raw": "๐Ÿ—’ Task: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://codalab.lisn.upsaclay.fr/competitions/20244#learn_the_details-overview", "href": "https://codalab.lisn.upsaclay.fr/competitions/20244#learn_the_details-overview", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ”” Updates: ", "raw": "๐Ÿ”” Updates: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://t.me/RuOpinionNE2024", "href": "https://t.me/RuOpinionNE2024", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", 
"value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ™‹ Questions: ", "raw": "๐Ÿ™‹ Questions: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://nicolay-r.github.io/", "href": "https://nicolay-r.github.io/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿงช Past experiments: ", "raw": "๐Ÿงช Past experiments: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/nicolay-r/RuSentNE-LLM-Benchmark", "href": "https://github.com/nicolay-r/RuSentNE-LLM-Benchmark", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
📢 We are giving an extra two weeks before switching to the final stage of RuOpinionNE-2024.
⏰ The final stage starts on 1 November 2024.
We already have a first baseline submission by 👨‍💻 RefalMachine that showcases F1 = 0.17 based on the Qwen2 model series.

For those who wish to attend:
📊 Codalab: https://codalab.lisn.upsaclay.fr/competitions/20244
🗒 Task: https://codalab.lisn.upsaclay.fr/competitions/20244#learn_the_details-overview
🔔 Updates: https://t.me/RuOpinionNE2024
🙋 Questions: https://nicolay-r.github.io/
🧪 Past experiments: https://github.com/nicolay-r/RuSentNE-LLM-Benchmark
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64e62d11d27a8292c3637f86/aptDeBHpCJxcREj6KPLN1.jpeg", "fullname": "Nicolay Rusnachenko", "name": "nicolay-r", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 49, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/0ytMgQuXtm0Czmq1_cxoc.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/OkekLUZpZnWP-wHUtv3Nv.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/d9OHy2znk7GDdD7_SQvte.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/OyNZSHXdQWmPGtknrfTSn.jpeg" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-10-15T22:43:39.000Z
2024-10-15T22:44:30.539Z
[]
/posts/nicolay-r/936434772663807
641
0
870209192100634
[ { "type": "text", "value": "Professors should ask students to write blog posts based on their final projects instead of having them do paper-like reports.", "raw": "Professors should ask students to write blog posts based on their final projects instead of having them do paper-like reports.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "A single blog post, accessible to the entire internet, can have a greater career impact than dozens of reports that nobody will read.", "raw": "A single blog post, accessible to the entire internet, can have a greater career impact than dozens of reports that nobody will read.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Professors should ask students to write blog posts based on their final projects instead of having them do paper-like reports. A single blog post, accessible to the entire internet, can have a greater career impact than dozens of reports that nobody will read.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1657144463525-629a173153a72d997d3f57d0.jpeg", "fullname": "Santiago Viquez", "name": "santiviquez", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 84, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘", "users": [ "ABS-Zeerho", "ts276", "nicoboss", "richardburleigh", "luised94" ], "count": 5 }, { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-10-15T21:05:33.000Z
2024-10-15T21:05:33.985Z
[]
/posts/santiviquez/870209192100634
1,488
0
478045153069327
[ { "type": "text", "value": "Who's going to get to the most liked model on Hugging Face first: StabilityAI, Meta, Black Forest or someone else? The race is on!", "raw": "Who's going to get to the most liked model on Hugging Face first: StabilityAI, Meta, Black Forest or someone else? The race is on!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Who's going to get to the most liked model on Hugging Face first: StabilityAI, Meta, Black Forest or someone else? The race is on!
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1583857146757-5e67bdd61009063689407479.jpeg", "fullname": "Clem ๐Ÿค—", "name": "clem", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1763, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5e67bdd61009063689407479/f3yA9Yl7Ov_jFoqiTT-yW.png" } ]
[]
[ { "reaction": "๐Ÿš€", "users": [ "YaTharThShaRma999", "andito", "John6666", "danielus" ], "count": 4 } ]
2024-10-15T17:12:29.000Z
2024-10-16T10:31:29.736Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/643ac5d2e2b979ae6144d68c/Z7PCNopn4cQeAYnVJDoqG.png", "fullname": "nyuuzyou", "name": "nyuuzyou", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 57, "isFollowing": false }, { "avatarUrl": "/avatars/599ed6f8ecbe91c2f5d743e52e0574a1.svg", "fullname": "Zeerho Kelvin", "name": "ABS-Zeerho", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/clem/478045153069327
2,045
2
462722631017975
[ { "type": "text", "value": "New Book: Building Disruptive AI & LLM Technology from Scratch ", "raw": "New Book: Building Disruptive AI & LLM Technology from Scratch ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://mltblog.com/404F1BZ", "href": "https://mltblog.com/404F1BZ", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This book features new advances in game-changing AI and LLM technologies built by GenAItechLab.com. Written in simple English, it is best suited for engineers, developers, data scientists, analysts, consultants and anyone with an analytic background interested in starting a career in AI. The emphasis is on scalable enterprise solutions, easy to implement, yet outperforming vendors both in term of speed and quality, by several orders of magnitude.", "raw": "This book features new advances in game-changing AI and LLM technologies built by GenAItechLab.com. Written in simple English, it is best suited for engineers, developers, data scientists, analysts, consultants and anyone with an analytic background interested in starting a career in AI. The emphasis is on scalable enterprise solutions, easy to implement, yet outperforming vendors both in term of speed and quality, by several orders of magnitude.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Each topic comes with GitHub links, full Python code, datasets, illustrations, and real-life case studies, including from Fortune 100 company. Some of the material is presented as enterprise projects with solution, to help you build robust applications and boost your career. You donโ€™t need expensive GPU and cloud bandwidth to implement them: a standard laptop works.", "raw": "Each topic comes with GitHub links, full Python code, datasets, illustrations, and real-life case studies, including from Fortune 100 company. Some of the material is presented as enterprise projects with solution, to help you build robust applications and boost your career. 
You donโ€™t need expensive GPU and cloud bandwidth to implement them: a standard laptop works.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โžก๏ธ Part 1: Hallucination-Free LLM with Real-Time Fine-Tuning", "raw": "โžก๏ธ Part 1: Hallucination-Free LLM with Real-Time Fine-Tuning", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โžก๏ธ Part 2: Outperforming Neural Nets and Classic AI", "raw": "โžก๏ธ Part 2: Outperforming Neural Nets and Classic AI", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โžก๏ธ Part 3: Innovations in Statistical AI", "raw": "โžก๏ธ Part 3: Innovations in Statistical AI", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "About the author", "raw": "About the author", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Vincent Granville is a pioneering GenAI scientist and machine learning expert, co-founder of Data Science Central (acquired by a publicly traded company in 2020), Chief AI Scientist atย ML Techniquesย andย GenAI Techlab, former VC-funded executive, author (Elsevier) and patent owner โ€” one related to LLM. Vincentโ€™s past corporate experience includes Visa, Wells Fargo, eBay, NBC, Microsoft, and CNET.", "raw": "Vincent Granville is a pioneering GenAI scientist and machine learning expert, co-founder of Data Science Central (acquired by a publicly traded company in 2020), Chief AI Scientist atย ML Techniquesย andย GenAI Techlab, former VC-funded executive, author (Elsevier) and patent owner โ€” one related to LLM. 
Vincentโ€™s past corporate experience includes Visa, Wells Fargo, eBay, NBC, Microsoft, and CNET.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โžก๏ธ See content and get your copy, at ", "raw": "โžก๏ธ See content and get your copy, at ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://mltblog.com/404F1BZ", "href": "https://mltblog.com/404F1BZ", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
New Book: Building Disruptive AI & LLM Technology from Scratch https://mltblog.com/404F1BZ

This book features new advances in game-changing AI and LLM technologies built by GenAItechLab.com. Written in simple English, it is best suited for engineers, developers, data scientists, analysts, consultants, and anyone with an analytic background interested in starting a career in AI. The emphasis is on scalable enterprise solutions that are easy to implement yet outperform vendors in both speed and quality, by several orders of magnitude.

Each topic comes with GitHub links, full Python code, datasets, illustrations, and real-life case studies, including from a Fortune 100 company. Some of the material is presented as enterprise projects with solutions, to help you build robust applications and boost your career. You don't need an expensive GPU or cloud bandwidth to implement them: a standard laptop works.

➡️ Part 1: Hallucination-Free LLM with Real-Time Fine-Tuning
➡️ Part 2: Outperforming Neural Nets and Classic AI
➡️ Part 3: Innovations in Statistical AI

About the author

Vincent Granville is a pioneering GenAI scientist and machine learning expert, co-founder of Data Science Central (acquired by a publicly traded company in 2020), Chief AI Scientist at ML Techniques and GenAI Techlab, former VC-funded executive, author (Elsevier), and patent owner (one patent related to LLM). Vincent's past corporate experience includes Visa, Wells Fargo, eBay, NBC, Microsoft, and CNET.

➡️ See content and get your copy at https://mltblog.com/404F1BZ
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/669c89e98f2dbc203f9e74ab/higvnXEHeo_Ig2bgTpn47.png", "fullname": "Vincent Granville", "name": "vincentg64", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 17, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/669c89e98f2dbc203f9e74ab/-Gt8CdcgpCaDoFOPSwlDP.png" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "runkixt" ], "count": 2 } ]
2024-10-15T16:32:23.000Z
2024-10-15T16:32:23.583Z
[]
/posts/vincentg64/462722631017975
993
0
887348448417311
[ { "type": "text", "value": "Don't use an LLM when you can use a much cheaper model. ", "raw": "Don't use an LLM when you can use a much cheaper model. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The problem is that no one tells you how to actually do it.", "raw": "The problem is that no one tells you how to actually do it.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Just picking a pre-trained model (e.g., BERT) and throwing it at your problem won't work!", "raw": "Just picking a pre-trained model (e.g., BERT) and throwing it at your problem won't work!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "If you want a small model to perform well on your problem, you need to fine-tune it.", "raw": "If you want a small model to perform well on your problem, you need to fine-tune it.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "And to fine-tune it, you need data. ", "raw": "And to fine-tune it, you need data. 
", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The good news is that you don't need a lot of data but instead high-quality data for your specific problem.", "raw": "The good news is that you don't need a lot of data but instead high-quality data for your specific problem.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "In the latest livestream, I showed you guys how to get started with Argilla on the Hub! Hope to see you at the next one.", "raw": "In the latest livestream, I showed you guys how to get started with Argilla on the Hub! Hope to see you at the next one.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.youtube.com/watch?v=BEe7shiG3rY", "href": "https://www.youtube.com/watch?v=BEe7shiG3rY", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Don't use an LLM when you can use a much cheaper model. The problem is that no one tells you how to actually do it. Just picking a pre-trained model (e.g., BERT) and throwing it at your problem won't work! If you want a small model to perform well on your problem, you need to fine-tune it. And to fine-tune it, you need data. The good news is that you don't need a lot of data but instead high-quality data for your specific problem. In the latest livestream, I showed you guys how to get started with Argilla on the Hub! Hope to see you at the next one. https://www.youtube.com/watch?v=BEe7shiG3rY
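As a rough illustration of the "fine-tune a small model on a little high-quality data" point, here is a minimal sketch with the transformers Trainer. The dataset, model choice, and hyperparameters are placeholder assumptions; in practice the labeled examples would come from your own Argilla-curated data rather than a public benchmark.

# Sketch only: fine-tune a small encoder for text classification on a tiny labeled set.
from datasets import load_dataset
from transformers import (AutoTokenizer, AutoModelForSequenceClassification,
                          Trainer, TrainingArguments)

model_name = "distilbert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=2)

# A couple of thousand high-quality examples is often enough for a narrow task.
dataset = load_dataset("imdb", split="train[:2000]").train_test_split(test_size=0.1)

def tokenize(batch):
    return tokenizer(batch["text"], truncation=True, padding="max_length", max_length=256)

dataset = dataset.map(tokenize, batched=True)

trainer = Trainer(
    model=model,
    args=TrainingArguments(output_dir="small-clf", num_train_epochs=2,
                           per_device_train_batch_size=16),
    train_dataset=dataset["train"],
    eval_dataset=dataset["test"],
)
trainer.train()
print(trainer.evaluate())  # held-out accuracy/loss for the fine-tuned small model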
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1677141720071-634ff41ff32062e9eb7b06a3.jpeg", "fullname": "David Berenstein", "name": "davidberenstein1957", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 167, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘", "users": [ "privategeek24", "John6666", "djuna", "mmx31", "nickandbro", "den0620", "pkalkman", "wldud5192", "Cbest" ], "count": 9 } ]
2024-10-15T16:16:04.000Z
2024-10-15T16:16:04.657Z
[]
/posts/davidberenstein1957/887348448417311
2,498
0
516022776106610
[ { "type": "text", "value": "๐Ÿšจ New Agent Benchmark ๐Ÿšจ", "raw": "๐Ÿšจ New Agent Benchmark ๐Ÿšจ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "AgentHarm: A Benchmark for Measuring Harmfulness of LLM Agents", "raw": "AgentHarm: A Benchmark for Measuring Harmfulness of LLM Agents", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/ai-safety-institute/AgentHarm", "href": null, "resource": { "type": "dataset", "id": "ai-safety-institute/AgentHarm", "discussionNum": null }, "url": "https://huggingface.co/datasets/ai-safety-institute/AgentHarm", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Collaboration between UK AI Safety Institute and Gray Swan AI to create a dataset for measuring harmfulness of LLM agents.", "raw": "Collaboration between UK AI Safety Institute and Gray Swan AI to create a dataset for measuring harmfulness of LLM agents.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The benchmark contains both harmful and benign sets of 11 categories with varied difficulty levels and detailed evaluation, not only testing success rate but also tool level accuracy.", "raw": "The benchmark contains both harmful and benign sets of 11 categories with varied difficulty levels and detailed evaluation, not only testing success rate but also tool level accuracy.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "We provide refusal and accuracy metrics across a wide range of models in both no attack and prompt attack scenarios.", "raw": "We provide refusal and accuracy metrics across a wide range of models in both no attack and prompt attack scenarios.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": 
null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2410.09024", "href": null, "resource": { "type": "paper", "id": "2410.09024", "discussionNum": null }, "url": "https://huggingface.co/papers/2410.09024", "code": null, "user": null, "label": "AgentHarm: A Benchmark for Measuring Harmfulness of LLM Agents (2410.09024)", "lang": null } ]
๐Ÿšจ New Agent Benchmark ๐Ÿšจ AgentHarm: A Benchmark for Measuring Harmfulness of LLM Agents https://huggingface.co/datasets/ai-safety-institute/AgentHarm Collaboration between UK AI Safety Institute and Gray Swan AI to create a dataset for measuring harmfulness of LLM agents. The benchmark contains both harmful and benign sets of 11 categories with varied difficulty levels and detailed evaluation, not only testing success rate but also tool level accuracy. We provide refusal and accuracy metrics across a wide range of models in both no attack and prompt attack scenarios. https://huggingface.co/papers/2410.09024
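A small sketch of how one might start exploring the benchmark with the datasets library. The configuration, split, and field names below are assumptions for illustration only; check the dataset card for the exact ones.

# Sketch: load AgentHarm from the Hub and inspect a few records (names assumed).
from collections import Counter
from datasets import load_dataset

agentharm = load_dataset("ai-safety-institute/AgentHarm", "harmful", split="test_public")
print(agentharm)        # features and number of behaviours
print(agentharm[0])     # one behaviour: prompt, category, associated tools, etc.

# Count behaviours per category (field name assumed to be "category").
print(Counter(example["category"] for example in agentharm))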
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62710bd57b9f120adb36e451/xv02RE8VgayDPDE6jkwV2.png", "fullname": "Mateusz Dziemian", "name": "mattmdjaga", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 27, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/62710bd57b9f120adb36e451/RNfOGBG5Rzz3VaQvFzA6e.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/62710bd57b9f120adb36e451/15LQSjCRaz1V22cM1n8Qx.jpeg" } ]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "mattmdjaga", "alielfilali01" ], "count": 2 }, { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-10-15T14:14:04.000Z
2024-10-15T14:18:23.897Z
[]
/posts/mattmdjaga/516022776106610
1,427
0
856252574962650
[ { "type": "text", "value": "New York Times to Perplexity: Stop Using Our Stuff", "raw": "New York Times to Perplexity: Stop Using Our Stuff", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The publisher has sent generative-AI startup Perplexity a โ€œcease and desistโ€ notice demanding that the firm stop accessing and using its content, according to a copy of the letter reviewed by The Wall Street Journal.", "raw": "The publisher has sent generative-AI startup Perplexity a โ€œcease and desistโ€ notice demanding that the firm stop accessing and using its content, according to a copy of the letter reviewed by The Wall Street Journal.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Perplexity CEO Aravind Srinivas said in an interview that Perplexity isnโ€™t ignoring the Timesโ€™s efforts to block crawling of its site. He said the company plans on responding to the legal notice by the Timesโ€™s deadline of Oct. 30. ", "raw": "Perplexity CEO Aravind Srinivas said in an interview that Perplexity isnโ€™t ignoring the Timesโ€™s efforts to block crawling of its site. He said the company plans on responding to the legal notice by the Timesโ€™s deadline of Oct. 30. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€œWe are very much interested in working with every single publisher, including the New York Times,โ€ Srinivas said. โ€œWe have no interest in being anyoneโ€™s antagonist here.โ€ ", "raw": "โ€œWe are very much interested in working with every single publisher, including the New York Times,โ€ Srinivas said. 
โ€œWe have no interest in being anyoneโ€™s antagonist here.โ€ ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.wsj.com/business/media/new-york-times-to-bezos-backed-ai-startup-stop-using-our-stuff-20faf2eb?mod=rss_Technology", "href": "https://www.wsj.com/business/media/new-york-times-to-bezos-backed-ai-startup-stop-using-our-stuff-20faf2eb?mod=rss_Technology", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
New York Times to Perplexity: Stop Using Our Stuff The publisher has sent generative-AI startup Perplexity a โ€œcease and desistโ€ notice demanding that the firm stop accessing and using its content, according to a copy of the letter reviewed by The Wall Street Journal. Perplexity CEO Aravind Srinivas said in an interview that Perplexity isnโ€™t ignoring the Timesโ€™s efforts to block crawling of its site. He said the company plans on responding to the legal notice by the Timesโ€™s deadline of Oct. 30. โ€œWe are very much interested in working with every single publisher, including the New York Times,โ€ Srinivas said. โ€œWe have no interest in being anyoneโ€™s antagonist here.โ€ https://www.wsj.com/business/media/new-york-times-to-bezos-backed-ai-startup-stop-using-our-stuff-20faf2eb?mod=rss_Technology
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647f36a8454af0237bd49574/jshkqBUTY-GZL8As8y6Aq.jpeg", "fullname": "Florent Daudens", "name": "fdaudens", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 384, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-10-15T14:07:57.000Z
2024-10-15T14:07:57.535Z
[]
/posts/fdaudens/856252574962650
421
0
555552143006264
[ { "type": "text", "value": "I just published the first article in a pair. I could make it a longer tailed series, in case you liked em. This one dives into self-hosting Metaflow without needing S3, illustrated with a version tailored for Google Colab.", "raw": "I just published the first article in a pair. I could make it a longer tailed series, in case you liked em. This one dives into self-hosting Metaflow without needing S3, illustrated with a version tailored for Google Colab.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "find it @ ", "raw": "find it @ ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/blog/Aurelien-Morgan/stateful-metaflow-on-colab", "href": "https://huggingface.co/blog/Aurelien-Morgan/stateful-metaflow-on-colab", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
I just published the first article in a pair. I could make it a longer tailed series, in case you liked em. This one dives into self-hosting Metaflow without needing S3, illustrated with a version tailored for Google Colab. find it @ https://huggingface.co/blog/Aurelien-Morgan/stateful-metaflow-on-colab
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/651e93137b2a2e027f9e55df/5oXWJeEDCrMJLA4s_0I93.png", "fullname": "Aurรฉlien-Morgan CLAUDON", "name": "Aurelien-Morgan", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 9, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-10-15T09:57:56.000Z
2024-10-22T17:05:28.962Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/651e93137b2a2e027f9e55df/5oXWJeEDCrMJLA4s_0I93.png", "fullname": "Aurรฉlien-Morgan CLAUDON", "name": "Aurelien-Morgan", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 9, "isFollowing": false } ]
/posts/Aurelien-Morgan/555552143006264
527
2
653150173791642
[ { "type": "text", "value": "๐Ÿ‘‹ We're excited to share our project on mobile face detection using MediaPipe and ZETIC.MLange.", "raw": "๐Ÿ‘‹ We're excited to share our project on mobile face detection using MediaPipe and ZETIC.MLange.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ”‘ Key highlights:", "raw": "๐Ÿ”‘ Key highlights:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1. Introduction to Mediapipe face detection model", "raw": "1. Introduction to Mediapipe face detection model", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2. Developing on-device AI applications with ZETIC.MLange", "raw": "2. Developing on-device AI applications with ZETIC.MLange", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3. Guide to creating object detection apps utilizing Mobile NPUs", "raw": "3. Guide to creating object detection apps utilizing Mobile NPUs", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“ฑ Learn how to build a high-performance face detection app that operates entirely on-device, no cloud required! We explore:", "raw": "๐Ÿ“ฑ Learn how to build a high-performance face detection app that operates entirely on-device, no cloud required! 
We explore:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Real-time face analysis techniques", "raw": "- Real-time face analysis techniques", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Enhanced security measures", "raw": "- Enhanced security measures", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Privacy protection strategies", "raw": "- Privacy protection strategies", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐ŸŽฅ Check out our demo video showcasing real-time face detection:", "raw": "๐ŸŽฅ Check out our demo video showcasing real-time face detection:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Face detection Demo: ", "raw": "Face detection Demo: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://youtu.be/GXtJKk7MdjQ?si=41UFKL8IBwx5nlxs", "href": "https://youtu.be/GXtJKk7MdjQ?si=41UFKL8IBwx5nlxs", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“š Full Tutorial", "raw": "๐Ÿ“š Full Tutorial", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "For a step-by-step guide and in-depth discussion, read our full blog post:", "raw": "For a step-by-step guide and in-depth discussion, read our full blog post:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": 
"https://zetic.ai/blog/implementing-face-detection-on-device-ai-with-zetic-mlange", "href": "https://zetic.ai/blog/implementing-face-detection-on-device-ai-with-zetic-mlange", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "We'd love to hear your thoughts, experiences, or questions in the comments below.", "raw": "We'd love to hear your thoughts, experiences, or questions in the comments below.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
๐Ÿ‘‹ We're excited to share our project on mobile face detection using MediaPipe and ZETIC.MLange. ๐Ÿ”‘ Key highlights: 1. Introduction to Mediapipe face detection model 2. Developing on-device AI applications with ZETIC.MLange 3. Guide to creating object detection apps utilizing Mobile NPUs ๐Ÿ“ฑ Learn how to build a high-performance face detection app that operates entirely on-device, no cloud required! We explore: - Real-time face analysis techniques - Enhanced security measures - Privacy protection strategies ๐ŸŽฅ Check out our demo video showcasing real-time face detection: Face detection Demo: https://youtu.be/GXtJKk7MdjQ?si=41UFKL8IBwx5nlxs ๐Ÿ“š Full Tutorial For a step-by-step guide and in-depth discussion, read our full blog post: https://zetic.ai/blog/implementing-face-detection-on-device-ai-with-zetic-mlange We'd love to hear your thoughts, experiences, or questions in the comments below.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/657566a76da136b50faaa48c/EvXVCEchiFsUiuLefhWsT.png", "fullname": "Yeonseok Kim", "name": "yeonseok-zeticai", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 6, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/657566a76da136b50faaa48c/c_IRECEfToNos_HEgt8Qp.mp4" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "yeonseok-zeticai", "John6666", "JJen889" ], "count": 3 }, { "reaction": "๐Ÿ”ฅ", "users": [ "yeonseok-zeticai", "JJen889" ], "count": 2 }, { "reaction": "๐Ÿš€", "users": [ "yeonseok-zeticai", "JJen889" ], "count": 2 }, { "reaction": "โค๏ธ", "users": [ "yeonseok-zeticai", "JJen889" ], "count": 2 }, { "reaction": "๐Ÿง ", "users": [ "yeonseok-zeticai", "JJen889" ], "count": 2 }, { "reaction": "๐Ÿค—", "users": [ "yeonseok-zeticai", "JJen889" ], "count": 2 }, { "reaction": "๐Ÿ˜Ž", "users": [ "yeonseok-zeticai", "JJen889" ], "count": 2 }, { "reaction": "๐Ÿ‘", "users": [ "yeonseok-zeticai", "JJen889" ], "count": 2 } ]
2024-10-15T09:21:42.000Z
2024-10-15T09:22:59.776Z
[]
/posts/yeonseok-zeticai/653150173791642
1,120
0
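The post above is an announcement of an on-device demo, so the ZETIC.MLange/NPU deployment itself cannot be reproduced here. Purely to illustrate the MediaPipe face detection model it builds on, here is a minimal Python sketch using the classic `mediapipe` solutions API on a single image; the file name is a placeholder.

```python
# Minimal sketch: MediaPipe face detection on one image (desktop Python).
# This only illustrates the detection model referenced in the post above;
# the ZETIC.MLange on-device/NPU pipeline is not shown. "face.jpg" is a placeholder.
import cv2
import mediapipe as mp

image = cv2.imread("face.jpg")  # BGR image loaded from disk

with mp.solutions.face_detection.FaceDetection(
    model_selection=0, min_detection_confidence=0.5
) as detector:
    # MediaPipe expects RGB input, so convert before processing.
    results = detector.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))

if results.detections:
    for det in results.detections:
        box = det.location_data.relative_bounding_box  # normalized [0, 1] coordinates
        print(det.score[0], box.xmin, box.ymin, box.width, box.height)
```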
574917568042760
[ { "type": "text", "value": "Good folks from Universitat Politรจcnica de Catalunya, University of Groningen, and Meta have released \"A Primer on the Inner Workings of Transformer-based Language Models.\"", "raw": "Good folks from Universitat Politรจcnica de Catalunya, University of Groningen, and Meta have released \"A Primer on the Inner Workings of Transformer-based Language Models.\"", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "They don't make survey papers like they used to, but this is an exciting new survey on Transformer LM interpretability!", "raw": "They don't make survey papers like they used to, but this is an exciting new survey on Transformer LM interpretability!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This comprehensive survey provides a technical deep dive into:", "raw": "This comprehensive survey provides a technical deep dive into:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€ข Transformer architecture components (attention, FFN, residual stream)", "raw": "โ€ข Transformer architecture components (attention, FFN, residual stream)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€ข Methods for localizing model behavior:", "raw": "โ€ข Methods for localizing model behavior:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - Input attribution (gradient & perturbation-based)", "raw": " - Input attribution (gradient & perturbation-based)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - Component importance (logit attribution, causal interventions)", "raw": " - Component importance (logit attribution, causal interventions)", "href": null, "resource": null, "url": null, "code": null, "user": null, 
"label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€ข Information decoding techniques:", "raw": "โ€ข Information decoding techniques:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - Probing, linear feature analysis", "raw": " - Probing, linear feature analysis", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - Sparse autoencoders for disentangling features", "raw": " - Sparse autoencoders for disentangling features", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โ€ข Key insights on model internals:", "raw": "โ€ข Key insights on model internals:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - Attention mechanisms (induction heads, copy suppression)", "raw": " - Attention mechanisms (induction heads, copy suppression)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - FFN neuron behaviors ", "raw": " - FFN neuron behaviors ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - Residual stream properties", "raw": " - Residual stream properties", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - Multi-component emergent behaviors", "raw": " - Multi-component emergent behaviors", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The paper offers a unified notation and connects insights across different areas of interpretability research. 
It's a must-read for anyone working on understanding large language models!", "raw": "The paper offers a unified notation and connects insights across different areas of interpretability research. It's a must-read for anyone working on understanding large language models!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Some fascinating technical highlights:", "raw": "Some fascinating technical highlights:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Detailed breakdowns of attention head circuits (e.g., IOI task)", "raw": "- Detailed breakdowns of attention head circuits (e.g., IOI task)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Analysis of factual recall mechanisms ", "raw": "- Analysis of factual recall mechanisms ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Overview of polysemanticity and superposition", "raw": "- Overview of polysemanticity and superposition", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Discussion of grokking as circuit emergence", "raw": "- Discussion of grokking as circuit emergence", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "What interpretability insights do you find most intriguing?", "raw": "What interpretability insights do you find most intriguing?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Good folks from Universitat Politรจcnica de Catalunya, University of Groningen, and Meta have released "A Primer on the Inner Workings of Transformer-based Language Models." They don't make survey papers like they used to, but this is an exciting new survey on Transformer LM interpretability! This comprehensive survey provides a technical deep dive into: โ€ข Transformer architecture components (attention, FFN, residual stream) โ€ข Methods for localizing model behavior: - Input attribution (gradient & perturbation-based) - Component importance (logit attribution, causal interventions) โ€ข Information decoding techniques: - Probing, linear feature analysis - Sparse autoencoders for disentangling features โ€ข Key insights on model internals: - Attention mechanisms (induction heads, copy suppression) - FFN neuron behaviors - Residual stream properties - Multi-component emergent behaviors The paper offers a unified notation and connects insights across different areas of interpretability research. It's a must-read for anyone working on understanding large language models! Some fascinating technical highlights: - Detailed breakdowns of attention head circuits (e.g., IOI task) - Analysis of factual recall mechanisms - Overview of polysemanticity and superposition - Discussion of grokking as circuit emergence What interpretability insights do you find most intriguing?
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662bf5bfe93bb73804ef9344/WXYLnjjJ4SROkoveIi7If.png", "fullname": "Kuldeep Singh Sidhu", "name": "singhsidhukuldeep", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 219, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/662bf5bfe93bb73804ef9344/J4s72EfTjBjzT-uUbKxWx.jpeg" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-10-15T06:35:26.000Z
2024-10-15T06:35:26.473Z
[]
/posts/singhsidhukuldeep/574917568042760
545
0
137840669116792
[ { "type": "text", "value": "Transformers are not all we need, that is being proven repeatedly now as more alternative frameworks emerge. Another such framework is Kolmogorov Arnold Network based Transformers. I break down exactly how these differ from Perceptron based Transformers and give you the link to my Colab where I create a model based on the research paper that absolutely destroys a standard Transformers based model. Check out the video here: ", "raw": "Transformers are not all we need, that is being proven repeatedly now as more alternative frameworks emerge. Another such framework is Kolmogorov Arnold Network based Transformers. I break down exactly how these differ from Perceptron based Transformers and give you the link to my Colab where I create a model based on the research paper that absolutely destroys a standard Transformers based model. Check out the video here: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.youtube.com/watch?v=Sw0euxNZCc4", "href": "https://www.youtube.com/watch?v=Sw0euxNZCc4", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Transformers are not all we need, that is being proven repeatedly now as more alternative frameworks emerge. Another such framework is Kolmogorov Arnold Network based Transformers. I break down exactly how these differ from Perceptron based Transformers and give you the link to my Colab where I create a model based on the research paper that absolutely destroys a standard Transformers based model. Check out the video here: https://www.youtube.com/watch?v=Sw0euxNZCc4
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png", "fullname": "Richard A Aragon", "name": "TuringsSolutions", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 146, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ˜”", "users": [ "takeraparterer", "projektkush", "blenderwang" ], "count": 3 }, { "reaction": "๐Ÿ‘€", "users": [ "John6666", "Srilal", "djuna" ], "count": 3 }, { "reaction": "โค๏ธ", "users": [ "ZeroWw" ], "count": 1 } ]
2024-10-15T00:08:03.000Z
2024-10-15T00:08:03.915Z
[]
/posts/TuringsSolutions/137840669116792
2,104
0
454399238766446
[ { "type": "text", "value": "To demonstrate that it was possible, I performed a \"trapezoid\" gradient merge of a Llama 3 8B model onto Llama 3.1 8B Instruct, favoring the L3.1 model at the ends in order to preserve coherence and limiting the influence of the L3 model to at most 0.1 weight. Tested to 16k context length.", "raw": "To demonstrate that it was possible, I performed a \"trapezoid\" gradient merge of a Llama 3 8B model onto Llama 3.1 8B Instruct, favoring the L3.1 model at the ends in order to preserve coherence and limiting the influence of the L3 model to at most 0.1 weight. Tested to 16k context length.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/grimjim/Llama-Nephilim-Metamorphosis-v2-8B", "href": null, "resource": { "type": "model", "id": "grimjim/Llama-Nephilim-Metamorphosis-v2-8B", "discussionNum": null }, "url": "https://huggingface.co/grimjim/Llama-Nephilim-Metamorphosis-v2-8B", "code": null, "user": null, "label": null, "lang": null } ]
To demonstrate that it was possible, I performed a "trapezoid" gradient merge of a Llama 3 8B model onto Llama 3.1 8B Instruct, favoring the L3.1 model at the ends in order to preserve coherence and limiting the influence of the L3 model to at most 0.1 weight. Tested to 16k context length. https://huggingface.co/grimjim/Llama-Nephilim-Metamorphosis-v2-8B
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65c992424936ab38ecf706b0/aq7vuHFPO1S93fwJk0Cuq.jpeg", "fullname": "Jim Lai", "name": "grimjim", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 166, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ˜Ž", "users": [ "John6666", "TuringsSolutions" ], "count": 2 } ]
2024-10-14T21:39:14.000Z
2024-10-14T21:42:00.156Z
[]
/posts/grimjim/454399238766446
1,964
0
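The merge above was produced with an existing merge toolkit whose exact configuration is not included in the post. To make the "trapezoid" idea concrete (zero contribution from the donor Llama 3 model at the first and last layers, ramping up to a 0.1 cap in the middle), here is a hedged NumPy sketch of a per-layer blend schedule; the layer count and ramp length are illustrative assumptions, not values from the post.

```python
# Sketch of a "trapezoid" per-layer blend schedule: the donor model's weight is
# 0.0 at both ends of the layer stack, ramps up, and is capped at 0.1 in the
# middle. The layer count (32) and ramp length (4) are assumptions for
# illustration only, not values taken from the post above.
import numpy as np

def trapezoid_schedule(num_layers: int = 32, ramp: int = 4, peak: float = 0.1) -> np.ndarray:
    t = np.zeros(num_layers)
    for i in range(num_layers):
        dist_from_edge = min(i, num_layers - 1 - i)
        t[i] = peak * min(1.0, dist_from_edge / ramp)
    return t  # t[i] = fraction of the donor (L3) model mixed into layer i

schedule = trapezoid_schedule()
print(schedule.round(3))
# A merged layer i would then be: (1 - t[i]) * base_layer_i + t[i] * donor_layer_i
```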
475952015556168
[ { "type": "text", "value": "๐ŸŽ™ Introducing LiveATC Recordings (Partial 2024-08-26) Dataset - ", "raw": "๐ŸŽ™ Introducing LiveATC Recordings (Partial 2024-08-26) Dataset - ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/nyuuzyou/liveatc", "href": null, "resource": { "type": "dataset", "id": "nyuuzyou/liveatc", "discussionNum": null }, "url": "https://huggingface.co/datasets/nyuuzyou/liveatc", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Dataset highlights: ", "raw": "Dataset highlights: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- 21,172 air traffic control audio recordings from LiveATC.net for August 26, 2024", "raw": "- 21,172 air traffic control audio recordings from LiveATC.net for August 26, 2024", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Multilingual content, primarily in English with potential for other languages", "raw": "- Multilingual content, primarily in English with potential for other languages", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Each entry includes: audio file, ICAO airport code, facility type, date, and time", "raw": "- Each entry includes: audio file, ICAO airport code, facility type, date, and time", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Contains original MP3 files stored in .tar.zst archives, organized by ICAO airport code", "raw": "- Contains original MP3 files stored in .tar.zst archives, organized by ICAO airport code", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Data covers various airports and ATC facilities worldwide", "raw": "- Data covers various airports and ATC facilities worldwide", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", 
"value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Subject to LiveATC.net's Terms of Use for personal, non-commercial use only", "raw": "- Subject to LiveATC.net's Terms of Use for personal, non-commercial use only", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The dataset can be used for audio classification, automatic speech recognition, and analysis of air traffic control communications. The inclusion of recordings from multiple airports allows for comparative analysis across different locations and facility types.", "raw": "The dataset can be used for audio classification, automatic speech recognition, and analysis of air traffic control communications. The inclusion of recordings from multiple airports allows for comparative analysis across different locations and facility types.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
๐ŸŽ™ Introducing LiveATC Recordings (Partial 2024-08-26) Dataset - https://huggingface.co/datasets/nyuuzyou/liveatc Dataset highlights: - 21,172 air traffic control audio recordings from LiveATC.net for August 26, 2024 - Multilingual content, primarily in English with potential for other languages - Each entry includes: audio file, ICAO airport code, facility type, date, and time - Contains original MP3 files stored in .tar.zst archives, organized by ICAO airport code - Data covers various airports and ATC facilities worldwide - Subject to LiveATC.net's Terms of Use for personal, non-commercial use only The dataset can be used for audio classification, automatic speech recognition, and analysis of air traffic control communications. The inclusion of recordings from multiple airports allows for comparative analysis across different locations and facility types.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/643ac5d2e2b979ae6144d68c/Z7PCNopn4cQeAYnVJDoqG.png", "fullname": "nyuuzyou", "name": "nyuuzyou", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 57, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "monsoon-nlp" ], "count": 2 }, { "reaction": "๐Ÿš€", "users": [ "manoumhd99" ], "count": 1 } ]
2024-10-14T20:50:03.000Z
2024-10-14T20:50:03.498Z
[]
/posts/nyuuzyou/475952015556168
1,562
0
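The dataset described above ships its audio as MP3 files packed into .tar.zst archives organized by ICAO airport code. As a rough sketch of how such an archive could be streamed in Python, the snippet below lists MP3 members without unpacking everything to disk; the archive name "KJFK.tar.zst" is a placeholder assumption, and the third-party `zstandard` package must be installed.

```python
# Minimal sketch: listing the MP3 members of one .tar.zst archive from the
# dataset described above. "KJFK.tar.zst" is a placeholder name assumed to
# follow the per-ICAO-code layout mentioned in the post.
import tarfile
import zstandard

with open("KJFK.tar.zst", "rb") as fh:
    dctx = zstandard.ZstdDecompressor()
    with dctx.stream_reader(fh) as reader:
        # "r|" opens the tar in streaming mode, which works on non-seekable input.
        with tarfile.open(fileobj=reader, mode="r|") as tar:
            for member in tar:
                if member.name.endswith(".mp3"):
                    print(member.name, member.size)
```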
161655174248232
[ { "type": "text", "value": "โšก๏ธ ๐“๐ก๐ข๐ฌ ๐ฆ๐จ๐ง๐ญ๐ก'๐ฌ ๐ฆ๐จ๐ฌ๐ญ ๐ข๐ฆ๐ฉ๐จ๐ซ๐ญ๐š๐ง๐ญ ๐›๐ซ๐ž๐š๐ค๐ญ๐ก๐ซ๐จ๐ฎ๐ ๐ก: ๐ƒ๐ข๐Ÿ๐Ÿ๐ž๐ซ๐ž๐ง๐ญ๐ข๐š๐ฅ ๐“๐ซ๐š๐ง๐ฌ๐Ÿ๐จ๐ซ๐ฆ๐ž๐ซ ๐ฏ๐š๐ฌ๐ญ๐ฅ๐ฒ ๐ข๐ฆ๐ฉ๐ซ๐จ๐ฏ๐ž๐ฌ ๐š๐ญ๐ญ๐ž๐ง๐ญ๐ข๐จ๐ง โ‡’ ๐›๐ž๐ญ๐ญ๐ž๐ซ ๐ซ๐ž๐ญ๐ซ๐ข๐ž๐ฏ๐š๐ฅ ๐š๐ง๐ ๐Ÿ๐ž๐ฐ๐ž๐ซ ๐ก๐š๐ฅ๐ฅ๐ฎ๐œ๐ข๐ง๐š๐ญ๐ข๐จ๐ง๐ฌ!", "raw": "โšก๏ธ ๐“๐ก๐ข๐ฌ ๐ฆ๐จ๐ง๐ญ๐ก'๐ฌ ๐ฆ๐จ๐ฌ๐ญ ๐ข๐ฆ๐ฉ๐จ๐ซ๐ญ๐š๐ง๐ญ ๐›๐ซ๐ž๐š๐ค๐ญ๐ก๐ซ๐จ๐ฎ๐ ๐ก: ๐ƒ๐ข๐Ÿ๐Ÿ๐ž๐ซ๐ž๐ง๐ญ๐ข๐š๐ฅ ๐“๐ซ๐š๐ง๐ฌ๐Ÿ๐จ๐ซ๐ฆ๐ž๐ซ ๐ฏ๐š๐ฌ๐ญ๐ฅ๐ฒ ๐ข๐ฆ๐ฉ๐ซ๐จ๐ฏ๐ž๐ฌ ๐š๐ญ๐ญ๐ž๐ง๐ญ๐ข๐จ๐ง โ‡’ ๐›๐ž๐ญ๐ญ๐ž๐ซ ๐ซ๐ž๐ญ๐ซ๐ข๐ž๐ฏ๐š๐ฅ ๐š๐ง๐ ๐Ÿ๐ž๐ฐ๐ž๐ซ ๐ก๐š๐ฅ๐ฅ๐ฎ๐œ๐ข๐ง๐š๐ญ๐ข๐จ๐ง๐ฌ!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Thought that self-attention could not be improved anymore?", "raw": "Thought that self-attention could not be improved anymore?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Microsoft researchers have dropped a novel \"differential attention\" mechanism that amplifies focus on relevant context while canceling out noise. It sounds like a free lunch, but it does really seem to vastly improve LLM performance!", "raw": "Microsoft researchers have dropped a novel \"differential attention\" mechanism that amplifies focus on relevant context while canceling out noise. 
It sounds like a free lunch, but it does really seem to vastly improve LLM performance!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐—ž๐—ฒ๐˜† ๐—ถ๐—ป๐˜€๐—ถ๐—ด๐—ต๐˜๐˜€:", "raw": "๐—ž๐—ฒ๐˜† ๐—ถ๐—ป๐˜€๐—ถ๐—ด๐—ต๐˜๐˜€:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿง  Differential attention computes the difference between two separate softmax attention maps, canceling out noise and promoting sparse attention patterns", "raw": "๐Ÿง  Differential attention computes the difference between two separate softmax attention maps, canceling out noise and promoting sparse attention patterns", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ”ฅ DIFF Transformer outperforms standard Transformers while using 35-40% fewer parameters or training tokens", "raw": "๐Ÿ”ฅ DIFF Transformer outperforms standard Transformers while using 35-40% fewer parameters or training tokens", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“ Scales well to long contexts up to 64K tokens, leveraging increasing context length more effectively", "raw": "๐Ÿ“ Scales well to long contexts up to 64K tokens, leveraging increasing context length more effectively", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ”Ž Dramatically improves key information retrieval, enhancing in-context learning, and possibly reducing risk of hallucinations ๐Ÿคฏ", "raw": "๐Ÿ”Ž Dramatically improves key information retrieval, enhancing in-context learning, and possibly reducing risk of hallucinations ๐Ÿคฏ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, 
"lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ”ข Reduces activation outliers, potentially enabling lower-bit quantization without performance drop!", "raw": "๐Ÿ”ข Reduces activation outliers, potentially enabling lower-bit quantization without performance drop!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "โš™๏ธ Can be directly implemented using existing FlashAttention kernels", "raw": "โš™๏ธ Can be directly implemented using existing FlashAttention kernels", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This new architecture could lead much more capable LLMs, with vastly improved strengths in long-context understanding and factual accuracy.", "raw": "This new architecture could lead much more capable LLMs, with vastly improved strengths in long-context understanding and factual accuracy.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "But they didnโ€™t release weights on the Hub: letโ€™s wait for the community to train the first open-weights DiffTransformer! ๐Ÿš€", "raw": "But they didnโ€™t release weights on the Hub: letโ€™s wait for the community to train the first open-weights DiffTransformer! ๐Ÿš€", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Read their paper ๐Ÿ‘‰ย ", "raw": "Read their paper ๐Ÿ‘‰ย ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2410.05258", "href": null, "resource": { "type": "paper", "id": "2410.05258", "discussionNum": null }, "url": "https://huggingface.co/papers/2410.05258", "code": null, "user": null, "label": "Differential Transformer (2410.05258)", "lang": null } ]
โšก๏ธ ๐“๐ก๐ข๐ฌ ๐ฆ๐จ๐ง๐ญ๐ก'๐ฌ ๐ฆ๐จ๐ฌ๐ญ ๐ข๐ฆ๐ฉ๐จ๐ซ๐ญ๐š๐ง๐ญ ๐›๐ซ๐ž๐š๐ค๐ญ๐ก๐ซ๐จ๐ฎ๐ ๐ก: ๐ƒ๐ข๐Ÿ๐Ÿ๐ž๐ซ๐ž๐ง๐ญ๐ข๐š๐ฅ ๐“๐ซ๐š๐ง๐ฌ๐Ÿ๐จ๐ซ๐ฆ๐ž๐ซ ๐ฏ๐š๐ฌ๐ญ๐ฅ๐ฒ ๐ข๐ฆ๐ฉ๐ซ๐จ๐ฏ๐ž๐ฌ ๐š๐ญ๐ญ๐ž๐ง๐ญ๐ข๐จ๐ง โ‡’ ๐›๐ž๐ญ๐ญ๐ž๐ซ ๐ซ๐ž๐ญ๐ซ๐ข๐ž๐ฏ๐š๐ฅ ๐š๐ง๐ ๐Ÿ๐ž๐ฐ๐ž๐ซ ๐ก๐š๐ฅ๐ฅ๐ฎ๐œ๐ข๐ง๐š๐ญ๐ข๐จ๐ง๐ฌ! Thought that self-attention could not be improved anymore? Microsoft researchers have dropped a novel "differential attention" mechanism that amplifies focus on relevant context while canceling out noise. It sounds like a free lunch, but it does really seem to vastly improve LLM performance! ๐—ž๐—ฒ๐˜† ๐—ถ๐—ป๐˜€๐—ถ๐—ด๐—ต๐˜๐˜€: ๐Ÿง  Differential attention computes the difference between two separate softmax attention maps, canceling out noise and promoting sparse attention patterns ๐Ÿ”ฅ DIFF Transformer outperforms standard Transformers while using 35-40% fewer parameters or training tokens ๐Ÿ“ Scales well to long contexts up to 64K tokens, leveraging increasing context length more effectively ๐Ÿ”Ž Dramatically improves key information retrieval, enhancing in-context learning, and possibly reducing risk of hallucinations ๐Ÿคฏ ๐Ÿ”ข Reduces activation outliers, potentially enabling lower-bit quantization without performance drop! โš™๏ธ Can be directly implemented using existing FlashAttention kernels This new architecture could lead much more capable LLMs, with vastly improved strengths in long-context understanding and factual accuracy. But they didnโ€™t release weights on the Hub: letโ€™s wait for the community to train the first open-weights DiffTransformer! ๐Ÿš€ Read their paper ๐Ÿ‘‰ย https://huggingface.co/papers/2410.05258
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d10d4e8eaa4831005e92b5/7p7-OmWM6PqqCs7ZStPGD.jpeg", "fullname": "Aymeric Roucher", "name": "m-ric", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 494, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/63d10d4e8eaa4831005e92b5/1V96S9a8HMQAnrh-qeIqx.png" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "louisbrulenaudet" ], "count": 2 }, { "reaction": "๐Ÿง ", "users": [ "John6666" ], "count": 1 } ]
2024-10-14T15:52:59.000Z
2024-10-14T15:58:05.327Z
[]
/posts/m-ric/161655174248232
719
0
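The post above summarizes the mechanism but notes that no weights were released on the Hub, so the following is only a heavily simplified, single-head PyTorch sketch of the core idea as described there: the difference of two softmax attention maps, scaled by a learnable lambda, applied to V. The paper's lambda re-parameterization, multi-head splitting, per-head normalization, and causal masking are omitted, so treat it as an illustration rather than a faithful reimplementation.

```python
# Simplified single-head sketch of differential attention: two independent
# softmax attention maps are computed and their lambda-weighted difference is
# applied to V. The paper's lambda re-parameterization, head splitting,
# per-head normalization, and causal masking are intentionally left out.
import torch
import torch.nn as nn
import torch.nn.functional as F

class DiffAttentionSketch(nn.Module):
    def __init__(self, d_model: int):
        super().__init__()
        self.q_proj = nn.Linear(d_model, 2 * d_model, bias=False)  # produces Q1 and Q2
        self.k_proj = nn.Linear(d_model, 2 * d_model, bias=False)  # produces K1 and K2
        self.v_proj = nn.Linear(d_model, d_model, bias=False)
        self.lam = nn.Parameter(torch.tensor(0.5))                 # simplified scalar lambda
        self.scale = d_model ** -0.5

    def forward(self, x):                       # x: (batch, seq, d_model)
        q1, q2 = self.q_proj(x).chunk(2, dim=-1)
        k1, k2 = self.k_proj(x).chunk(2, dim=-1)
        v = self.v_proj(x)
        a1 = F.softmax(q1 @ k1.transpose(-2, -1) * self.scale, dim=-1)
        a2 = F.softmax(q2 @ k2.transpose(-2, -1) * self.scale, dim=-1)
        return (a1 - self.lam * a2) @ v         # common-mode attention noise is subtracted out

x = torch.randn(1, 8, 64)
print(DiffAttentionSketch(64)(x).shape)         # torch.Size([1, 8, 64])
```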
516298642216340
[ { "type": "text", "value": "Huge FLUX LoRA vs Fine Tuning / DreamBooth Experiments Completed, Moreover Batch Size 1 vs 7 Fully Tested as Well, Not Only for Realism But Also for Stylization - 15 vs 256 images having datasets compared as well (expressions / emotions tested too) - Used Kohya GUI for training", "raw": "Huge FLUX LoRA vs Fine Tuning / DreamBooth Experiments Completed, Moreover Batch Size 1 vs 7 Fully Tested as Well, Not Only for Realism But Also for Stylization - 15 vs 256 images having datasets compared as well (expressions / emotions tested too) - Used Kohya GUI for training", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Full files and article : ", "raw": "Full files and article : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.patreon.com/posts/112099700", "href": "https://www.patreon.com/posts/112099700", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Download images in full resolution to see prompts and model names", "raw": "Download images in full resolution to see prompts and model names", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "All trainings are done with Kohya GUI, perfectly can be done locally on Windows, and all trainings were 1024x1024 pixels", "raw": "All trainings are done with Kohya GUI, perfectly can be done locally on Windows, and all trainings were 1024x1024 pixels", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Fine Tuning / DreamBooth works as low as 6 GB GPUs (0 quality degrade totally same as 48 GB config)", "raw": "Fine Tuning / DreamBooth works as low as 6 GB GPUs (0 quality degrade totally same as 48 GB config)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, 
"raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Best quality of LoRA requires 48 GB GPUs , 24 GB also works really good and minimum 8 GB GPU is necessary for LoRA (lots of quality degrade)", "raw": "Best quality of LoRA requires 48 GB GPUs , 24 GB also works really good and minimum 8 GB GPU is necessary for LoRA (lots of quality degrade)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Full size grids are also shared for the followings: ", "raw": "Full size grids are also shared for the followings: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.patreon.com/posts/112099700", "href": "https://www.patreon.com/posts/112099700", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Additionally, I have shared full training entire logs that you can see each checkpoint took time. I have shared best checkpoints, their step count and took time according to being either LoRA, Fine Tuning or Batch size 1 or 7 or 15 images or 256 images, so a very detailed article regarding completed.", "raw": "Additionally, I have shared full training entire logs that you can see each checkpoint took time. 
I have shared best checkpoints, their step count and took time according to being either LoRA, Fine Tuning or Batch size 1 or 7 or 15 images or 256 images, so a very detailed article regarding completed.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Check the images to see all shared files in the post.", "raw": "Check the images to see all shared files in the post.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Furthermore, a very very detailed analysis having article written and all latest DreamBooth / Fine Tuning configs and LoRA configs are shared with Kohya GUI installers for both Windows, Runpod and Massed Compute.", "raw": "Furthermore, a very very detailed analysis having article written and all latest DreamBooth / Fine Tuning configs and LoRA configs are shared with Kohya GUI installers for both Windows, Runpod and Massed Compute.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Moreover, I have shared new 28 realism and 37 stylization testing prompts.", "raw": "Moreover, I have shared new 28 realism and 37 stylization testing prompts.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Current tutorials are as below:", "raw": "Current tutorials are as below:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Windows requirements CUDA, Python, cuDNN, and such : ", "raw": "Windows requirements CUDA, Python, cuDNN, and such : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://youtu.be/DrhUHnYfwC0", "href": "https://youtu.be/DrhUHnYfwC0", "resource": 
null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "How to use SwarmUI : ", "raw": "How to use SwarmUI : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://youtu.be/HKX8_F1Er_w", "href": "https://youtu.be/HKX8_F1Er_w", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "How to use FLUX on SwarmUI : ", "raw": "How to use FLUX on SwarmUI : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://youtu.be/bupRePUOA18", "href": "https://youtu.be/bupRePUOA18", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "How to use Kohya GUI for FLUX training : ", "raw": "How to use Kohya GUI for FLUX training : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://youtu.be/nySGu12Y05k", "href": "https://youtu.be/nySGu12Y05k", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "How to use Kohya GUI for FLUX training on Cloud (RunPod and Massed Compute) : ", "raw": "How to use Kohya GUI for FLUX training on Cloud (RunPod and Massed Compute) : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://youtu.be/-uhL2nW7Ddw", "href": "https://youtu.be/-uhL2nW7Ddw", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Huge FLUX LoRA vs Fine Tuning / DreamBooth experiments completed; batch size 1 vs 7 fully tested as well, not only for realism but also for stylization, and 15-image vs 256-image datasets compared too (expressions / emotions tested as well). Used Kohya GUI for training. Full files and article: https://www.patreon.com/posts/112099700 Download the images in full resolution to see prompts and model names. All trainings were done with Kohya GUI, can be done entirely locally on Windows, and all trainings were at 1024x1024 pixels. Fine Tuning / DreamBooth works on GPUs with as little as 6 GB (no quality degradation, identical to the 48 GB config). Best LoRA quality requires 48 GB GPUs; 24 GB also works really well, and a minimum 8 GB GPU is necessary for LoRA (with significant quality degradation). Full-size grids are also shared for the following: https://www.patreon.com/posts/112099700 Additionally, I have shared the complete training logs so you can see how long each checkpoint took. I have shared the best checkpoints, their step counts and training times for LoRA vs Fine Tuning, batch size 1 vs 7, and 15-image vs 256-image datasets, so a very detailed article has been completed. Check the images to see all files shared in the post. Furthermore, a very detailed analysis article has been written, and all the latest DreamBooth / Fine Tuning configs and LoRA configs are shared along with Kohya GUI installers for Windows, RunPod and Massed Compute. Moreover, I have shared 28 new realism and 37 new stylization testing prompts. Current tutorials are as below: Windows requirements (CUDA, Python, cuDNN, and such): https://youtu.be/DrhUHnYfwC0 How to use SwarmUI: https://youtu.be/HKX8_F1Er_w How to use FLUX on SwarmUI: https://youtu.be/bupRePUOA18 How to use Kohya GUI for FLUX training: https://youtu.be/nySGu12Y05k How to use Kohya GUI for FLUX training on Cloud (RunPod and Massed Compute): https://youtu.be/-uhL2nW7Ddw
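The post above compares LoRA against full Fine Tuning / DreamBooth, but the trainings themselves were done in Kohya GUI, whose configs are linked rather than shown. As a hedged illustration of what a LoRA adapter actually adds to a base model (not the author's Kohya configuration; the base model, rank, alpha, and target module names below are illustrative assumptions), a minimal sketch with the Hugging Face peft library could look like this:

```python
# Minimal LoRA sketch using the `peft` library (illustrative only; the post's
# actual trainings were done with Kohya GUI, not this code).
from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model

# Hypothetical small base model; FLUX itself is a diffusion transformer and is
# not loaded this way -- this only demonstrates the LoRA mechanism.
base = AutoModelForCausalLM.from_pretrained("gpt2")

lora_cfg = LoraConfig(
    r=16,                        # adapter rank (assumed value)
    lora_alpha=32,               # scaling factor (assumed value)
    target_modules=["c_attn"],   # which layers receive low-rank adapters
    lora_dropout=0.05,
)

model = get_peft_model(base, lora_cfg)
model.print_trainable_parameters()  # only the small adapter matrices are trainable
```

The VRAM figures in the post reflect training the full model versus only such low-rank adapters; the sketch only shows why the trainable parameter count drops so sharply in the LoRA case.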
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1672531901326-6345bd89fe134dfd7a0dba40.png", "fullname": "Furkan Gรถzรผkara", "name": "MonsterMMORPG", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 376, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/LlnTARAF_faF1bumcXo4i.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/IE6OvgGGo2todmrHSw6Xy.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/WvD5xIe6Di9kla3IW6mpc.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/lvFowmeGHNXTyXjGqIlHa.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/gz55qgXznBhr9iPrtvc6v.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/QXaazRAf43A5WhcYeRrgV.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/Fq4SoHQ_q-MKHT1_dLnb9.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/Arv5M-wVy9y6RrTZ0ajOD.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/t3Jgr99dN0W6gBFLYW2pg.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/dEbIky9rwavozIqZGdtPM.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/NCDDLkQLI4IrzbRS0NWs0.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/A96jiKjNqWL7kU56moxgQ.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/4qJ8tQxqG6A4tgJ0HuwxO.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/_pw_juMtRrvdJNHsM7984.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/JGWBk6oVkUpele6en4wt7.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/a_6_4VuKrAFIbA3jkOsaX.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/qIaTvMKefwjmXAeB-kvqh.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/-vv1nQxgjA5b3aeO-MHdu.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/m-C3U1OM5GLu4Ze52vtYr.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/ep9flZrWc9o2GR83ZieCK.jpeg" } ]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "MonsterMMORPG", "nisten", "myblackbox83", "haonanzhang", "louisbrulenaudet", "Shivam77", "kz919" ], "count": 7 }, { "reaction": "๐Ÿš€", "users": [ "MonsterMMORPG", "RohanOfficial", "John6666", "kz919" ], "count": 4 }, { "reaction": "๐Ÿ‘€", "users": [ "MonsterMMORPG", "kz919" ], "count": 2 }, { "reaction": "โค๏ธ", "users": [ "MonsterMMORPG", "kz919" ], "count": 2 }, { "reaction": "๐Ÿค—", "users": [ "MonsterMMORPG", "kz919" ], "count": 2 }, { "reaction": "๐Ÿ˜Ž", "users": [ "MonsterMMORPG", "kz919" ], "count": 2 }, { "reaction": "โž•", "users": [ "MonsterMMORPG", "kz919" ], "count": 2 }, { "reaction": "๐Ÿง ", "users": [ "MonsterMMORPG", "kz919" ], "count": 2 }, { "reaction": "๐Ÿคฏ", "users": [ "MonsterMMORPG", "kz919" ], "count": 2 }, { "reaction": "๐Ÿค", "users": [ "MonsterMMORPG", "kz919" ], "count": 2 }, { "reaction": "๐Ÿ‘", "users": [ "MonsterMMORPG", "kz919" ], "count": 2 } ]
2024-10-14T15:44:38.000Z
2024-10-14T15:44:38.261Z
[]
/posts/MonsterMMORPG/516298642216340
2,935
0
482765432458188
[ { "type": "text", "value": "we have a leaderboard for video LLMs, and most of the top models are open ones! ", "raw": "we have a leaderboard for video LLMs, and most of the top models are open ones! ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/opencompass/openvlm_video_leaderboard", "href": null, "resource": { "type": "space", "id": "opencompass/openvlm_video_leaderboard", "discussionNum": null }, "url": "https://huggingface.co/spaces/opencompass/openvlm_video_leaderboard", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ๐Ÿ‘‘๐Ÿ‘", "raw": " ๐Ÿ‘‘๐Ÿ‘", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "we are so back ๐Ÿ”ฅ", "raw": "we are so back ๐Ÿ”ฅ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
we have a leaderboard for video LLMs, and most of the top models are open ones! https://huggingface.co/spaces/opencompass/openvlm_video_leaderboard ๐Ÿ‘‘๐Ÿ‘ we are so back ๐Ÿ”ฅ
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1613845120202-noauth.png", "fullname": "Merve Noyan", "name": "mervenoyan", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 80, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/60315285d2c57896177ce764/GxLM1Uazm-svBc4CkViBD.jpeg" } ]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "YaTharThShaRma999", "John6666", "alielfilali01", "louisbrulenaudet", "KingNish", "damerajee", "KennyUTC", "adorkin", "AtAndDev" ], "count": 9 } ]
2024-10-14T15:32:42.000Z
2024-10-14T15:33:22.489Z
[]
/posts/mervenoyan/482765432458188
2,235
0
951412373294480
[ { "type": "mention", "value": null, "raw": "@kenshinn", "href": null, "resource": null, "url": null, "code": null, "user": "kenshinn", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2410.08565", "href": null, "resource": { "type": "paper", "id": "2410.08565", "discussionNum": null }, "url": "https://huggingface.co/papers/2410.08565", "code": null, "user": null, "label": "Baichuan-Omni Technical Report (2410.08565)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "ไธบไป€ไนˆๆˆ‘ไผšๅœจไฝœ่€…ๅˆ—่กจไธŠ๏ผŒๆ˜ฏhuggingfaceไบบๅๅŒน้…็š„ๅ—๏ผŸ", "raw": "ไธบไป€ไนˆๆˆ‘ไผšๅœจไฝœ่€…ๅˆ—่กจไธŠ๏ผŒๆ˜ฏhuggingfaceไบบๅๅŒน้…็š„ๅ—๏ผŸ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
@kenshinn https://huggingface.co/papers/2410.08565 Why am I on the author list? Was it matched by huggingface username?
{ "avatarUrl": "/avatars/ee514cdab037c8f50058b4421954d919.svg", "fullname": "Ding Bowen", "name": "dbv", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false }
[]
[ { "avatarUrl": "/avatars/86a748a3264e6e0f4ee5eaf8f7032ecb.svg", "fullname": "Zhenglin Cheng", "name": "kenshinn", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 11 } ]
[]
2024-10-14T15:23:24.000Z
2024-10-15T07:10:18.750Z
[ { "avatarUrl": "/avatars/86a748a3264e6e0f4ee5eaf8f7032ecb.svg", "fullname": "Zhenglin Cheng", "name": "kenshinn", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 11, "isFollowing": false }, { "avatarUrl": "/avatars/ee514cdab037c8f50058b4421954d919.svg", "fullname": "Ding Bowen", "name": "dbv", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false }, { "avatarUrl": "/avatars/33d406b8d1f319af5a4e3c2dc59ea7f2.svg", "fullname": "Ding Bowen", "name": "Daniel21Ding", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2, "isFollowing": false } ]
/posts/dbv/951412373294480
296
6
354397421832244
[ { "type": "text", "value": "I have finally completed a working full Azure and Microsoft MS Graph API implementation which can use all the interesting MS AI features in M365 products to manage CRUD patterns for the graph features across products.", "raw": "I have finally completed a working full Azure and Microsoft MS Graph API implementation which can use all the interesting MS AI features in M365 products to manage CRUD patterns for the graph features across products.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This app shows initial implementation of security, authentication, scopes, and access to Outlook, Calendar, Tasks, Onedrive and other apps for CRUD pattern as AI agent service skills to integrate with your AI workflow.", "raw": "This app shows initial implementation of security, authentication, scopes, and access to Outlook, Calendar, Tasks, Onedrive and other apps for CRUD pattern as AI agent service skills to integrate with your AI workflow.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Below are initial screens showing integration:", "raw": "Below are initial screens showing integration:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "URL: ", "raw": "URL: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/awacke1/MSGraphAPI", "href": null, "resource": { "type": "space", "id": "awacke1/MSGraphAPI", "discussionNum": null }, "url": "https://huggingface.co/spaces/awacke1/MSGraphAPI", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Discussion: ", "raw": "Discussion: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/awacke1/MSGraphAPI/discussions/5", "href": null, "resource": { "type": "space", "id": "awacke1/MSGraphAPI", "discussionNum": 5 }, "url": "https://huggingface.co/spaces/awacke1/MSGraphAPI/discussions/5", 
"code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Best of AI on ", "raw": "Best of AI on ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@Azure", "href": null, "resource": null, "url": null, "code": null, "user": "Azure", "label": null, "lang": null }, { "type": "text", "value": " and ", "raw": " and ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@Microsoft", "href": null, "resource": null, "url": null, "code": null, "user": "Microsoft", "label": null, "lang": null }, { "type": "text", "value": " on ", "raw": " on ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@HuggingFace", "href": null, "resource": null, "url": null, "code": null, "user": "HuggingFace", "label": null, "lang": null }, { "type": "text", "value": " : ", "raw": " : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/microsoft", "href": "https://huggingface.co/microsoft", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.microsoft.com/en-us/research/", "href": "https://www.microsoft.com/en-us/research/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "---", "raw": "---", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Aaron", "raw": "Aaron", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
I have finally completed a working full Azure and Microsoft MS Graph API implementation which can use all the interesting MS AI features in M365 products to manage CRUD patterns for the graph features across products. This app shows initial implementation of security, authentication, scopes, and access to Outlook, Calendar, Tasks, Onedrive and other apps for CRUD pattern as AI agent service skills to integrate with your AI workflow. Below are initial screens showing integration: URL: https://huggingface.co/spaces/awacke1/MSGraphAPI Discussion: https://huggingface.co/spaces/awacke1/MSGraphAPI/discussions/5 Best of AI on @Azure and @Microsoft on @HuggingFace : https://huggingface.co/microsoft https://www.microsoft.com/en-us/research/ --- Aaron
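The post describes authentication, scopes, and CRUD calls against Microsoft Graph for Outlook, Calendar, Tasks, and OneDrive, but the Space's own code isn't shown here. As a hedged sketch of the general pattern only (client-credentials auth via the msal library followed by a plain Graph REST call; the tenant, client, and user IDs are placeholders, and this is not the Space's implementation):

```python
# Rough sketch of app-only auth against Microsoft Graph (placeholders only;
# not the MSGraphAPI Space's actual code).
import msal
import requests

TENANT_ID = "<your-tenant-id>"          # placeholder
CLIENT_ID = "<your-app-client-id>"      # placeholder
CLIENT_SECRET = "<your-client-secret>"  # placeholder

app = msal.ConfidentialClientApplication(
    CLIENT_ID,
    authority=f"https://login.microsoftonline.com/{TENANT_ID}",
    client_credential=CLIENT_SECRET,
)

# ".default" requests the application permissions already granted to the app
token = app.acquire_token_for_client(scopes=["https://graph.microsoft.com/.default"])
if "access_token" not in token:
    raise RuntimeError(token.get("error_description", "token acquisition failed"))

headers = {"Authorization": f"Bearer {token['access_token']}"}

# The "R" in CRUD: list a user's calendar events through the Graph REST API
resp = requests.get(
    "https://graph.microsoft.com/v1.0/users/<user-id>/events",  # placeholder user
    headers=headers,
)
resp.raise_for_status()
for event in resp.json().get("value", []):
    print(event.get("subject"))
```

Create, update, and delete follow the same shape with POST/PATCH/DELETE against the corresponding Graph endpoints.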
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1656147940537-620630b603825909dcbeba35.jpeg", "fullname": "Aaron C Wacker", "name": "awacke1", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 185, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/UXd1MwyNQBtxojseHbIBU.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/yT3Izo7aK7DWmuFu5H7hr.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/Cc6ZU1tWpA-lUp9HckjqL.png" } ]
[ { "avatarUrl": "/avatars/00ab65ea581b913d6c053da3a5657875.svg", "fullname": "M ", "name": "Azure", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1 } ]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-10-14T11:57:23.000Z
2024-10-14T11:57:23.708Z
[]
/posts/awacke1/354397421832244
576
0
629434906077022
[ { "type": "text", "value": "Multimodal Ichigo Llama 3.1 - Real Time Voice AI ๐Ÿ”ฅ", "raw": "Multimodal Ichigo Llama 3.1 - Real Time Voice AI ๐Ÿ”ฅ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "> WhisperSpeech X Llama 3.1 8B", "raw": "> WhisperSpeech X Llama 3.1 8B", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "> Trained on 50K hours of speech (7 languages)", "raw": "> Trained on 50K hours of speech (7 languages)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "> Continually trained on 45hrs 10x A1000s", "raw": "> Continually trained on 45hrs 10x A1000s", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "> MLS -> WhisperVQ tokens -> Llama 3.1", "raw": "> MLS -> WhisperVQ tokens -> Llama 3.1", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "> Instruction tuned on 1.89M samples", "raw": "> Instruction tuned on 1.89M samples", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "> 70% speech, 20% transcription, 10% text", "raw": "> 70% speech, 20% transcription, 10% text", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "> Apache 2.0 licensed โšก", "raw": "> Apache 2.0 licensed โšก", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Architecture:", "raw": "Architecture:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", 
"href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "> WhisperSpeech/ VQ for Semantic Tokens", "raw": "> WhisperSpeech/ VQ for Semantic Tokens", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "> Llama 3.1 8B Instruct for Text backbone", "raw": "> Llama 3.1 8B Instruct for Text backbone", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "> Early fusion (Chameleon)", "raw": "> Early fusion (Chameleon)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I'm super bullish on HomeBrew/ Jan and early fusion, audio and text, multimodal models!", "raw": "I'm super bullish on HomeBrew/ Jan and early fusion, audio and text, multimodal models!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "(P.S. Play with the demo on Hugging Face: ", "raw": "(P.S. Play with the demo on Hugging Face: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/jan-hq/Ichigo-llama3.1-s-instruct", "href": null, "resource": { "type": "space", "id": "jan-hq/Ichigo-llama3.1-s-instruct", "discussionNum": null }, "url": "https://huggingface.co/spaces/jan-hq/Ichigo-llama3.1-s-instruct", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ")", "raw": ")", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Multimodal Ichigo Llama 3.1 - Real Time Voice AI ๐Ÿ”ฅ > WhisperSpeech X Llama 3.1 8B > Trained on 50K hours of speech (7 languages) > Continually trained on 45hrs 10x A1000s > MLS -> WhisperVQ tokens -> Llama 3.1 > Instruction tuned on 1.89M samples > 70% speech, 20% transcription, 10% text > Apache 2.0 licensed โšก Architecture: > WhisperSpeech/ VQ for Semantic Tokens > Llama 3.1 8B Instruct for Text backbone > Early fusion (Chameleon) I'm super bullish on HomeBrew/ Jan and early fusion, audio and text, multimodal models! (P.S. Play with the demo on Hugging Face: https://huggingface.co/spaces/jan-hq/Ichigo-llama3.1-s-instruct)
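Since the post describes early fusion of WhisperVQ semantic tokens into a Llama 3.1 text backbone, here is a hedged conceptual sketch of what "early fusion" means at the token level. This is not Ichigo's actual code: the special-token naming, codebook size, and prompt format are made up for illustration, and the gated 8B checkpoint is only named because the post mentions it.

```python
# Conceptual early-fusion sketch: audio codebook entries become extra vocabulary
# items, so speech and text share one embedding table and one transformer.
# NOT the Ichigo implementation; names and sizes are illustrative assumptions.
from transformers import AutoTokenizer, AutoModelForCausalLM

BASE = "meta-llama/Llama-3.1-8B-Instruct"   # backbone named in the post (gated repo)
NUM_AUDIO_TOKENS = 512                      # assumed WhisperVQ codebook size

tokenizer = AutoTokenizer.from_pretrained(BASE)
model = AutoModelForCausalLM.from_pretrained(BASE)

# 1) Add one new vocabulary entry per audio codebook index.
audio_tokens = [f"<|sound_{i}|>" for i in range(NUM_AUDIO_TOKENS)]
tokenizer.add_tokens(audio_tokens)
model.resize_token_embeddings(len(tokenizer))

# 2) A quantized utterance (indices from the audio VQ) is rendered as those
#    tokens and interleaved with ordinary text in the same sequence.
vq_indices = [17, 230, 5, 99]               # pretend WhisperVQ output
prompt = "".join(f"<|sound_{i}|>" for i in vq_indices) + "\nTranscribe and answer:"
inputs = tokenizer(prompt, return_tensors="pt")

# From here, training and inference are the usual causal-LM forward pass; the
# audio tokens' embeddings are learned during the continual/instruct training.
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=False))
```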
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1655385361868-61b85ce86eb1f2c5e6233736.jpeg", "fullname": "Vaibhav Srivastav", "name": "reach-vb", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 460, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/61b85ce86eb1f2c5e6233736/_-7SLdtcrmUAwfS4sMVHz.mp4" } ]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "victor", "philipp-zettl", "Norod78", "diwank", "John6666", "YaTharThShaRma999", "s9133", "DRRK0", "Timilla", "Jooinjang", "blanchon", "edwixx", "anuragsingh17ai", "KingNish", "ariG23498", "WaveCut" ], "count": 16 }, { "reaction": "๐Ÿ‘", "users": [ "vigos", "YaTharThShaRma999", "chenduo", "Yadukrishnan", "edwixx" ], "count": 5 }, { "reaction": "โค๏ธ", "users": [ "Sanzid", "JPG2000" ], "count": 2 }, { "reaction": "๐Ÿ˜Ž", "users": [ "alandao" ], "count": 1 }, { "reaction": "๐Ÿ‘€", "users": [ "den0620" ], "count": 1 }, { "reaction": "๐Ÿš€", "users": [ "Svngoku" ], "count": 1 } ]
2024-10-14T09:56:56.000Z
2024-10-14T09:56:56.112Z
[]
/posts/reach-vb/629434906077022
5,368
0
972610887106900
[ { "type": "text", "value": "Just started going through the latest \"State of AI Report 2024\", and I cannot get over the predictions!", "raw": "Just started going through the latest \"State of AI Report 2024\", and I cannot get over the predictions!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The report predicts major developments in AI over the next 12 months, including a $10B+ investment from a sovereign state into a large US AI lab, triggering national security scrutiny, and a viral app created by someone without coding skills.", "raw": "The report predicts major developments in AI over the next 12 months, including a $10B+ investment from a sovereign state into a large US AI lab, triggering national security scrutiny, and a viral app created by someone without coding skills.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "It forecasts changes in data collection practices due to frontier labs facing trials, softer-than-expected EU AI Act implementations, and the rise of an open-source alternative to OpenAI GPT-4 outperforming in benchmarks.", "raw": "It forecasts changes in data collection practices due to frontier labs facing trials, softer-than-expected EU AI Act implementations, and the rise of an open-source alternative to OpenAI GPT-4 outperforming in benchmarks.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "NVIDIAโ€™s dominance will remain largely unchallenged, investment in humanoid robots will decline, Appleโ€™s on-device AI research will gain momentum, and a research paper by an AI scientist will be accepted at a major conference.", "raw": "NVIDIAโ€™s dominance will remain largely unchallenged, investment in humanoid robots will decline, Appleโ€™s on-device AI research will gain momentum, and a research paper by an AI scientist will be accepted at a major conference.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Lastly, a GenAI-based video game is expected to achieve breakout success.", "raw": "Lastly, a GenAI-based video 
game is expected to achieve breakout success.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Yet to go through all 200+ pages... will post summarized thoughts later.", "raw": "Yet to go through all 200+ pages... will post summarized thoughts later.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Just started going through the latest "State of AI Report 2024", and I cannot get over the predictions! The report predicts major developments in AI over the next 12 months, including a $10B+ investment from a sovereign state into a large US AI lab, triggering national security scrutiny, and a viral app created by someone without coding skills. It forecasts changes in data collection practices due to frontier labs facing trials, softer-than-expected EU AI Act implementations, and the rise of an open-source alternative to OpenAI GPT-4 outperforming in benchmarks. NVIDIAโ€™s dominance will remain largely unchallenged, investment in humanoid robots will decline, Appleโ€™s on-device AI research will gain momentum, and a research paper by an AI scientist will be accepted at a major conference. Lastly, a GenAI-based video game is expected to achieve breakout success. Yet to go through all 200+ pages... will post summarized thoughts later.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662bf5bfe93bb73804ef9344/WXYLnjjJ4SROkoveIi7If.png", "fullname": "Kuldeep Singh Sidhu", "name": "singhsidhukuldeep", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 219, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/662bf5bfe93bb73804ef9344/yRdnQNTGNSxUxhFflDo1X.png" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "devashishd12", "AdinaY", "DeFactOfficial", "victor", "alielfilali01" ], "count": 6 } ]
2024-10-14T06:32:17.000Z
2024-10-15T00:16:19.600Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/RhP7T-AlAn5Y-08gaCUcU.jpeg", "fullname": "Sam Rahimi", "name": "DeFactOfficial", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 12, "isFollowing": false } ]
/posts/singhsidhukuldeep/972610887106900
2,010
2
104620168146159
[ { "type": "text", "value": "she assert on my device until i give up AHAHEGHFDGHJHASUFSHD", "raw": "she assert on my device until i give up AHAHEGHFDGHJHASUFSHD", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
she assert on my device until i give up AHAHEGHFDGHJHASUFSHD
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/659f000b83abded48e190901/BnXL_XYbVX6PHngfQLECW.png", "fullname": "Noa Roggendorff", "name": "nroggendorff", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 141, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "pepper13", "John6666", "takeraparterer", "Delta-Vector", "xi0v", "hogiahien" ], "count": 6 }, { "reaction": "๐Ÿ˜”", "users": [ "pepper13", "takeraparterer", "Delta-Vector", "xi0v", "hogiahien" ], "count": 5 }, { "reaction": "๐Ÿคฏ", "users": [ "Delta-Vector" ], "count": 1 } ]
2024-10-14T01:18:42.000Z
2024-10-14T04:28:25.815Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/kWyhns_G_75wwOdcgftGB.png", "fullname": "Bell ~", "name": "pepper13", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 5, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/659f000b83abded48e190901/BnXL_XYbVX6PHngfQLECW.png", "fullname": "Noa Roggendorff", "name": "nroggendorff", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 141, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png", "fullname": "John Smith", "name": "John6666", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 398, "isFollowing": false } ]
/posts/nroggendorff/104620168146159
2,105
3
251694388779899
[ { "type": "text", "value": "MiniSearch is celebrating its 1st birthday! ๐ŸŽ‰", "raw": "MiniSearch is celebrating its 1st birthday! ๐ŸŽ‰", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Exactly one year ago, I shared the initial version of this side-project on Hugging Face. Since then, there have been numerous changes under the hood. Nowadays it uses [Web-LLM](", "raw": "Exactly one year ago, I shared the initial version of this side-project on Hugging Face. Since then, there have been numerous changes under the hood. Nowadays it uses [Web-LLM](", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/mlc-ai/web-llm", "href": "https://github.com/mlc-ai/web-llm", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "), [Wllama](", "raw": "), [Wllama](", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/ngxson/wllama", "href": "https://github.com/ngxson/wllama", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ") and [SearXNG](", "raw": ") and [SearXNG](", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/searxng/searxng", "href": "https://github.com/searxng/searxng", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "). I use it daily as my default search engine and have done my best to make it useful. I hope it's interesting for you too!", "raw": "). I use it daily as my default search engine and have done my best to make it useful. 
I hope it's interesting for you too!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "HF Space: ", "raw": "HF Space: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/Felladrin/MiniSearch", "href": null, "resource": { "type": "space", "id": "Felladrin/MiniSearch", "discussionNum": null }, "url": "https://huggingface.co/spaces/Felladrin/MiniSearch", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Embeddable URL: ", "raw": "Embeddable URL: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://felladrin-minisearch.hf.space", "href": "https://felladrin-minisearch.hf.space", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
MiniSearch is celebrating its 1st birthday! ๐ŸŽ‰ Exactly one year ago, I shared the initial version of this side-project on Hugging Face. Since then, there have been numerous changes under the hood. Nowadays it uses [Web-LLM](https://github.com/mlc-ai/web-llm), [Wllama](https://github.com/ngxson/wllama) and [SearXNG](https://github.com/searxng/searxng). I use it daily as my default search engine and have done my best to make it useful. I hope it's interesting for you too! HF Space: https://huggingface.co/spaces/Felladrin/MiniSearch Embeddable URL: https://felladrin-minisearch.hf.space
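MiniSearch itself runs fully in the browser (Web-LLM and Wllama are TypeScript/WASM projects), but the underlying loop it describes, fetching web results from a SearXNG instance and letting a local LLM answer over them, can be sketched in Python for illustration. The instance URL is a placeholder, this is not MiniSearch's code, and SearXNG only returns JSON when that output format is enabled in the instance settings:

```python
# Illustrative search-augmented answering loop in the spirit of MiniSearch.
# Not MiniSearch's actual (TypeScript) code; URL and query are placeholders.
import requests

SEARXNG_URL = "https://searx.example.org/search"   # placeholder SearXNG instance

def web_search(query: str, max_results: int = 5) -> list[dict]:
    """Query a SearXNG instance that has JSON output enabled."""
    resp = requests.get(
        SEARXNG_URL,
        params={"q": query, "format": "json"},
        timeout=10,
    )
    resp.raise_for_status()
    return resp.json().get("results", [])[:max_results]

def build_prompt(query: str, results: list[dict]) -> str:
    """Pack titles and snippets into a context block for a small local LLM."""
    context = "\n".join(
        f"- {r.get('title', '')}: {r.get('content', '')}" for r in results
    )
    return (
        f"Answer using only these search results:\n{context}\n\n"
        f"Question: {query}\nAnswer:"
    )

if __name__ == "__main__":
    q = "what is retrieval-augmented generation"
    prompt = build_prompt(q, web_search(q))
    print(prompt)  # in MiniSearch, this prompt is fed to the in-browser model
```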
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6454aff9273f649830234978/cvVV08YHJpJx9xWVZqgVW.jpeg", "fullname": "Victor Nogueira", "name": "Felladrin", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 88, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6454aff9273f649830234978/F9vY2x3najX-nGACprLxM.png" } ]
[]
[ { "reaction": "๐Ÿ‘", "users": [ "Locutusque", "dillfrescott", "shtefcs", "John6666", "Zmu", "Nymbo", "DeathGodlike", "KatyaKnw" ], "count": 8 }, { "reaction": "โค๏ธ", "users": [ "dillfrescott", "shtefcs", "maywell", "Nymbo" ], "count": 4 }, { "reaction": "๐Ÿ”ฅ", "users": [ "shtefcs", "Nymbo" ], "count": 2 } ]
2024-10-13T22:12:25.000Z
2024-10-14T01:16:47.392Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/TwR65k1JgO_t3l4pM1UjA.png", "fullname": "Stefan Smiljkovic", "name": "shtefcs", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 9, "isFollowing": false } ]
/posts/Felladrin/251694388779899
2,711
1
372460706333339
[ { "type": "text", "value": "๐ŸŽ“ Introducing Svitppt.com.ua Presentations Dataset - ", "raw": "๐ŸŽ“ Introducing Svitppt.com.ua Presentations Dataset - ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/nyuuzyou/svitppt", "href": null, "resource": { "type": "dataset", "id": "nyuuzyou/svitppt", "discussionNum": null }, "url": "https://huggingface.co/datasets/nyuuzyou/svitppt", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Dataset highlights:", "raw": "Dataset highlights:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- 18,001 presentations from svitppt.com.ua, a platform for storing and viewing presentations for Ukrainian school students", "raw": "- 18,001 presentations from svitppt.com.ua, a platform for storing and viewing presentations for Ukrainian school students", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Primarily in Ukrainian, with some Russian and English content", "raw": "- Primarily in Ukrainian, with some Russian and English content", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Each entry includes: URL, title, download URL, filepath, and extracted text content (where available)", "raw": "- Each entry includes: URL, title, download URL, filepath, and extracted text content (where available)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Contains original PPT/PPTX files in addition to metadata", "raw": "- Contains original PPT/PPTX files in addition to metadata", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Data covers a wide range of educational topics and presentation materials", "raw": "- Data covers a wide range of educational topics and presentation materials", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": 
null }, { "type": "text", "value": "- Dedicated to the public domain under Creative Commons Zero (CC0) license", "raw": "- Dedicated to the public domain under Creative Commons Zero (CC0) license", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The dataset can be used for analyzing educational presentation content in Ukrainian and other languages, text classification tasks, and information retrieval systems. It's particularly valuable for examining trends in educational presentation materials and sharing practices in the Ukrainian-speaking student community. The inclusion of original files allows for in-depth analysis of presentation formats and structures commonly used in Ukrainian educational settings.", "raw": "The dataset can be used for analyzing educational presentation content in Ukrainian and other languages, text classification tasks, and information retrieval systems. It's particularly valuable for examining trends in educational presentation materials and sharing practices in the Ukrainian-speaking student community. The inclusion of original files allows for in-depth analysis of presentation formats and structures commonly used in Ukrainian educational settings.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
๐ŸŽ“ Introducing Svitppt.com.ua Presentations Dataset - https://huggingface.co/datasets/nyuuzyou/svitppt Dataset highlights: - 18,001 presentations from svitppt.com.ua, a platform for storing and viewing presentations for Ukrainian school students - Primarily in Ukrainian, with some Russian and English content - Each entry includes: URL, title, download URL, filepath, and extracted text content (where available) - Contains original PPT/PPTX files in addition to metadata - Data covers a wide range of educational topics and presentation materials - Dedicated to the public domain under Creative Commons Zero (CC0) license The dataset can be used for analyzing educational presentation content in Ukrainian and other languages, text classification tasks, and information retrieval systems. It's particularly valuable for examining trends in educational presentation materials and sharing practices in the Ukrainian-speaking student community. The inclusion of original files allows for in-depth analysis of presentation formats and structures commonly used in Ukrainian educational settings.
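Since the post lists the per-entry fields (URL, title, download URL, filepath, extracted text), a quick hedged sketch of inspecting the dataset with the datasets library might look like the following; the split and exact column names are assumptions based on that field list, not a documented schema.

```python
# Hedged sketch: inspect the svitppt dataset with the `datasets` library.
# Split and column names are assumptions based on the field list in the post.
from datasets import load_dataset

ds = load_dataset("nyuuzyou/svitppt", split="train")

print(ds)                      # row count and column names
example = ds[0]
print(example.get("title"))    # presentation title (field name assumed)
print(example.get("url"))      # page URL (field name assumed)

# Example: keep only entries that actually have extracted text
with_text = ds.filter(lambda row: bool(row.get("text")))
print(len(with_text))
```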
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/643ac5d2e2b979ae6144d68c/Z7PCNopn4cQeAYnVJDoqG.png", "fullname": "nyuuzyou", "name": "nyuuzyou", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 57, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-10-13T20:08:26.000Z
2024-10-13T20:08:35.901Z
[]
/posts/nyuuzyou/372460706333339
487
0
515548178620285
[ { "type": "text", "value": "The question is on the left and the answer is on the right.", "raw": "The question is on the left and the answer is on the right.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
The question is on the left and the answer is on the right.
{ "avatarUrl": "/avatars/61126da32e45508069816b5e54a3e645.svg", "fullname": "huggingface.co", "name": "huggingface0", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 16, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿง ", "users": [ "John6666" ], "count": 1 } ]
2024-10-13T19:47:10.000Z
2024-10-13T19:47:10.468Z
[]
/posts/huggingface0/515548178620285
476
0
974002079617092
[ { "type": "text", "value": "Is Entropix the Chain of Thought Reasoning method behind GPTo1? Using a mixture of entropy, varentropy, and prompt engineering, the Entropix framework can straight up make SmolLLM 330M look like Llama 3.2. Check out this video for a full run down and the description of the video for all of the related resources you need: ", "raw": "Is Entropix the Chain of Thought Reasoning method behind GPTo1? Using a mixture of entropy, varentropy, and prompt engineering, the Entropix framework can straight up make SmolLLM 330M look like Llama 3.2. Check out this video for a full run down and the description of the video for all of the related resources you need: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://youtu.be/senq4_42tPI", "href": "https://youtu.be/senq4_42tPI", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Is Entropix the Chain of Thought Reasoning method behind GPTo1? Using a mixture of entropy, varentropy, and prompt engineering, the Entropix framework can straight up make SmolLLM 330M look like Llama 3.2. Check out this video for a full run down and the description of the video for all of the related resources you need: https://youtu.be/senq4_42tPI
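The video is about Entropix-style adaptive sampling, which keys decoding decisions off the entropy and "varentropy" of the next-token distribution. As a hedged, self-contained sketch of just those two statistics and a toy decision rule (not the Entropix codebase itself; the thresholds and strategy names are arbitrary):

```python
# Conceptual sketch of the entropy / varentropy signal used by Entropix-style
# samplers. Not the actual Entropix implementation; thresholds are arbitrary.
import torch
import torch.nn.functional as F

def entropy_varentropy(logits: torch.Tensor) -> tuple[float, float]:
    """Entropy and variance-of-surprisal of a next-token distribution."""
    log_probs = F.log_softmax(logits, dim=-1)
    probs = log_probs.exp()
    entropy = -(probs * log_probs).sum(-1)                                    # E[-log p]
    varentropy = (probs * (log_probs + entropy.unsqueeze(-1)) ** 2).sum(-1)   # Var[-log p]
    return entropy.item(), varentropy.item()

def choose_strategy(logits: torch.Tensor) -> str:
    """Toy decision rule: pick a decoding behaviour from the two statistics."""
    ent, vent = entropy_varentropy(logits)
    if ent < 1.0 and vent < 1.0:
        return "greedy"          # model is confident: just take the argmax
    if ent > 3.0:
        return "inject_pause"    # very uncertain: e.g. insert a "thinking" step
    return "sample_top_k"        # in between: sample with more exploration

if __name__ == "__main__":
    fake_logits = torch.randn(32000)   # pretend vocabulary logits
    print(entropy_varentropy(fake_logits))
    print(choose_strategy(fake_logits))
```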
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png", "fullname": "Richard A Aragon", "name": "TuringsSolutions", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 146, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-10-13T17:10:51.000Z
2024-10-27T12:46:37.396Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6316fb937b0ee0136e5f1220/poHBoJ7QAF_s2CCaosdvQ.jpeg", "fullname": "Firstname Lastname", "name": "takeraparterer", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 29, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png", "fullname": "Richard A Aragon", "name": "TuringsSolutions", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 146, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/660bd4793f16e207645fb119/IHT-fPdrfdgti7bKo8UZo.jpeg", "fullname": "No-mad", "name": "no-mad", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 13, "isFollowing": false }, { "avatarUrl": "/avatars/54483699273ac58a4a6fe1fa4aab65fe.svg", "fullname": "Robert Sinclair", "name": "ZeroWw", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 76, "isFollowing": false } ]
/posts/TuringsSolutions/974002079617092
516
12
732435777248922
[ { "type": "text", "value": "SwiftMistralCoreML", "raw": "SwiftMistralCoreML", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Hi Everyone,", "raw": "Hi Everyone,", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I have created a Swift library to interact with Mistral 7B models in CoreML on macOS.", "raw": "I have created a Swift library to interact with Mistral 7B models in CoreML on macOS.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I hope you find it helpful.", "raw": "I hope you find it helpful.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/cardona/SwiftMistralCoreML", "href": "https://github.com/cardona/SwiftMistralCoreML", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "An open-source Swift library that enables macOS and iOS projects to utilize the Mistral-Interact7B models (INT4 and upcoming FP16) in chat mode. This library includes a complete Swift implementation of the tokenizer and Byte Pair Encoding (BPE) encoder, providing an out-of-the-box solution for integrating advanced language models into your Swift applications.", "raw": "An open-source Swift library that enables macOS and iOS projects to utilize the Mistral-Interact7B models (INT4 and upcoming FP16) in chat mode. 
This library includes a complete Swift implementation of the tokenizer and Byte Pair Encoding (BPE) encoder, providing an out-of-the-box solution for integrating advanced language models into your Swift applications.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Features", "raw": "Features", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Full Swift Implementation: Includes tokenizer and BPE encoder written entirely in Swift.", "raw": "Full Swift Implementation: Includes tokenizer and BPE encoder written entirely in Swift.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "CoreML Integration: Leverages Apple's CoreML framework to run Mistral-Interact7B models efficiently.", "raw": "CoreML Integration: Leverages Apple's CoreML framework to run Mistral-Interact7B models efficiently.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Multiple Decoding Strategies: Supports Greedy and Top-K sampling, with plans to add more strategies.", "raw": "Multiple Decoding Strategies: Supports Greedy and Top-K sampling, with plans to add more strategies.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Chat Functionality: Designed to work in chat mode for interactive applications.", "raw": "Chat Functionality: Designed to work in chat mode for interactive applications.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "FP16 Support (Coming Soon): Future version will support FP16 models for improved performance.", "raw": "FP16 Support (Coming Soon): Future version will support FP16 models for improved performance.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": 
null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
SwiftMistralCoreML Hi Everyone, I have created a Swift library to interact with Mistral 7B models in CoreML on macOS. I hope you find it helpful. https://github.com/cardona/SwiftMistralCoreML An open-source Swift library that enables macOS and iOS projects to utilize the Mistral-Interact7B models (INT4 and upcoming FP16) in chat mode. This library includes a complete Swift implementation of the tokenizer and Byte Pair Encoding (BPE) encoder, providing an out-of-the-box solution for integrating advanced language models into your Swift applications. Features Full Swift Implementation: Includes tokenizer and BPE encoder written entirely in Swift. CoreML Integration: Leverages Apple's CoreML framework to run Mistral-Interact7B models efficiently. Multiple Decoding Strategies: Supports Greedy and Top-K sampling, with plans to add more strategies. Chat Functionality: Designed to work in chat mode for interactive applications. FP16 Support (Coming Soon): Future version will support FP16 models for improved performance.
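For readers unfamiliar with the two decoding strategies named in the feature list above, here is a minimal, library-agnostic sketch of the difference between greedy and top-K sampling. It is written in Python for illustration only and is not SwiftMistralCoreML's Swift API; the logits values are made up.

```python
# Illustrative only: greedy vs. top-k decoding over a toy logits vector.
# This is NOT the SwiftMistralCoreML API (which is Swift); it only shows the
# two sampling strategies the announcement above says the library supports.
import math
import random

def softmax(logits):
    m = max(logits)
    exps = [math.exp(x - m) for x in logits]
    total = sum(exps)
    return [e / total for e in exps]

def greedy(logits):
    # Always pick the highest-scoring token id.
    return max(range(len(logits)), key=lambda i: logits[i])

def top_k(logits, k=3):
    # Keep the k most likely token ids, renormalise, and sample among them.
    top = sorted(range(len(logits)), key=lambda i: logits[i], reverse=True)[:k]
    probs = softmax([logits[i] for i in top])
    return random.choices(top, weights=probs, k=1)[0]

logits = [1.2, 0.3, 2.5, 0.1, 1.9]  # toy next-token scores
print(greedy(logits))        # always 2
print(top_k(logits, k=3))    # one of 2, 4, 0
```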
{ "avatarUrl": "/avatars/8f85a0d335476966fe3da255ad0e55d6.svg", "fullname": "Oscar", "name": "Skataka", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 8, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿš€", "users": [ "shtefcs", "John6666", "Joseph717171", "victor", "xi0v", "younissaqib" ], "count": 6 }, { "reaction": "๐Ÿ”ฅ", "users": [ "DhamuR", "fibaek", "Joseph717171", "xi0v", "stodev" ], "count": 5 } ]
2024-10-13T07:28:45.000Z
2024-10-13T19:26:26.708Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/TwR65k1JgO_t3l4pM1UjA.png", "fullname": "Stefan Smiljkovic", "name": "shtefcs", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 9, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60df8d073009e0a7f77a61c8/h0hQ5wMxS05XkGiwbrBvw.jpeg", "fullname": "Dennis Zollmann", "name": "wottpal", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "/avatars/8f85a0d335476966fe3da255ad0e55d6.svg", "fullname": "Oscar", "name": "Skataka", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 8, "isFollowing": false } ]
/posts/Skataka/732435777248922
2,426
3
152623777236688
[ { "type": "text", "value": "Hello Everyone,", "raw": "Hello Everyone,", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I signed up as Pro and started a ZeroGPU space with a Gradio chatbot project as default. When building the space, it won't even start the sample Gradio app.. Pretty disappointing when right out of the box, it fails...", "raw": "I signed up as Pro and started a ZeroGPU space with a Gradio chatbot project as default. When building the space, it won't even start the sample Gradio app.. Pretty disappointing when right out of the box, it fails...", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Have anyone encountered this yet?", "raw": "Have anyone encountered this yet?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Thanks...", "raw": "Thanks...", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This is the output, odd since it seems to be just a warning. So why wouldn't it start?", "raw": "This is the output, odd since it seems to be just a warning. So why wouldn't it start?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "/usr/local/lib/python3.10/site-packages/gradio/components/chatbot.py:228: UserWarning: The 'tuples' format for chatbot messages is deprecated and will be removed in a future version of Gradio. Please set type='messages' instead, which uses openai-style 'role' and 'content' keys.", "raw": "/usr/local/lib/python3.10/site-packages/gradio/components/chatbot.py:228: UserWarning: The 'tuples' format for chatbot messages is deprecated and will be removed in a future version of Gradio. 
Please set type='messages' instead, which uses openai-style 'role' and 'content' keys.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " warnings.warn(", "raw": " warnings.warn(", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "* Running on local URL: ", "raw": "* Running on local URL: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "http://0.0.0.0:7860", "href": "http://0.0.0.0:7860", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ", with SSR โšก", "raw": ", with SSR โšก", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "To create a public link, set ", "raw": "To create a public link, set ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`share=True`", "href": null, "resource": null, "url": null, "code": "share=True", "user": null, "label": null, "lang": null }, { "type": "text", "value": " in ", "raw": " in ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`launch()`", "href": null, "resource": null, "url": null, "code": "launch()", "user": null, "label": null, "lang": null }, { "type": "text", "value": ".", "raw": ".", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Stopping Node.js server...", "raw": "Stopping Node.js server...", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Hello Everyone, I signed up as Pro and started a ZeroGPU space with a Gradio chatbot project as default. When building the space, it won't even start the sample Gradio app.. Pretty disappointing when right out of the box, it fails... Have anyone encountered this yet? Thanks... This is the output, odd since it seems to be just a warning. So why wouldn't it start? /usr/local/lib/python3.10/site-packages/gradio/components/chatbot.py:228: UserWarning: The 'tuples' format for chatbot messages is deprecated and will be removed in a future version of Gradio. Please set type='messages' instead, which uses openai-style 'role' and 'content' keys. warnings.warn( * Running on local URL: http://0.0.0.0:7860, with SSR โšก To create a public link, set `share=True` in `launch()`. Stopping Node.js server...
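The deprecation warning quoted in the log above names its own fix. Below is a minimal sketch of that change, assuming a recent Gradio version that supports the `type="messages"` parameter; whether this alone resolves the Space's startup failure is a separate question.

```python
# Minimal sketch of the change the deprecation warning asks for: pass
# type="messages" so chat history uses openai-style {"role": ..., "content": ...}
# dicts instead of the deprecated list-of-tuples format.
import gradio as gr

def respond(message, history):
    # In messages mode, history arrives as a list of {"role", "content"} dicts.
    return f"You said: {message}"

demo = gr.ChatInterface(fn=respond, type="messages")
demo.launch()
```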
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/no-auth/5kE1rvdIVfUftt7B__ysg.png", "fullname": "Thomas Tong", "name": "gtvracer", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "gtvracer" ], "count": 2 } ]
2024-10-13T04:44:43.000Z
2024-10-14T02:38:09.089Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png", "fullname": "John Smith", "name": "John6666", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 398, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/no-auth/5kE1rvdIVfUftt7B__ysg.png", "fullname": "Thomas Tong", "name": "gtvracer", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/gtvracer/152623777236688
1,454
6
372337376035278
[ { "type": "text", "value": "I am looking for an open source realtime TTS voice cloning model. Need Suggestions....! ", "raw": "I am looking for an open source realtime TTS voice cloning model. Need Suggestions....! ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
I am looking for an open source realtime TTS voice cloning model. Need Suggestions....!
{ "avatarUrl": "/avatars/ca03c61bfe1b7fce65cc9fc81a82ae8b.svg", "fullname": "ahsan raza", "name": "ahsanr", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "UCCTeam", "den0620" ], "count": 3 } ]
2024-10-12T22:37:48.000Z
2024-10-12T23:19:18.552Z
[ { "avatarUrl": "/avatars/c82779fdf94f80cdb5020504f83c818b.svg", "fullname": "Yatharth Sharma", "name": "YaTharThShaRma999", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 14, "isFollowing": false } ]
/posts/ahsanr/372337376035278
996
1
596746813944053
[ { "type": "text", "value": "Last Week in Medical AI: Top Research ", "raw": "Last Week in Medical AI: Top Research ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Papers/Models", "raw": "Papers/Models", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ… (October 5 - October 12, 2024)", "raw": "๐Ÿ… (October 5 - October 12, 2024)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ… Medical AI Paper of the Week:", "raw": "๐Ÿ… Medical AI Paper of the Week:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "MMedAgent: Learning to Use Medical Tools with Multi-modal Agent", "raw": "MMedAgent: Learning to Use Medical Tools with Multi-modal Agent", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "YouTube podcast of weekly papers: ", "raw": "YouTube podcast of weekly papers: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://youtu.be/OD3C5jirszw", "href": "https://youtu.be/OD3C5jirszw", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Medical LLM & Other Models:", "raw": "Medical LLM & Other Models:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- LLM Framework for Rare Disease Phenotyping", "raw": "- LLM Framework for Rare Disease Phenotyping", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, 
"resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- ONCOPILOT: CT Foundation Model for Tumors", "raw": "- ONCOPILOT: CT Foundation Model for Tumors", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- FMBench: Fairness in Medical MLLMs", "raw": "- FMBench: Fairness in Medical MLLMs", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- GP-GPT: LLM for Gene-Phenotype Mapping", "raw": "- GP-GPT: LLM for Gene-Phenotype Mapping", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- MedAdapter: Efficient LLM Medical Adaptation", "raw": "- MedAdapter: Efficient LLM Medical Adaptation", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- RespLLM: Multimodal LLM for Respiratory Health", "raw": "- RespLLM: Multimodal LLM for Respiratory Health", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- MDAgents: LLM Collaboration for Medical Decisions", "raw": "- MDAgents: LLM Collaboration for Medical Decisions", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- MedVisionLlama: LLM Medical Image Segmentation", "raw": "- MedVisionLlama: LLM Medical Image Segmentation", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Frameworks and Methodologies:", "raw": "Frameworks and Methodologies:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- ReXplain: AI-Driven Radiology Video Reports", "raw": "- ReXplain: AI-Driven Radiology Video Reports", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": 
null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- BioDiscoveryAgent: AI for Genetic Experiments", "raw": "- BioDiscoveryAgent: AI for Genetic Experiments", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- ZODIAC: Multi-Agent Cardiological Diagnostics", "raw": "- ZODIAC: Multi-Agent Cardiological Diagnostics", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- OLAPH: Improving Biomedical LLM Factuality", "raw": "- OLAPH: Improving Biomedical LLM Factuality", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- OmniGenBench: Benchmarking Genomic Models", "raw": "- OmniGenBench: Benchmarking Genomic Models", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Medical LLM Applications:", "raw": "Medical LLM Applications:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- MMedAgent: Multimodal Medical Tool Use", "raw": "- MMedAgent: Multimodal Medical Tool Use", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- AI for Mental Health Support", "raw": "- AI for Mental Health Support", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- LLMs for Mental Disorders Detection", "raw": "- LLMs for Mental Disorders Detection", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- PharmacyGPT: AI Pharmacist Framework", "raw": "- PharmacyGPT: AI Pharmacist Framework", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, 
"lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Medical LLMs & Benchmarks:", "raw": "Medical LLMs & Benchmarks:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- CliMedBench: Chinese Medical LLM Benchmark", "raw": "- CliMedBench: Chinese Medical LLM Benchmark", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- MedSafetyBench: Evaluating Medical LLM Safety", "raw": "- MedSafetyBench: Evaluating Medical LLM Safety", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "AI in Healthcare Ethics:", "raw": "AI in Healthcare Ethics:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- LLM-based Medical Dialogue Preference Alignment", "raw": "- LLM-based Medical Dialogue Preference Alignment", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Trustworthiness in Medical Imaging Models", "raw": "- Trustworthiness in Medical Imaging Models", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Last Week in Medical AI: Top Research Papers/Models ๐Ÿ… (October 5 - October 12, 2024) ๐Ÿ… Medical AI Paper of the Week: MMedAgent: Learning to Use Medical Tools with Multi-modal Agent YouTube podcast of weekly papers: https://youtu.be/OD3C5jirszw Medical LLM & Other Models: - LLM Framework for Rare Disease Phenotyping - ONCOPILOT: CT Foundation Model for Tumors - FMBench: Fairness in Medical MLLMs - GP-GPT: LLM for Gene-Phenotype Mapping - MedAdapter: Efficient LLM Medical Adaptation - RespLLM: Multimodal LLM for Respiratory Health - MDAgents: LLM Collaboration for Medical Decisions - MedVisionLlama: LLM Medical Image Segmentation Frameworks and Methodologies: - ReXplain: AI-Driven Radiology Video Reports - BioDiscoveryAgent: AI for Genetic Experiments - ZODIAC: Multi-Agent Cardiological Diagnostics - OLAPH: Improving Biomedical LLM Factuality - OmniGenBench: Benchmarking Genomic Models Medical LLM Applications: - MMedAgent: Multimodal Medical Tool Use - AI for Mental Health Support - LLMs for Mental Disorders Detection - PharmacyGPT: AI Pharmacist Framework Medical LLMs & Benchmarks: - CliMedBench: Chinese Medical LLM Benchmark - MedSafetyBench: Evaluating Medical LLM Safety AI in Healthcare Ethics: - LLM-based Medical Dialogue Preference Alignment - Trustworthiness in Medical Imaging Models
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f3fe13d79c1ba4c353d0c19/XswyGe3OtOdZ6g7rnrgfc.png", "fullname": "Aaditya Ura", "name": "aaditya", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 224, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/JZSP-bYXQbJDw-_gA62-I.png" } ]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "aaditya", "not-lain", "Backup6", "Krystyn", "ldwang" ], "count": 5 }, { "reaction": "๐Ÿง ", "users": [ "aaditya", "John6666", "Backup6" ], "count": 3 }, { "reaction": "๐Ÿš€", "users": [ "aaditya", "Backup6" ], "count": 2 }, { "reaction": "โค๏ธ", "users": [ "aaditya" ], "count": 1 }, { "reaction": "๐Ÿค—", "users": [ "aaditya" ], "count": 1 }, { "reaction": "๐Ÿ‘€", "users": [ "ldwang" ], "count": 1 } ]
2024-10-12T20:18:23.000Z
2024-10-12T20:18:23.403Z
[]
/posts/aaditya/596746813944053
2,072
0
218365181300505
[ { "type": "text", "value": "1+2=3", "raw": "1+2=3", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
1+2=3
{ "avatarUrl": "/avatars/61126da32e45508069816b5e54a3e645.svg", "fullname": "huggingface.co", "name": "huggingface0", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 16, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿคฏ", "users": [ "YaTharThShaRma999", "Clausss", "takeraparterer", "Ainonake", "John6666", "Nymbo", "sawac", "littlecowmoo", "nyuuzyou", "d0rj", "Azamat1k", "pyhornet", "Locutusque", "AkitoP", "DiamanteAmarelo", "matchaaaaa", "kobkrit", "ngxson", "devashishd12", "SimonDL", "den0620" ], "count": 21 }, { "reaction": "โž•", "users": [ "ChocolatePlease", "DiamanteAmarelo" ], "count": 2 }, { "reaction": "๐Ÿ‘€", "users": [ "takeraparterer", "PrinceSquints" ], "count": 2 }, { "reaction": "๐Ÿš€", "users": [ "takeraparterer" ], "count": 1 } ]
2024-10-12T19:35:50.000Z
2024-10-14T09:53:15.250Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/660432d2d2e59abb3fd40b8c/TrvNnR8wHDh9lPHm81JfQ.png", "fullname": "David Meriwether", "name": "BigHuggyD", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 22, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1674191139776-noauth.png", "fullname": "Xuan Son NGUYEN", "name": "ngxson", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 49, "isFollowing": false } ]
/posts/huggingface0/218365181300505
3,951
2
532995708886878
[ { "type": "text", "value": "I'm biased but I think HF Posts is the #1 social platform for the AI community! ๐Ÿค— That being said, most of us are already on X and now also joining Bluesky. ", "raw": "I'm biased but I think HF Posts is the #1 social platform for the AI community! ๐Ÿค— That being said, most of us are already on X and now also joining Bluesky. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Looking for us on Bsky? We started a team list here: ", "raw": "Looking for us on Bsky? We started a team list here: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://bsky.app/starter-pack/did:plc:yyfrnpcutxghwc6eac4xplwp/3lbem54cnxp26", "href": "https://bsky.app/starter-pack/did:plc:yyfrnpcutxghwc6eac4xplwp/3lbem54cnxp26", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
I'm biased but I think HF Posts is the #1 social platform for the AI community! ๐Ÿค— That being said, most of us are already on X and now also joining Bluesky. Looking for us on Bsky? We started a team list here: https://bsky.app/starter-pack/did:plc:yyfrnpcutxghwc6eac4xplwp/3lbem54cnxp26
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63691c3eda9b693c2730b2a2/hBtKpgo3_9003MWCGkw5d.png", "fullname": "Brigitte Tousignant", "name": "BrigitteTousi", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 136, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿš€", "users": [ "Nymbo", "clem", "John6666", "rwightman", "jsulz" ], "count": 5 }, { "reaction": "โค๏ธ", "users": [ "clem" ], "count": 1 } ]
2024-11-21T16:37:17.000Z
2024-11-21T16:37:17.417Z
[]
/posts/BrigitteTousi/532995708886878
766
0
179645760478421
[ { "type": "text", "value": "๐—ก๐—ฒ๐˜„ ๐—น๐—ฒ๐—ฎ๐—ฑ๐—ฒ๐—ฟ๐—ฏ๐—ผ๐—ฎ๐—ฟ๐—ฑ ๐—ฟ๐—ฎ๐—ป๐—ธ๐˜€ ๐—Ÿ๐—Ÿ๐— ๐˜€ ๐—ณ๐—ผ๐—ฟ ๐—Ÿ๐—Ÿ๐— -๐—ฎ๐˜€-๐—ฎ-๐—ท๐˜‚๐—ฑ๐—ด๐—ฒ: ๐—Ÿ๐—น๐—ฎ๐—บ๐—ฎ-๐Ÿฏ.๐Ÿญ-๐Ÿณ๐Ÿฌ๐—• ๐˜๐—ผ๐—ฝ๐˜€ ๐˜๐—ต๐—ฒ ๐—ฟ๐—ฎ๐—ป๐—ธ๐—ถ๐—ป๐—ด๐˜€! ๐Ÿง‘โ€โš–๏ธ", "raw": "๐—ก๐—ฒ๐˜„ ๐—น๐—ฒ๐—ฎ๐—ฑ๐—ฒ๐—ฟ๐—ฏ๐—ผ๐—ฎ๐—ฟ๐—ฑ ๐—ฟ๐—ฎ๐—ป๐—ธ๐˜€ ๐—Ÿ๐—Ÿ๐— ๐˜€ ๐—ณ๐—ผ๐—ฟ ๐—Ÿ๐—Ÿ๐— -๐—ฎ๐˜€-๐—ฎ-๐—ท๐˜‚๐—ฑ๐—ด๐—ฒ: ๐—Ÿ๐—น๐—ฎ๐—บ๐—ฎ-๐Ÿฏ.๐Ÿญ-๐Ÿณ๐Ÿฌ๐—• ๐˜๐—ผ๐—ฝ๐˜€ ๐˜๐—ต๐—ฒ ๐—ฟ๐—ฎ๐—ป๐—ธ๐—ถ๐—ป๐—ด๐˜€! ๐Ÿง‘โ€โš–๏ธ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Evaluating systems is critical during prototyping and in production, and LLM-as-a-judge has become a standard technique to do it.", "raw": "Evaluating systems is critical during prototyping and in production, and LLM-as-a-judge has become a standard technique to do it.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "First, what is \"LLM-as-a-judge\"?", "raw": "First, what is \"LLM-as-a-judge\"?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ‘‰ It's a very useful technique for evaluating LLM outputs. If anything you're evaluating cannot be properly evaluated with deterministic criteria, like the \"politeness\" of an LLM output, or how faithful it is to an original source, you can use LLM-judge instead : prompt another LLM with \"Here's an LLM output, please rate this on criterion {criterion} on a scale of 1 to 5\", then parse the number from its output, and voilร , you get your score.", "raw": "๐Ÿ‘‰ It's a very useful technique for evaluating LLM outputs. 
If anything you're evaluating cannot be properly evaluated with deterministic criteria, like the \"politeness\" of an LLM output, or how faithful it is to an original source, you can use LLM-judge instead : prompt another LLM with \"Here's an LLM output, please rate this on criterion {criterion} on a scale of 1 to 5\", then parse the number from its output, and voilร , you get your score.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿง But who judges the judge?", "raw": "๐Ÿง But who judges the judge?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "How can you make sure your LLM-judge is reliable? You can have a specific dataset annotated with scores provided by human judges, and compare how LLM-judge scores correlate with human judge scores.", "raw": "How can you make sure your LLM-judge is reliable? You can have a specific dataset annotated with scores provided by human judges, and compare how LLM-judge scores correlate with human judge scores.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ“Š Before even running that benchmark, to get you started, there's a new option to get you started: a leaderboard that measures how well different model perform as judges!", "raw": "๐Ÿ“Š Before even running that benchmark, to get you started, there's a new option to get you started: a leaderboard that measures how well different model perform as judges!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "And the outcome is surprising, models come in quite different orders from what we're used to in general rankings: probably some have much better bias mitigation than others!", "raw": "And the outcome is surprising, models come in quite different orders from what we're used to in general rankings: probably some have much better bias mitigation than others!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, 
"code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Take a deeper look here ๐Ÿ‘‰ ", "raw": "Take a deeper look here ๐Ÿ‘‰ ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/blog/arena-atla", "href": "https://huggingface.co/blog/arena-atla", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
๐—ก๐—ฒ๐˜„ ๐—น๐—ฒ๐—ฎ๐—ฑ๐—ฒ๐—ฟ๐—ฏ๐—ผ๐—ฎ๐—ฟ๐—ฑ ๐—ฟ๐—ฎ๐—ป๐—ธ๐˜€ ๐—Ÿ๐—Ÿ๐— ๐˜€ ๐—ณ๐—ผ๐—ฟ ๐—Ÿ๐—Ÿ๐— -๐—ฎ๐˜€-๐—ฎ-๐—ท๐˜‚๐—ฑ๐—ด๐—ฒ: ๐—Ÿ๐—น๐—ฎ๐—บ๐—ฎ-๐Ÿฏ.๐Ÿญ-๐Ÿณ๐Ÿฌ๐—• ๐˜๐—ผ๐—ฝ๐˜€ ๐˜๐—ต๐—ฒ ๐—ฟ๐—ฎ๐—ป๐—ธ๐—ถ๐—ป๐—ด๐˜€! ๐Ÿง‘โ€โš–๏ธ Evaluating systems is critical during prototyping and in production, and LLM-as-a-judge has become a standard technique to do it. First, what is "LLM-as-a-judge"? ๐Ÿ‘‰ It's a very useful technique for evaluating LLM outputs. If anything you're evaluating cannot be properly evaluated with deterministic criteria, like the "politeness" of an LLM output, or how faithful it is to an original source, you can use LLM-judge instead : prompt another LLM with "Here's an LLM output, please rate this on criterion {criterion} on a scale of 1 to 5", then parse the number from its output, and voilร , you get your score. ๐Ÿง But who judges the judge? How can you make sure your LLM-judge is reliable? You can have a specific dataset annotated with scores provided by human judges, and compare how LLM-judge scores correlate with human judge scores. ๐Ÿ“Š Before even running that benchmark, to get you started, there's a new option to get you started: a leaderboard that measures how well different model perform as judges! And the outcome is surprising, models come in quite different orders from what we're used to in general rankings: probably some have much better bias mitigation than others! Take a deeper look here ๐Ÿ‘‰ https://huggingface.co/blog/arena-atla
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d10d4e8eaa4831005e92b5/7p7-OmWM6PqqCs7ZStPGD.jpeg", "fullname": "Aymeric Roucher", "name": "m-ric", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 494, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/63d10d4e8eaa4831005e92b5/y_fGB9WDPBUALso86xWaV.png" } ]
[]
[ { "reaction": "๐Ÿš€", "users": [ "takarajordan", "clem", "John6666" ], "count": 3 } ]
2024-11-21T16:01:01.000Z
2024-11-21T16:01:01.746Z
[]
/posts/m-ric/179645760478421
732
0
802713390155238
[ { "type": "text", "value": "Lifehack of the day:", "raw": "Lifehack of the day:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Adding \"r.jina.ai/\" before any url transforms it in Markdown using Jina AI's Reader! Here with ", "raw": "Adding \"r.jina.ai/\" before any url transforms it in Markdown using Jina AI's Reader! Here with ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@cyrilzakka", "href": null, "resource": null, "url": null, "code": null, "user": "cyrilzakka", "label": null, "lang": null }, { "type": "text", "value": "'s blog post.", "raw": "'s blog post.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Lifehack of the day: Adding "r.jina.ai/" before any url transforms it in Markdown using Jina AI's Reader! Here with @cyrilzakka's blog post.
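The same trick from code, assuming the requests library is available; the example URL is arbitrary.

```python
# Prefix any URL with https://r.jina.ai/ and Jina AI's Reader returns the
# page rendered as Markdown in the response body.
import requests

url = "https://huggingface.co/blog"
markdown = requests.get("https://r.jina.ai/" + url, timeout=30).text
print(markdown[:500])  # first 500 characters of the Markdown rendering
```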
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d10d4e8eaa4831005e92b5/7p7-OmWM6PqqCs7ZStPGD.jpeg", "fullname": "Aymeric Roucher", "name": "m-ric", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 494, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/63d10d4e8eaa4831005e92b5/KWKtTzwmVPZXcIz-ibiYa.png" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/4f93ZrYdaKfK3F53IB51x.jpeg", "fullname": "Cyril", "name": "cyrilzakka", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 27 } ]
[ { "reaction": "๐Ÿš€", "users": [ "John6666" ], "count": 1 } ]
2024-11-21T15:38:46.000Z
2024-11-21T15:38:46.846Z
[]
/posts/m-ric/802713390155238
230
0
222403069517031
[ { "type": "text", "value": "First post here goes!", "raw": "First post here goes!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/takarajordan/CineDiffusion", "href": null, "resource": { "type": "space", "id": "takarajordan/CineDiffusion", "discussionNum": null }, "url": "https://huggingface.co/spaces/takarajordan/CineDiffusion", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Super excited to announce CineDiffusion๐ŸŽฅ, it creates images up to 4.2 Megapixels in Cinematic ultrawide formats like:", "raw": "Super excited to announce CineDiffusion๐ŸŽฅ, it creates images up to 4.2 Megapixels in Cinematic ultrawide formats like:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- 2.39:1 (Modern Widescreen)", "raw": "- 2.39:1 (Modern Widescreen)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- 2.76:1 (Ultra Panavision 70)", "raw": "- 2.76:1 (Ultra Panavision 70)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- 3.00:1 (Experimental Ultra-wide)", "raw": "- 3.00:1 (Experimental Ultra-wide)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- 4.00:1 (Polyvision)", "raw": "- 4.00:1 (Polyvision)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- 2.55:1 (CinemaScope)", "raw": "- 2.55:1 (CinemaScope)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- 2.20:1 (Todd-AO)", "raw": "- 2.20:1 (Todd-AO)", "href": null, 
"resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "More to come soon!!", "raw": "More to come soon!!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Thanks to ", "raw": "Thanks to ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@John6666", "href": null, "resource": null, "url": null, "code": null, "user": "John6666", "label": null, "lang": null }, { "type": "text", "value": " and ", "raw": " and ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@Resoldjew", "href": null, "resource": null, "url": null, "code": null, "user": "Resoldjew", "label": null, "lang": null }, { "type": "text", "value": " for your early support <3", "raw": " for your early support <3", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "And thanks to the team at ShuttleAI for their brand new Shuttle-3 model, what an amazing job.", "raw": "And thanks to the team at ShuttleAI for their brand new Shuttle-3 model, what an amazing job.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/shuttleai/shuttle-3-diffusion", "href": null, "resource": { "type": "model", "id": "shuttleai/shuttle-3-diffusion", "discussionNum": null }, "url": "https://huggingface.co/shuttleai/shuttle-3-diffusion", "code": null, "user": null, "label": null, "lang": null } ]
First post here goes! https://huggingface.co/spaces/takarajordan/CineDiffusion Super excited to announce CineDiffusion๐ŸŽฅ, it creates images up to 4.2 Megapixels in Cinematic ultrawide formats like: - 2.39:1 (Modern Widescreen) - 2.76:1 (Ultra Panavision 70) - 3.00:1 (Experimental Ultra-wide) - 4.00:1 (Polyvision) - 2.55:1 (CinemaScope) - 2.20:1 (Todd-AO) More to come soon!! Thanks to @John6666 and @Resoldjew for your early support <3 And thanks to the team at ShuttleAI for their brand new Shuttle-3 model, what an amazing job. https://huggingface.co/shuttleai/shuttle-3-diffusion
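As a back-of-the-envelope illustration of how an aspect ratio plus the ~4.2-megapixel budget mentioned above translates into concrete dimensions: the snapping to multiples of 64 is an assumption about typical diffusion-model constraints, not CineDiffusion's actual settings.

```python
# Given an aspect ratio and a pixel budget, work out width x height.
# Purely illustrative arithmetic; the Space's real resolutions may differ.
import math

def cinema_dims(ratio: float, megapixels: float = 4.2, multiple: int = 64):
    height = math.sqrt(megapixels * 1_000_000 / ratio)
    width = height * ratio
    # Snap down to the nearest multiple (a common latent-model requirement).
    return (int(width // multiple) * multiple, int(height // multiple) * multiple)

for name, ratio in [("2.39:1", 2.39), ("2.76:1", 2.76), ("4.00:1", 4.00)]:
    print(name, cinema_dims(ratio))  # e.g. 2.39:1 -> (3136, 1280)
```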
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/aqVOJmgtsBbB6BFeLpL7h.jpeg", "fullname": "Jordan Legg", "name": "takarajordan", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 10, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png", "fullname": "John Smith", "name": "John6666", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 398 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/no-auth/pGR-hop23eyai2npAa-rw.png", "fullname": "Reese Julia", "name": "Resoldjew", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1 } ]
[ { "reaction": "๐Ÿ‘", "users": [ "John6666", "davanstrien", "victor", "clem", "naturelizer", "Resoldjew" ], "count": 6 }, { "reaction": "๐Ÿ”ฅ", "users": [ "victor", "clem" ], "count": 2 }, { "reaction": "โค๏ธ", "users": [ "clem" ], "count": 1 } ]
2024-11-21T15:32:49.000Z
2024-11-21T15:32:49.453Z
[]
/posts/takarajordan/222403069517031
799
0
223900777337371
[ { "type": "text", "value": "๐Ÿšจ๐Ÿ”ฅ New Release Alert! ๐Ÿ”ฅ๐Ÿšจ", "raw": "๐Ÿšจ๐Ÿ”ฅ New Release Alert! ๐Ÿ”ฅ๐Ÿšจ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Introducing the 435M model that outperforms Llama-Guard-3-8B while slashing 75% of the computation cost! ๐Ÿ’ป๐Ÿ’ฅ", "raw": "Introducing the 435M model that outperforms Llama-Guard-3-8B while slashing 75% of the computation cost! ๐Ÿ’ป๐Ÿ’ฅ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "๐Ÿ‘‰ Check it out: ", "raw": "๐Ÿ‘‰ Check it out: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/hbseong/HarmAug-Guard", "href": null, "resource": { "type": "model", "id": "hbseong/HarmAug-Guard", "discussionNum": null }, "url": "https://huggingface.co/hbseong/HarmAug-Guard", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " (Yes, INFERENCE CODE INCLUDED! ๐Ÿ’ก)", "raw": " (Yes, INFERENCE CODE INCLUDED! ๐Ÿ’ก)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "More details in our paper: ", "raw": "More details in our paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://arxiv.org/abs/2410.01524", "href": "https://arxiv.org/abs/2410.01524", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ๐Ÿ“œ", "raw": " ๐Ÿ“œ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "#HarmAug #LLM # Safety #EfficiencyBoost #Research #AI #MachineLearning ", "raw": "#HarmAug #LLM # Safety #EfficiencyBoost #Research #AI #MachineLearning ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
๐Ÿšจ๐Ÿ”ฅ New Release Alert! ๐Ÿ”ฅ๐Ÿšจ Introducing the 435M model that outperforms Llama-Guard-3-8B while slashing 75% of the computation cost! ๐Ÿ’ป๐Ÿ’ฅ ๐Ÿ‘‰ Check it out: https://huggingface.co/hbseong/HarmAug-Guard (Yes, INFERENCE CODE INCLUDED! ๐Ÿ’ก) More details in our paper: https://arxiv.org/abs/2410.01524 ๐Ÿ“œ #HarmAug #LLM # Safety #EfficiencyBoost #Research #AI #MachineLearning
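A hypothetical usage sketch, assuming the checkpoint loads as a standard transformers sequence-classification model that scores a prompt/response pair for harmfulness. The model card's bundled inference code (linked above) is authoritative; the input formatting and label order below are unverified assumptions.

```python
# Hypothetical HarmAug-Guard usage sketch (assumptions noted in comments).
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_id = "hbseong/HarmAug-Guard"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id).eval()

prompt = "How do I make a dangerous chemical at home?"
response = "I can't help with that."

# Assumption: the classifier takes the prompt/response as a text pair.
inputs = tokenizer(prompt, response, return_tensors="pt", truncation=True)
with torch.no_grad():
    logits = model(**inputs).logits

# Assumed label order (unverified): index 1 = "unsafe". Check the model card.
unsafe_prob = torch.softmax(logits, dim=-1)[0, 1].item()
print(f"unsafe probability: {unsafe_prob:.3f}")
```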
{ "avatarUrl": "/avatars/6cda37befc873a92ed6d5dcba507954a.svg", "fullname": "Haebin Seong", "name": "hbseong", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 13, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "davanstrien", "iojvsuynv", "John6666", "clem", "brianjking" ], "count": 5 }, { "reaction": "๐Ÿ”ฅ", "users": [ "gksriharsha", "AISafety", "clem", "brianjking" ], "count": 4 } ]
2024-11-21T13:54:34.000Z
2024-11-22T15:40:08.847Z
[ { "avatarUrl": "/avatars/e0f4ee1c029ab2241dd9b24ae86fb31a.svg", "fullname": "Brian King", "name": "brianjking", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false } ]
/posts/hbseong/223900777337371
873
1
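For anyone who wants to try the HarmAug-Guard checkpoint announced in the post above, here is a minimal inference sketch using the Transformers sequence-classification API. It is only a sketch: the prompt/response template and the assumption that logit index 1 corresponds to "unsafe" are illustrative guesses, and the snippet on the model card itself is the authoritative version.

```python
# Minimal sketch: scoring a prompt/response pair with the HarmAug-Guard classifier.
# Assumptions (check the model card for the authoritative code): the checkpoint loads
# as a sequence-classification model, and logit index 1 is the "unsafe" class.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

device = "cuda" if torch.cuda.is_available() else "cpu"
model_id = "hbseong/HarmAug-Guard"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id).to(device).eval()

prompt = "How do I pick a lock?"
response = "I can't help with that."

# Assumed input template; the model card documents the exact format used in training.
text = f"prompt: {prompt}\nresponse: {response}"
inputs = tokenizer(text, return_tensors="pt", truncation=True).to(device)

with torch.no_grad():
    logits = model(**inputs).logits
unsafe_prob = torch.softmax(logits, dim=-1)[0, 1].item()  # assumed label order: [safe, unsafe]
print(f"P(unsafe) = {unsafe_prob:.3f}")
```

At 435M parameters, a classifier like this fits on a single consumer GPU (or CPU), which is the efficiency argument behind the comparison with Llama-Guard-3-8B.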
878288650656797
[ { "type": "text", "value": "Watch and learn!", "raw": "Watch and learn!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Let's observe Qwen2.5-coder:0.5b on OpenAI HumanEval.", "raw": "Let's observe Qwen2.5-coder:0.5b on OpenAI HumanEval.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`pip install observers`", "href": null, "resource": null, "url": null, "code": "pip install observers", "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "And start collecting your data on the Hugging Face Hub.", "raw": "And start collecting your data on the Hugging Face Hub.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Dataset: ", "raw": "Dataset: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/davidberenstein1957/openai_records", "href": null, "resource": { "type": "dataset", "id": "davidberenstein1957/openai_records", "discussionNum": null }, "url": "https://huggingface.co/datasets/davidberenstein1957/openai_records", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Library: ", "raw": "Library: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/cfahlgren1/observers", "href": "https://github.com/cfahlgren1/observers", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Watch and learn! Let's observe Qwen2.5-coder:0.5b on OpenAI HumanEval. `pip install observers` And start collecting your data on the Hugging Face Hub. Dataset: https://huggingface.co/datasets/davidberenstein1957/openai_records Library: https://github.com/cfahlgren1/observers
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1677141720071-634ff41ff32062e9eb7b06a3.jpeg", "fullname": "David Berenstein", "name": "davidberenstein1957", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 167, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "davidberenstein1957", "davanstrien", "AtAndDev", "John6666", "BrigitteTousi", "clem" ], "count": 6 }, { "reaction": "๐Ÿคฏ", "users": [ "davidberenstein1957", "AtAndDev", "BrigitteTousi", "clem", "fdaudens" ], "count": 5 }, { "reaction": "๐Ÿค—", "users": [ "davidberenstein1957", "AtAndDev", "BrigitteTousi", "clem" ], "count": 4 } ]
2024-11-21T13:47:04.000Z
2024-11-21T13:47:04.986Z
[]
/posts/davidberenstein1957/878288650656797
889
0
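As a companion to the observers post above, here is a rough sketch of the intended workflow: wrap an OpenAI-compatible client (here, a local Ollama server running qwen2.5-coder:0.5b) so every completion is recorded and can be synced to a Hugging Face dataset. The `wrap_openai` name and its import path are assumptions based on the library's stated purpose and may differ; check https://github.com/cfahlgren1/observers for the current API.

```python
# Rough sketch of the observers workflow (wrapper names are assumptions; see the repo README).
# pip install observers openai
from openai import OpenAI

# Assumed helper from the observers library that wraps an OpenAI-compatible client
# and logs every request/response (e.g. to a Hugging Face dataset).
from observers.observers import wrap_openai  # assumption: exact import path may differ

# Point the OpenAI client at a local Ollama server exposing qwen2.5-coder:0.5b.
base_client = OpenAI(base_url="http://localhost:11434/v1", api_key="ollama")
client = wrap_openai(base_client)

completion = client.chat.completions.create(
    model="qwen2.5-coder:0.5b",
    messages=[{"role": "user", "content": "Write a Python function that reverses a string."}],
)
print(completion.choices[0].message.content)
# Each call made through the wrapped client would be captured by observers and could be
# pushed to a dataset like the linked davidberenstein1957/openai_records.
```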