Upload folder using huggingface_hub

Files changed:
- README.md +32 -0
- merges.txt +0 -0
- special_tokens_map.json +5 -0
- tokenizer.json +0 -0
- tokenizer_config.json +9 -0
- vocab.json +0 -0
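For context, a commit like this is what huggingface_hub's `upload_folder()` API produces; the commit title above matches that function's default commit message. A minimal sketch of reproducing such an upload, assuming the tokenizer files sit in a local folder (the folder path and `repo_id` below are placeholders, not taken from this commit):

```py
# Minimal sketch: upload a local folder of tokenizer files to the Hub.
# The folder path and repo_id are placeholders.
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="./gpt-4o-tokenizer",        # local folder with README.md, vocab.json, merges.txt, ...
    repo_id="your-username/gpt-4o-tokenizer",  # target model repository
    repo_type="model",
    # commit_message defaults to "Upload folder using huggingface_hub"
)
```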
 
    	
README.md ADDED
@@ -0,0 +1,32 @@
+---
+license: mit
+library_name: transformers
+tags:
+ - transformers.js
+ - tokenizers
+---
+
+Cloned from [Xenova/gpt-4o](https://huggingface.co/Xenova/gpt-4o)
+
+
+# GPT-4o Tokenizer
+
+A 🤗-compatible version of the **GPT-4o tokenizer** (adapted from [openai/tiktoken](https://github.com/openai/tiktoken)). This means it can be used with Hugging Face libraries including [Transformers](https://github.com/huggingface/transformers), [Tokenizers](https://github.com/huggingface/tokenizers), and [Transformers.js](https://github.com/xenova/transformers.js).
+
+## Example usage:
+
+### Transformers/Tokenizers
+```py
+from transformers import GPT2TokenizerFast
+
+tokenizer = GPT2TokenizerFast.from_pretrained('Xenova/gpt-4o')
+assert tokenizer.encode('hello world') == [24912, 2375]
+```
+
+### Transformers.js
+```js
+import { AutoTokenizer } from '@xenova/transformers';
+
+const tokenizer = await AutoTokenizer.from_pretrained('Xenova/gpt-4o');
+const tokens = tokenizer.encode('hello world'); // [24912, 2375]
+```
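Since the card states the tokenizer is adapted from openai/tiktoken, a quick cross-check against tiktoken's `o200k_base` encoding (the encoding GPT-4o uses) is a natural sanity test. This is a sketch, not part of the card; it assumes a tiktoken release that ships `o200k_base`:

```py
# Sketch: compare the Hugging Face port with tiktoken's o200k_base encoding.
# Assumes `pip install transformers tiktoken` with a tiktoken version that
# includes o200k_base.
import tiktoken
from transformers import GPT2TokenizerFast

hf_tok = GPT2TokenizerFast.from_pretrained('Xenova/gpt-4o')
tt_enc = tiktoken.get_encoding('o200k_base')

text = 'hello world'
assert hf_tok.encode(text) == tt_enc.encode(text)  # both should give [24912, 2375] per the card
```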
    	
merges.txt ADDED
The diff for this file is too large to render; see the raw diff.
special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+{
+  "bos_token": "<|endoftext|>",
+  "eos_token": "<|endoftext|>",
+  "unk_token": "<|endoftext|>"
+}
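The three entries in special_tokens_map.json surface as attributes on the loaded tokenizer. A small usage sketch (assumed usage, not from the card):

```py
# Sketch: the values from special_tokens_map.json as seen through transformers.
from transformers import GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained('Xenova/gpt-4o')
print(tokenizer.bos_token)           # '<|endoftext|>'
print(tokenizer.eos_token)           # '<|endoftext|>'
print(tokenizer.unk_token)           # '<|endoftext|>'
print(tokenizer.special_tokens_map)  # dict mirroring special_tokens_map.json
```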
    	
tokenizer.json ADDED
The diff for this file is too large to render; see the raw diff.
    	
tokenizer_config.json ADDED
@@ -0,0 +1,9 @@
+{
+  "add_prefix_space": false,
+  "bos_token": "<|endoftext|>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|endoftext|>",
+  "model_max_length": 128000,
+  "tokenizer_class": "GPT2Tokenizer",
+  "unk_token": "<|endoftext|>"
+}
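Likewise, the fields in tokenizer_config.json become attributes of the loaded tokenizer; in particular, `model_max_length` records the 128,000-token context window. A sketch of inspecting them (assumed usage, not from the card):

```py
# Sketch: tokenizer_config.json fields as exposed by the loaded tokenizer.
from transformers import GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained('Xenova/gpt-4o')
print(tokenizer.model_max_length)              # 128000
print(tokenizer.add_prefix_space)              # False
print(tokenizer.clean_up_tokenization_spaces)  # False
```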
    	
vocab.json ADDED
The diff for this file is too large to render; see the raw diff.
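vocab.json and merges.txt are the raw byte-level BPE tables, while tokenizer.json bundles the vocabulary, merges, and pre-tokenization rules into the single file the fast tokenizer actually loads. As a sketch (assuming a local clone of the repository; the path is a placeholder), the fast tokenizer can be built straight from that file:

```py
# Sketch: build the fast tokenizer directly from the uploaded tokenizer.json.
# Assumes the repository has been cloned to ./gpt-4o (placeholder path).
from transformers import PreTrainedTokenizerFast

tokenizer = PreTrainedTokenizerFast(tokenizer_file="./gpt-4o/tokenizer.json")
print(tokenizer.encode("hello world"))  # expected [24912, 2375], matching the card
```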