Upload fusion_t2i_CLIP_interrogator.ipynb
Google Colab Notebooks/fusion_t2i_CLIP_interrogator.ipynb (CHANGED)
@@ -29,7 +29,8 @@
       "cell_type": "code",
       "execution_count": null,
       "metadata": {
-        "id": "UEYEdzjgOEOE"
+        "id": "UEYEdzjgOEOE",
+        "cellView": "form"
       },
       "outputs": [],
       "source": [
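The only metadata change in this cell (and in the other form cells further down) is the added "cellView": "form", which makes Colab open the cell in form view: the # @param widgets are shown and the code itself is collapsed by default.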
@@ -47,28 +48,22 @@
        "\n",
        "def fix_bad_symbols(txt):\n",
        "  result = txt\n",
-       "  for symbol in ['}', '{' , ')', '(', '[' , ']' , ':' , '=' ]:\n",
+       "  for symbol in ['^', '}', '{' , ')', '(', '[' , ']' , ':' , '=' ]:\n",
        "    result = result.replace(symbol,'\\\\' + symbol)\n",
-       "  for symbol in ['^']:\n",
-       "    result = result.replace(symbol,'')\n",
        "  #------#\n",
-       "  result = result.replace('\\\\|','|').replace(' |',' |')\n",
        "  return result;\n",
        "\n",
        "\n",
        "def getPrompts(_path, separator):\n",
-       "\n",
        "  path = _path + '/text'\n",
        "  path_enc = _path + '/text_encodings'\n",
        "  #-----#\n",
        "  index = 0\n",
-       "  file_index = 0\n",
        "  prompts = {}\n",
        "  text_encodings = {}\n",
        "  _text_encodings = {}\n",
        "  #-----#\n",
        "  for filename in os.listdir(f'{path}'):\n",
-       "\n",
        "    print(f'reading {filename}....')\n",
        "    _index = 0\n",
        "    %cd {path}\n",
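Rendered as plain Python (with the JSON string escaping stripped), the updated helper now backslash-escapes '^' together with the bracket, colon and equals characters, and no longer rewrites '|', which is used elsewhere as the item separator:

def fix_bad_symbols(txt):
    result = txt
    for symbol in ['^', '}', '{', ')', '(', '[', ']', ':', '=']:
        result = result.replace(symbol, '\\' + symbol)
    return result

# example: braces and brackets come back escaped, '|' is left alone
fix_bad_symbols('photo {woman|man} [realistic]')   # -> 'photo \\{woman|man\\} \\[realistic\\]'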
@@ -79,27 +74,13 @@
        "      _prompts = {\n",
        "      key : value for key, value in _df.items()\n",
        "      }\n",
+       "      _file_name = _prompts[f'{1}']\n",
+       "      %cd {path_enc}\n",
+       "      _text_encodings = load_file(f'{_file_name}.safetensors')\n",
        "      for key in _prompts:\n",
        "        _index = int(key)\n",
        "        value = _prompts[key]\n",
-       "\n",
-       "        #Read the 'header' file in the JSON\n",
-       "        if _index <= 0 :\n",
-       "          _NUM_ITEMS = int(value)\n",
-       "          prompts[f'{index}'] = _prompts[f'{_index}'] + separator\n",
-       "          index = index + 1\n",
-       "          continue\n",
-       "        if _index <= 1 :\n",
-       "          _file_name = f'{value}'\n",
-       "          %cd {path_enc}\n",
-       "          _text_encodings = load_file(f'{_file_name}.safetensors')\n",
-       "          #Store text_encodings for the header items\n",
-       "          text_encodings[f'{index-1}'] = _text_encodings[f'{_index-1}']\n",
-       "          text_encodings[f'{index}'] = _text_encodings[f'{_index}']\n",
-       "          #------#\n",
-       "          prompts[f'{index}'] = _prompts[f'{_index}'] + separator\n",
-       "          index = index + 1\n",
-       "          continue\n",
+       "        if _index<2:continue\n",
        "        #------#\n",
        "        #Read the text_encodings + prompts\n",
        "        text_encodings[f'{index}'] = _text_encodings[f'{_index}']\n",
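In plain terms, getPrompts no longer treats keys '0' and '1' of each JSON file as in-loop header entries: the safetensors filename is now read directly from key '1' before the loop, the encodings are loaded once per file, and the first two keys are simply skipped. A sketch of the new flow as standalone Python; the file-reading lines and the tail of the loop are not part of this hunk, so they are filled in from matching patterns elsewhere in the notebook and should be read as an approximation (full paths are used instead of the notebook's %cd magics):

import json, os
import pandas as pd
from safetensors.torch import load_file

def getPrompts(_path, separator):
    path, path_enc = _path + '/text', _path + '/text_encodings'
    prompts, text_encodings, index = {}, {}, 0
    for filename in os.listdir(path):
        with open(f'{path}/{filename}', 'r') as f:
            _df = pd.DataFrame({'count': json.load(f)})['count']
        _prompts = {key: value for key, value in _df.items()}
        _file_name = _prompts['1']                                   # key '1' names the .safetensors file
        _text_encodings = load_file(f'{path_enc}/{_file_name}.safetensors')
        for key in _prompts:
            _index = int(key)
            if _index < 2: continue                                  # keys '0' and '1' are header entries
            text_encodings[f'{index}'] = _text_encodings[f'{_index}']
            prompts[f'{index}'] = _prompts[f'{_index}'] + separator  # unchanged tail of the loop
            index = index + 1
    return prompts, text_encodings, index - 1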
@@ -108,8 +89,6 @@
        "          continue\n",
        "        #-------#\n",
        "      #--------#\n",
-       "      #_text_encodings.close() #close the text_encodings file\n",
-       "      file_index = file_index + 1\n",
        "    #----------#\n",
        "  NUM_ITEMS = index -1\n",
        "  return prompts , text_encodings , NUM_ITEMS\n",
@@ -255,8 +234,7 @@
        "    #----------#\n",
        "  NUM_ITEMS = index -1\n",
        "  return prompts , text_encodings , urls , image_encodings , NUM_ITEMS\n",
-       "
-       "\n"
+       "#--------#"
       ]
      },
      {
@@ -264,35 +242,57 @@
       "source": [
        "# @title 📚 Select items to sample from\n",
        "\n",
-       "prompt_features =
-       "civitai_blue_set =
-       "
-       "
-       "
-       "prefix = False # @param {\"type\":\"boolean\",\"placeholder\":\"🔸\"}\n",
-       "emojis = False # @param {\"type\":\"boolean\",\"placeholder\":\"😃\"}\n",
+       "prompt_features = True # @param {\"type\":\"boolean\",\"placeholder\":\"🦜\"}\n",
+       "civitai_blue_set = True # @param {\"type\":\"boolean\",\"placeholder\":\"📘\"}\n",
+       "suffix = True # @param {\"type\":\"boolean\",\"placeholder\":\"🔹\"}\n",
+       "prefix = True # @param {\"type\":\"boolean\",\"placeholder\":\"🔸\"}\n",
+       "emojis = True # @param {\"type\":\"boolean\",\"placeholder\":\"😃\"}\n",
        "#------#\n",
-       "
-       "first_names =
-       "last_names =
-       "celebs =
+       "\n",
+       "first_names = True # @param {\"type\":\"boolean\",\"placeholder\":\"🔹\"}\n",
+       "last_names = True # @param {\"type\":\"boolean\",\"placeholder\":\"🔸\"}\n",
+       "celebs = True # @param {\"type\":\"boolean\",\"placeholder\":\"🆔👨\"}\n",
        "#-------#\n",
        "danbooru_tags = True # @param {\"type\":\"boolean\",\"placeholder\":\"🎀\"}\n",
-       "lyrics =
-       "tripple_nouns =
+       "lyrics = True # @param {\"type\":\"boolean\",\"placeholder\":\"🎼\"}\n",
+       "tripple_nouns = True # @param {\"type\":\"boolean\",\"placeholder\":\"🎼\"}\n",
        "#-----#\n",
-       "female_fullnames =
+       "female_fullnames = True # @param {\"type\":\"boolean\",\"placeholder\":\"😃\"}\n",
        "debug = False\n",
+       "\n",
+       "civitai_red_set = True # @param {\"type\":\"boolean\",\"placeholder\":\"📕\"}\n",
+       "e621 = True # @param {\"type\":\"boolean\",\"placeholder\":\"😃\"}\n",
+       "prefix_suffix_pairs = True # @param {\"type\":\"boolean\",\"placeholder\":\"😃\"}\n",
+       "suffix_tripple = True # @param {\"type\":\"boolean\",\"placeholder\":\"😃\"}\n",
+       "suffix_quad = True # @param {\"type\":\"boolean\",\"placeholder\":\"😃\"}\n",
        "#------#\n",
        "prompts = {}\n",
        "text_encodings = {}\n",
        "nA = 0\n",
        "#--------#\n",
        "\n",
-       "if
-       "  url = '/content/text-to-image-prompts/
+       "if civitai_red_set:\n",
+       "  url = '/content/text-to-image-prompts/civitai-prompts/red'\n",
+       "  prompts , text_encodings, nA = append_from_url(prompts , text_encodings, nA , url , '')\n",
+       "\n",
+       "if e621:\n",
+       "  url = '/content/text-to-image-prompts/e621'\n",
+       "  prompts , text_encodings, nA = append_from_url(prompts , text_encodings, nA , url , '')\n",
+       "\n",
+       "if prefix_suffix_pairs:\n",
+       "  url = '/content/text-to-image-prompts/prefix_suffix_pairs'\n",
+       "  prompts , text_encodings, nA = append_from_url(prompts , text_encodings, nA , url , '')\n",
+       "\n",
+       "if suffix_tripple:\n",
+       "  url = '/content/text-to-image-prompts/suffix_tripple'\n",
        "  prompts , text_encodings, nA = append_from_url(prompts , text_encodings, nA , url , '')\n",
        "\n",
+       "if suffix_quad:\n",
+       "  url = '/content/text-to-image-prompts/suffix_quad'\n",
+       "  prompts , text_encodings, nA = append_from_url(prompts , text_encodings, nA , url , '')\n",
+       "\n",
+       "\n",
+       "\n",
        "if tripple_nouns:\n",
        "  url = '/content/text-to-image-prompts/nouns'\n",
        "  prompts , text_encodings, nA = append_from_url(prompts , text_encodings, nA , url , '')\n",
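Each checked set appends one folder of pre-encoded prompts to the running prompts / text_encodings dictionaries, with nA tracking the next free index. append_from_url itself is defined in an earlier cell that this diff does not touch; the following is only a hypothetical sketch of such a helper, inferred from its call sites here (the name reuse, indexing and return shape are assumptions, not the notebook's actual code):

def append_from_url(prompts, text_encodings, nA, url, separator):
    # hypothetical: load one folder with getPrompts and merge it at offset nA
    _prompts, _text_encodings, _num = getPrompts(url, separator)
    for k in range(len(_prompts)):
        prompts[f'{nA + k}'] = _prompts[f'{k}']
        text_encodings[f'{nA + k}'] = _text_encodings[f'{k}']
    return prompts, text_encodings, nA + len(_prompts)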
@@ -344,16 +344,6 @@
        "  prompts , text_encodings, nA = append_from_url(prompts , text_encodings, nA , url , '')\n",
        "#--------#\n",
        "\n",
-       "if civitai_yellow_set:\n",
-       "  url = '/content/text-to-image-prompts/civitai-prompts/yellow'\n",
-       "  prompts , text_encodings, nA = append_from_url(prompts , text_encodings, nA , url , '')\n",
-       "#--------#\n",
-       "\n",
-       "if artby_prompts:\n",
-       "  url = '/content/text-to-image-prompts/artby'\n",
-       "  prompts , text_encodings, nA = append_from_url(prompts , text_encodings, nA , url , '')\n",
-       "#--------#\n",
-       "\n",
        "if suffix :\n",
        "  tmp = '/content/text-to-image-prompts/vocab/text_encodings/suffix/'\n",
        "  for item in ['common','average','rare','weird','exotic'] :\n",
@@ -381,10 +371,13 @@
        "text_tensor = torch.zeros(NUM_VOCAB_ITEMS,768)\n",
        "for index in range(NUM_VOCAB_ITEMS):\n",
        "  text_tensor[index] = text_encodings[f'{index}']\n",
-       "#---------#\n"
+       "#---------#\n",
+       "print(f\"Done loading vocabulary for the interrogator\")\n",
+       "print(f\"Vocab size is : {NUM_VOCAB_ITEMS} items\")"
       ],
       "metadata": {
-        "id": "CF53WIAKObg3"
+        "id": "CF53WIAKObg3",
+        "cellView": "form"
       },
       "execution_count": null,
       "outputs": []
@@ -394,7 +387,7 @@
       "source": [
        "# @title \t⚄ Use a pre-encoded prompt + image pair from the fusion gen (note: NSFW!)\n",
        "# @markdown 🖼️ Choose a pre-encoded reference\n",
-       "index =
+       "index = 682 # @param {type:\"slider\", min:0, max:1666, step:1}\n",
        "PROMPT_INDEX = index\n",
        "\n",
        "import math\n",
@@ -411,7 +404,7 @@
        "neg_strength = math.pow(10 ,log_strength-1)\n",
        "\n",
        "# @markdown ⏩ Skip item(s) containing the word\n",
-       "SKIP = '
+       "SKIP = 'futa ' # @param {type:'string' , placeholder:'item1 , item2 , ...'}\n",
        "\n",
        "# @markdown ⚖️ How important is the prompt vs. the image? <br> <br>\n",
        "# @markdown <div style = \"font-size: 10px; \"> sim_ref = (10^(log_strength-1)) * ( C* text_encoding + image_encoding*(1-C) ) </div> <br>\n",
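The markdown in this cell documents how the reference vector is built before the vocabulary is ranked. A rough, self-contained PyTorch sketch of that formula follows; the values below are placeholders standing in for the form fields and the [NUM_VOCAB_ITEMS, 768] vocabulary matrix built earlier, not the notebook's real data:

import math, torch

log_strength   = 1.0                    # placeholder for the form slider
C              = 0.5                    # placeholder for the text/image balance
text_encoding  = torch.randn(1, 768)    # placeholder for the reference prompt encoding
image_encoding = torch.randn(1, 768)    # placeholder for the reference image encoding
text_tensor    = torch.randn(10, 768)   # placeholder for the vocabulary encodings

strength = math.pow(10, log_strength - 1)                        # 10^(log_strength-1)
sim_ref  = strength * (C * text_encoding + (1 - C) * image_encoding)

sims = torch.matmul(text_tensor, sim_ref.t())                    # one score per vocabulary item
sorted_sims, indices = torch.sort(sims, dim=0, descending=True)  # best matches first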
@@ -609,7 +602,8 @@
        "#------#"
       ],
       "metadata": {
-        "id": "XW3914T8O2uf"
+        "id": "XW3914T8O2uf",
+        "cellView": "form"
       },
       "execution_count": null,
       "outputs": []
@@ -680,7 +674,8 @@
        "#-------#"
       ],
       "metadata": {
-        "id": "EdBiAguJO9aX"
+        "id": "EdBiAguJO9aX",
+        "cellView": "form"
       },
       "execution_count": null,
       "outputs": []
@@ -734,7 +729,8 @@
        "  json.dump(_savefile, f)\n"
       ],
       "metadata": {
-        "id": "Q7vpNAXQilbf"
+        "id": "Q7vpNAXQilbf",
+        "cellView": "form"
       },
       "execution_count": null,
       "outputs": []
@@ -744,19 +740,26 @@
       "source": [
        "# @title \t⚄ Create a savefile-set from the entire range of pre-encoded items\n",
        "\n",
-       "#image_index = 0 # @param {type:'number'}\n",
        "# @markdown 📥 Load the data (only required one time)\n",
        "load_the_data = True # @param {type:\"boolean\"}\n",
        "\n",
+       "import math\n",
+       "from safetensors.torch import load_file\n",
+       "import json , os , torch\n",
+       "import pandas as pd\n",
+       "from PIL import Image\n",
+       "import requests\n",
+       "\n",
+       "def my_mkdirs(folder):\n",
+       "  if os.path.exists(folder)==False:\n",
+       "    os.makedirs(folder)\n",
+       "\n",
        "# @markdown ⚖️ Set the value for C in the reference <br> <br> sim = C* text_enc + image_enc*(1-C) <br><br>\n",
        "\n",
        "C = 0.5 # @param {type:\"slider\", min:0, max:1, step:0.01}\n",
        "\n",
        "# @markdown 🚫 Penalize similarity to this prompt(optional)\n",
-       "\n",
        "if(load_the_data):\n",
-       "  from PIL import Image\n",
-       "  import requests\n",
        "  target_prompts , target_text_encodings , urls , target_image_encodings , NUM_ITEMS = getPromptsAndLinks('/content/text-to-image-prompts/fusion')\n",
        "  from transformers import AutoTokenizer\n",
        "  tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n",
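The imports and the my_mkdirs helper are now defined inside this cell so it can run on its own. As an aside, the added helper is equivalent to the standard-library one-liner:

import os

def my_mkdirs(folder):
    os.makedirs(folder, exist_ok=True)   # same effect as the version added above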
@@ -780,16 +783,31 @@
        "}\n",
        "#------#\n",
        "\n",
-       "root_savefile_name = '
+       "root_savefile_name = 'fusion_C05_X7'\n",
+       "\n",
+       "%cd /content/\n",
        "output_folder = '/content/output/savefiles/'\n",
        "my_mkdirs(output_folder)\n",
+       "my_mkdirs('/content/output2/savefiles/')\n",
+       "my_mkdirs('/content/output3/savefiles/')\n",
+       "my_mkdirs('/content/output4/savefiles/')\n",
+       "my_mkdirs('/content/output5/savefiles/')\n",
+       "my_mkdirs('/content/output6/savefiles/')\n",
+       "my_mkdirs('/content/output7/savefiles/')\n",
+       "my_mkdirs('/content/output8/savefiles/')\n",
+       "my_mkdirs('/content/output9/savefiles/')\n",
+       "my_mkdirs('/content/output10/savefiles/')\n",
+       "my_mkdirs('/content/output11/savefiles/')\n",
+       "my_mkdirs('/content/output12/savefiles/')\n",
+       "my_mkdirs('/content/output13/savefiles/')\n",
+       "\n",
+       "\n",
        "NEG = '' # @param {type:'string'}\n",
        "strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.1}\n",
        "\n",
        "for index in range(1667):\n",
        "\n",
        "  PROMPT_INDEX = index\n",
-       "\n",
        "  prompt = target_prompts[f'{index}']\n",
        "  url = urls[f'{index}']\n",
        "  if url.find('perchance')>-1:\n",
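A small aside on the thirteen directory calls just added: the same folders could be created with a loop, which is easier to keep in sync with the chunk routing further down:

my_mkdirs('/content/output/savefiles/')
for i in range(2, 14):                         # output2 ... output13
    my_mkdirs(f'/content/output{i}/savefiles/')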
@@ -797,110 +815,79 @@
        "  else: continue #print(\"(No image for this ID)\")\n",
        "\n",
        "  print(f\"no. {PROMPT_INDEX} : '{prompt}'\")\n",
+       "  text_features_A = target_text_encodings[f'{index}']\n",
+       "  image_features_A = target_image_encodings[f'{index}']\n",
+       "  # text-similarity\n",
+       "  sims = C * torch.matmul(text_tensor, text_features_A.t())\n",
        "\n",
-       "\n",
-       "  if(
-       "
-       "
-       "\n",
+       "  neg_sims = 0*sims\n",
+       "  if(NEG != ''):\n",
+       "    # Get text features for user input\n",
+       "    inputs = tokenizer(text = NEG, padding=True, return_tensors=\"pt\")\n",
+       "    text_features_NEG = model.get_text_features(**inputs)\n",
+       "    text_features_NEG = text_features_A/text_features_A.norm(p=2, dim=-1, keepdim=True)\n",
        "  # text-similarity\n",
-       "
-       "
-       "  neg_sims = 0*sims\n",
-       "  if(NEG != ''):\n",
-       "\n",
-       "    # Get text features for user input\n",
-       "    inputs = tokenizer(text = NEG, padding=True, return_tensors=\"pt\")\n",
-       "    text_features_NEG = model.get_text_features(**inputs)\n",
-       "    text_features_NEG = text_features_A/text_features_A.norm(p=2, dim=-1, keepdim=True)\n",
-       "\n",
-       "    # text-similarity\n",
-       "    neg_sims = strength*torch.matmul(text_tensor, text_features_NEG.t())\n",
-       "  #------#\n",
-       "\n",
-       "  # plus image-similarity\n",
-       "  sims = sims + (1-C) * torch.matmul(text_tensor, image_features_A.t()) * logit_scale\n",
-       "\n",
-       "  # minus NEG-similarity\n",
-       "  sims = sims - neg_sims\n",
-       "\n",
-       "  # Sort the items\n",
-       "  sorted , indices = torch.sort(sims,dim=0 , descending=True)\n",
-       "\n",
-       "  # @title ⚙️📝 Print the results (Advanced)\n",
-       "  list_size = 1000 # param {type:'number'}\n",
-       "  start_at_index = 0 # param {type:'number'}\n",
-       "  print_Similarity = True # param {type:\"boolean\"}\n",
-       "  print_Prompts = True # param {type:\"boolean\"}\n",
-       "  print_Prefix = True # param {type:\"boolean\"}\n",
-       "  print_Descriptions = True # param {type:\"boolean\"}\n",
-       "  compact_Output = True # param {type:\"boolean\"}\n",
-       "\n",
-       "  # @markdown -----------\n",
-       "  # @markdown ⚙️📝 Printing options\n",
-       "  newline_Separator = False # @param {type:\"boolean\"}\n",
+       "    neg_sims = strength*torch.matmul(text_tensor, text_features_NEG.t())\n",
+       "  #------#\n",
        "\n",
-       "
-       "
-       "  start_at_index2 = 10000 # param {type:'number'}\n",
-       "  rate_percent = 0 # param {type:\"slider\", min:0, max:100, step:1}\n",
+       "  # plus image-similarity\n",
+       "  sims = sims + (1-C) * torch.matmul(text_tensor, image_features_A.t()) * logit_scale\n",
        "\n",
-       "
-       "
+       "  # minus NEG-similarity\n",
+       "  sims = sims - neg_sims\n",
        "\n",
-       "
-       "
-       "  separator = '|'\n",
-       "  if newline_Separator : separator = separator + '\\n'\n",
+       "  # Sort the items\n",
+       "  sorted , indices = torch.sort(sims,dim=0 , descending=True)\n",
        "\n",
+       "  # @markdown Repeat output N times\n",
+       "  RANGE = 1000\n",
+       "  NUM_CHUNKS = 10+\n",
+       "  separator = '|'\n",
+       "  _savefiles = {}\n",
+       "  #-----#\n",
+       "  for chunk in range(NUM_CHUNKS):\n",
+       "    if chunk=<10:continue\n",
+       "    start_at_index = chunk * RANGE\n",
        "  _prompts = ''\n",
-       "  _sims = ''\n",
        "  for _index in range(start_at_index + RANGE):\n",
        "    if _index < start_at_index : continue\n",
        "    index = indices[_index].item()\n",
-       "\n",
        "    prompt = prompts[f'{index}']\n",
-       "    if rate_percent >= random.randint(0,100) : prompt = prompts[f'{random.randint(start_at_index2 , start_at_index2 + list_size2)}']\n",
-       "\n",
-       "    #Remove duplicates\n",
-       "    if _prompts.find(prompt + separator)<=-1:\n",
-       "      _sims = _sims + f'{round(100*sims[index].item(), 2)} %' + separator\n",
-       "      #-------#\n",
        "      _prompts = _prompts.replace(prompt + separator,'')\n",
        "      _prompts = _prompts + prompt + separator\n",
-       "    #------#\n",
        "  #------#\n",
        "  _prompts = fix_bad_symbols(_prompts)\n",
-       [16 further removed lines are cut off in the diff view]
+       "  _prompts = ('{' + _prompts + '}').replace(separator + '}', '}')\n",
+       "  _savefiles[f'{chunk}'] = _prompts\n",
+       "  #---------#\n",
+       "  save_filename = f'{root_savefile_name}_{start_at_index + RANGE}_{PROMPT_INDEX}.json'\n",
+       "\n",
+       "\n",
+       "  if (chunk=<20 && chunk>10): %cd '/content/output2/savefiles/'\n",
+       "  if (chunk<=30 && chunk>20): %cd '/content/output3/savefiles/'\n",
+       "  if (chunk=<40 && chunk>30): %cd '/content/output4/savefiles/'\n",
+       "  if (chunk<=50 && chunk>40): %cd '/content/output5/savefiles/'\n",
+       "  if (chunk=<60 && chunk>50): %cd '/content/output6/savefiles/'\n",
+       "  if (chunk<=70 && chunk>60): %cd '/content/output7/savefiles/'\n",
+       "  if (chunk=<80 && chunk>70): %cd '/content/output8/savefiles/'\n",
+       "  if (chunk<=90 && chunk>80): %cd '/content/output9/savefiles/'\n",
+       "  if (chunk=<100 && chunk>90): %cd '/content/output10/savefiles/'\n",
+       "  if (chunk<=110 && chunk>100): %cd '/content/output11/savefiles/'\n",
+       "  if (chunk=<120 && chunk>110): %cd '/content/output12/savefiles/'\n",
+       "  if (chunk<=130 && chunk>120): %cd '/content/output13/savefiles/'\n",
+       "\n",
+       "\n",
        "  #------#\n",
-       "  save_filename = f'{root_savefile_name}{PROMPT_INDEX}.json'\n",
-       "  #-----#\n",
-       "  %cd {output_folder}\n",
        "  print(f'Saving savefile {save_filename} to {output_folder}...')\n",
        "  with open(save_filename, 'w') as f:\n",
-       "    json.dump(
+       "    json.dump(_savefiles, f)\n",
        "  #---------#\n",
        "  continue\n",
-       "
-       "\n"
+       "#-----------#"
       ],
       "metadata": {
-        "id": "x1uAVXZEoL0T"
+        "id": "x1uAVXZEoL0T",
+        "cellView": "form"
       },
       "execution_count": null,
       "outputs": []
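Two notes on the rewritten loop above. First, the added scoring lines, collected here as plain Python, blend prompt- and image-similarity for the selected reference, subtract the optional negative-prompt similarity, and rank the whole vocabulary (the tensors are the notebook's own variables from this cell and the vocabulary cell):

sims = C * torch.matmul(text_tensor, text_features_A.t())
sims = sims + (1 - C) * torch.matmul(text_tensor, image_features_A.t()) * logit_scale
sims = sims - neg_sims                                      # neg_sims stays at 0*sims unless NEG is set
sorted_sims, indices = torch.sort(sims, dim=0, descending=True)

As committed, text_features_NEG is immediately overwritten with the normalized text_features_A, so the negative prompt's own embedding is never actually used. Second, the new chunk-routing lines are not valid Python as written: "NUM_CHUNKS = 10+" is an incomplete expression, "=<" is not an operator, and "&&" should be "and". A corrected sketch of what the routing appears to intend; the chunk count is an assumption, since "if chunk <= 10: continue" combined with NUM_CHUNKS = 10 would skip every chunk:

NUM_CHUNKS = 130                             # assumed; enough chunks to reach output13
for chunk in range(NUM_CHUNKS):
    if chunk <= 10:
        continue
    start_at_index = chunk * RANGE
    folder_no = (chunk - 1) // 10 + 1        # chunks 11-20 -> output2, 21-30 -> output3, ...
    %cd /content/output{folder_no}/savefiles/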
@@ -925,53 +912,8 @@
        "!zip -r {zip_dest} {root_output_folder}"
       ],
       "metadata": {
-        "id": "zivBNrw9uSVD"
-      },
-      "execution_count": null,
-      "outputs": []
-    },
-    {
-      "cell_type": "code",
-      "source": [
-        "output_folder = '/content/output/fusion-gen-savefiles/'\n",
-        "index = 0\n",
-        "path = '/content/text-to-image-prompts/fusion-gen-savefiles'\n",
-        "\n",
-        "def my_mkdirs(folder):\n",
-        "  if os.path.exists(folder)==False:\n",
-        "    os.makedirs(folder)\n",
-        "\n",
-        "my_mkdirs(output_folder)\n",
-        "for filename in os.listdir(f'{path}'):\n",
-        "  if filename.find('fusion_C05_X7_1000_')<=-1: continue\n",
-        "  print(f'reading {filename}...')\n",
-        "  %cd {path}\n",
-        "  with open(f'{filename}', 'r') as f:\n",
-        "    data = json.load(f)\n",
-        "  _df = pd.DataFrame({'count': data})['count']\n",
-        "  _savefile = {\n",
-        "  key : value for key, value in _df.items()\n",
-        "  }\n",
-        "\n",
-        "  _savefile2 = {}\n",
-        "\n",
-        "  for key in _savefile:\n",
-        "    _savefile2[key] = _savefile[key]\n",
-        "    if(key == \"_main\") :\n",
-        "      _savefile2[key] = \"Prompt input only ✏️\"\n",
-        "      print(\"changed\")\n",
-        "    #----------#\n",
-        "\n",
-        "  save_filename = f'fusion_C05_X7_1000_{index}.json'\n",
-        "  index = index + 1\n",
-        "\n",
-        "  %cd {output_folder}\n",
-        "  print(f'Saving savefile {save_filename} to {output_folder}...')\n",
-        "  with open(save_filename, 'w') as f:\n",
-        "    json.dump(_savefile2, f)"
-      ],
-      "metadata": {
-        "id": "A3ASDnO3IzSL"
+        "id": "zivBNrw9uSVD",
+        "cellView": "form"
       },
       "execution_count": null,
       "outputs": []