prajwath committed on
Commit 34aa1cf · verified · 1 Parent(s): c117e41

Upload 28 files

Task 2/Eng_French.ipynb ADDED
@@ -0,0 +1,953 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {
6
+ "collapsed": true
7
+ },
8
+ "source": [
9
+ "# Machine Translation Project (English to French)"
10
+ ]
11
+ },
12
+ {
13
+ "cell_type": "code",
14
+ "execution_count": 6,
15
+ "metadata": {},
16
+ "outputs": [
17
+ {
18
+ "name": "stderr",
19
+ "output_type": "stream",
20
+ "text": [
21
+ "Fetching 30 files: 100%|██████████| 30/30 [00:00<?, ?it/s]\n"
22
+ ]
23
+ },
24
+ {
25
+ "ename": "OSError",
26
+ "evalue": "SavedModel file does not exist at: C:\\Users\\prajw\\.cache\\huggingface\\hub\\models--prajwath--NullClass_Task-1\\snapshots\\db56a5e525e47ee9bde30fd099bea336bba3908f\\{saved_model.pbtxt|saved_model.pb}",
27
+ "output_type": "error",
28
+ "traceback": [
29
+ "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
30
+ "\u001b[1;31mOSError\u001b[0m Traceback (most recent call last)",
31
+ "Cell \u001b[1;32mIn[6], line 3\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mhuggingface_hub\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m from_pretrained_keras\n\u001b[1;32m----> 3\u001b[0m model \u001b[38;5;241m=\u001b[39m \u001b[43mfrom_pretrained_keras\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mprajwath/NullClass_Task-1\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n",
32
+ "File \u001b[1;32mc:\\Users\\prajw\\anaconda3\\envs\\TestNullClass\\lib\\site-packages\\huggingface_hub\\keras_mixin.py:293\u001b[0m, in \u001b[0;36mfrom_pretrained_keras\u001b[1;34m(*args, **kwargs)\u001b[0m\n\u001b[0;32m 240\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mfrom_pretrained_keras\u001b[39m(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mKerasModelHubMixin\u001b[39m\u001b[38;5;124m\"\u001b[39m:\n\u001b[0;32m 241\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124mr\u001b[39m\u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[0;32m 242\u001b[0m \u001b[38;5;124;03m Instantiate a pretrained Keras model from a pre-trained model from the Hub.\u001b[39;00m\n\u001b[0;32m 243\u001b[0m \u001b[38;5;124;03m The model is expected to be in `SavedModel` format.\u001b[39;00m\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 291\u001b[0m \u001b[38;5;124;03m </Tip>\u001b[39;00m\n\u001b[0;32m 292\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[1;32m--> 293\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m KerasModelHubMixin\u001b[38;5;241m.\u001b[39mfrom_pretrained(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n",
33
+ "File \u001b[1;32mc:\\Users\\prajw\\anaconda3\\envs\\TestNullClass\\lib\\site-packages\\huggingface_hub\\utils\\_validators.py:114\u001b[0m, in \u001b[0;36mvalidate_hf_hub_args.<locals>._inner_fn\u001b[1;34m(*args, **kwargs)\u001b[0m\n\u001b[0;32m 111\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m check_use_auth_token:\n\u001b[0;32m 112\u001b[0m kwargs \u001b[38;5;241m=\u001b[39m smoothly_deprecate_use_auth_token(fn_name\u001b[38;5;241m=\u001b[39mfn\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m, has_token\u001b[38;5;241m=\u001b[39mhas_token, kwargs\u001b[38;5;241m=\u001b[39mkwargs)\n\u001b[1;32m--> 114\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m fn(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n",
34
+ "File \u001b[1;32mc:\\Users\\prajw\\anaconda3\\envs\\TestNullClass\\lib\\site-packages\\huggingface_hub\\hub_mixin.py:558\u001b[0m, in \u001b[0;36mModelHubMixin.from_pretrained\u001b[1;34m(cls, pretrained_model_name_or_path, force_download, resume_download, proxies, token, cache_dir, local_files_only, revision, **model_kwargs)\u001b[0m\n\u001b[0;32m 555\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mcls\u001b[39m\u001b[38;5;241m.\u001b[39m_hub_mixin_inject_config \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mconfig\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m model_kwargs:\n\u001b[0;32m 556\u001b[0m model_kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mconfig\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m config\n\u001b[1;32m--> 558\u001b[0m instance \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mcls\u001b[39m\u001b[38;5;241m.\u001b[39m_from_pretrained(\n\u001b[0;32m 559\u001b[0m model_id\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mstr\u001b[39m(model_id),\n\u001b[0;32m 560\u001b[0m revision\u001b[38;5;241m=\u001b[39mrevision,\n\u001b[0;32m 561\u001b[0m cache_dir\u001b[38;5;241m=\u001b[39mcache_dir,\n\u001b[0;32m 562\u001b[0m force_download\u001b[38;5;241m=\u001b[39mforce_download,\n\u001b[0;32m 563\u001b[0m proxies\u001b[38;5;241m=\u001b[39mproxies,\n\u001b[0;32m 564\u001b[0m resume_download\u001b[38;5;241m=\u001b[39mresume_download,\n\u001b[0;32m 565\u001b[0m local_files_only\u001b[38;5;241m=\u001b[39mlocal_files_only,\n\u001b[0;32m 566\u001b[0m token\u001b[38;5;241m=\u001b[39mtoken,\n\u001b[0;32m 567\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mmodel_kwargs,\n\u001b[0;32m 568\u001b[0m )\n\u001b[0;32m 570\u001b[0m \u001b[38;5;66;03m# Implicitly set the config as instance attribute if not already set by the class\u001b[39;00m\n\u001b[0;32m 571\u001b[0m \u001b[38;5;66;03m# This way `config` will be available when calling `save_pretrained` or `push_to_hub`.\u001b[39;00m\n\u001b[0;32m 572\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m config \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m (\u001b[38;5;28mgetattr\u001b[39m(instance, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m_hub_mixin_config\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;28;01mNone\u001b[39;00m) \u001b[38;5;129;01min\u001b[39;00m (\u001b[38;5;28;01mNone\u001b[39;00m, {})):\n",
35
+ "File \u001b[1;32mc:\\Users\\prajw\\anaconda3\\envs\\TestNullClass\\lib\\site-packages\\huggingface_hub\\keras_mixin.py:494\u001b[0m, in \u001b[0;36mKerasModelHubMixin._from_pretrained\u001b[1;34m(cls, model_id, revision, cache_dir, force_download, proxies, resume_download, local_files_only, token, config, **model_kwargs)\u001b[0m\n\u001b[0;32m 491\u001b[0m storage_folder \u001b[38;5;241m=\u001b[39m model_id\n\u001b[0;32m 493\u001b[0m \u001b[38;5;66;03m# TODO: change this in a future PR. We are not returning a KerasModelHubMixin instance here...\u001b[39;00m\n\u001b[1;32m--> 494\u001b[0m model \u001b[38;5;241m=\u001b[39m \u001b[43mkeras\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmodels\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mload_model\u001b[49m\u001b[43m(\u001b[49m\u001b[43mstorage_folder\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 496\u001b[0m \u001b[38;5;66;03m# For now, we add a new attribute, config, to store the config loaded from the hub/a local dir.\u001b[39;00m\n\u001b[0;32m 497\u001b[0m model\u001b[38;5;241m.\u001b[39mconfig \u001b[38;5;241m=\u001b[39m config\n",
36
+ "File \u001b[1;32mc:\\Users\\prajw\\anaconda3\\envs\\TestNullClass\\lib\\site-packages\\keras\\utils\\traceback_utils.py:70\u001b[0m, in \u001b[0;36mfilter_traceback.<locals>.error_handler\u001b[1;34m(*args, **kwargs)\u001b[0m\n\u001b[0;32m 67\u001b[0m filtered_tb \u001b[38;5;241m=\u001b[39m _process_traceback_frames(e\u001b[38;5;241m.\u001b[39m__traceback__)\n\u001b[0;32m 68\u001b[0m \u001b[38;5;66;03m# To get the full stack trace, call:\u001b[39;00m\n\u001b[0;32m 69\u001b[0m \u001b[38;5;66;03m# `tf.debugging.disable_traceback_filtering()`\u001b[39;00m\n\u001b[1;32m---> 70\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m e\u001b[38;5;241m.\u001b[39mwith_traceback(filtered_tb) \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[0;32m 71\u001b[0m \u001b[38;5;28;01mfinally\u001b[39;00m:\n\u001b[0;32m 72\u001b[0m \u001b[38;5;28;01mdel\u001b[39;00m filtered_tb\n",
37
+ "File \u001b[1;32mc:\\Users\\prajw\\anaconda3\\envs\\TestNullClass\\lib\\site-packages\\tensorflow\\python\\saved_model\\loader_impl.py:115\u001b[0m, in \u001b[0;36mparse_saved_model\u001b[1;34m(export_dir)\u001b[0m\n\u001b[0;32m 113\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mIOError\u001b[39;00m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mCannot parse file \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mpath_to_pbtxt\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mstr\u001b[39m(e)\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m.\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m 114\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m--> 115\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mIOError\u001b[39;00m(\n\u001b[0;32m 116\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mSavedModel file does not exist at: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mexport_dir\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;132;01m{\u001b[39;00mos\u001b[38;5;241m.\u001b[39mpath\u001b[38;5;241m.\u001b[39msep\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m 117\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;130;01m{{\u001b[39;00m\u001b[38;5;132;01m{\u001b[39;00mconstants\u001b[38;5;241m.\u001b[39mSAVED_MODEL_FILENAME_PBTXT\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m|\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m 118\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mconstants\u001b[38;5;241m.\u001b[39mSAVED_MODEL_FILENAME_PB\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;130;01m}}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n",
38
+ "\u001b[1;31mOSError\u001b[0m: SavedModel file does not exist at: C:\\Users\\prajw\\.cache\\huggingface\\hub\\models--prajwath--NullClass_Task-1\\snapshots\\db56a5e525e47ee9bde30fd099bea336bba3908f\\{saved_model.pbtxt|saved_model.pb}"
39
+ ]
40
+ }
41
+ ],
42
+ "source": [
43
+ "from huggingface_hub import from_pretrained_keras\n",
44
+ "\n",
45
+ "model = from_pretrained_keras(\"prajwath/NullClass_Task-1\")\n"
46
+ ]
47
+ },
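+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The cell above fails because the downloaded Hub snapshot contains no `saved_model.pb`. A minimal fallback sketch, assuming the model has already been saved locally as the `english_to_french_model` directory by the save cell at the end of this notebook, is to load it directly with Keras:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Fallback sketch: load the locally saved model instead of the Hub copy.\n",
+ "# Assumes 'english_to_french_model' was created by model.save() further below.\n",
+ "import keras\n",
+ "\n",
+ "model = keras.models.load_model('english_to_french_model')"
+ ]
+ },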
48
+ {
49
+ "cell_type": "code",
50
+ "execution_count": 3,
51
+ "metadata": {},
52
+ "outputs": [
53
+ {
54
+ "name": "stdout",
55
+ "output_type": "stream",
56
+ "text": [
57
+ "Collecting huggingface_hub\n",
58
+ " Using cached huggingface_hub-0.23.4-py3-none-any.whl.metadata (12 kB)\n",
59
+ "Collecting filelock (from huggingface_hub)\n",
60
+ " Using cached filelock-3.15.4-py3-none-any.whl.metadata (2.9 kB)\n",
61
+ "Collecting fsspec>=2023.5.0 (from huggingface_hub)\n",
62
+ " Using cached fsspec-2024.6.1-py3-none-any.whl.metadata (11 kB)\n",
63
+ "Requirement already satisfied: packaging>=20.9 in c:\\users\\prajw\\anaconda3\\envs\\testnullclass\\lib\\site-packages (from huggingface_hub) (24.0)\n",
64
+ "Collecting pyyaml>=5.1 (from huggingface_hub)\n",
65
+ " Using cached PyYAML-6.0.1-cp39-cp39-win_amd64.whl.metadata (2.1 kB)\n",
66
+ "Requirement already satisfied: requests in c:\\users\\prajw\\anaconda3\\envs\\testnullclass\\lib\\site-packages (from huggingface_hub) (2.32.3)\n",
67
+ "Collecting tqdm>=4.42.1 (from huggingface_hub)\n",
68
+ " Using cached tqdm-4.66.4-py3-none-any.whl.metadata (57 kB)\n",
69
+ "Requirement already satisfied: typing-extensions>=3.7.4.3 in c:\\users\\prajw\\anaconda3\\envs\\testnullclass\\lib\\site-packages (from huggingface_hub) (4.12.2)\n",
70
+ "Requirement already satisfied: colorama in c:\\users\\prajw\\anaconda3\\envs\\testnullclass\\lib\\site-packages (from tqdm>=4.42.1->huggingface_hub) (0.4.6)\n",
71
+ "Requirement already satisfied: charset-normalizer<4,>=2 in c:\\users\\prajw\\anaconda3\\envs\\testnullclass\\lib\\site-packages (from requests->huggingface_hub) (3.3.2)\n",
72
+ "Requirement already satisfied: idna<4,>=2.5 in c:\\users\\prajw\\anaconda3\\envs\\testnullclass\\lib\\site-packages (from requests->huggingface_hub) (2.10)\n",
73
+ "Requirement already satisfied: urllib3<3,>=1.21.1 in c:\\users\\prajw\\anaconda3\\envs\\testnullclass\\lib\\site-packages (from requests->huggingface_hub) (2.2.1)\n",
74
+ "Requirement already satisfied: certifi>=2017.4.17 in c:\\users\\prajw\\anaconda3\\envs\\testnullclass\\lib\\site-packages (from requests->huggingface_hub) (2024.6.2)\n",
75
+ "Downloading huggingface_hub-0.23.4-py3-none-any.whl (402 kB)\n",
76
+ " ---------------------------------------- 0.0/402.6 kB ? eta -:--:--\n",
77
+ " - -------------------------------------- 10.2/402.6 kB ? eta -:--:--\n",
78
+ " --- ----------------------------------- 41.0/402.6 kB 393.8 kB/s eta 0:00:01\n",
79
+ " ----------- -------------------------- 122.9/402.6 kB 798.9 kB/s eta 0:00:01\n",
80
+ " ---------------------- ----------------- 225.3/402.6 kB 1.1 MB/s eta 0:00:01\n",
81
+ " --------------------------- ------------ 276.5/402.6 kB 1.4 MB/s eta 0:00:01\n",
82
+ " --------------------------- ------------ 276.5/402.6 kB 1.4 MB/s eta 0:00:01\n",
83
+ " --------------------------- ------------ 276.5/402.6 kB 1.4 MB/s eta 0:00:01\n",
84
+ " --------------------------- ------------ 276.5/402.6 kB 1.4 MB/s eta 0:00:01\n",
85
+ " --------------------------- ------------ 276.5/402.6 kB 1.4 MB/s eta 0:00:01\n",
86
+ " --------------------------- ------------ 276.5/402.6 kB 1.4 MB/s eta 0:00:01\n",
87
+ " --------------------------- ------------ 276.5/402.6 kB 1.4 MB/s eta 0:00:01\n",
88
+ " --------------------------- ------------ 276.5/402.6 kB 1.4 MB/s eta 0:00:01\n",
89
+ " --------------------------- ------------ 276.5/402.6 kB 1.4 MB/s eta 0:00:01\n",
90
+ " --------------------------- ------------ 276.5/402.6 kB 1.4 MB/s eta 0:00:01\n",
91
+ " --------------------------- ------------ 276.5/402.6 kB 1.4 MB/s eta 0:00:01\n",
92
+ " --------------------------- ------------ 276.5/402.6 kB 1.4 MB/s eta 0:00:01\n",
93
+ " --------------------------- ------------ 276.5/402.6 kB 1.4 MB/s eta 0:00:01\n",
94
+ " --------------------------- ------------ 276.5/402.6 kB 1.4 MB/s eta 0:00:01\n",
95
+ " --------------------------- ------------ 276.5/402.6 kB 1.4 MB/s eta 0:00:01\n",
96
+ " --------------------------- ------------ 276.5/402.6 kB 1.4 MB/s eta 0:00:01\n",
97
+ " --------------------------- ------------ 276.5/402.6 kB 1.4 MB/s eta 0:00:01\n",
98
+ " --------------------------- ------------ 276.5/402.6 kB 1.4 MB/s eta 0:00:01\n",
99
+ " --------------------------- ------------ 276.5/402.6 kB 1.4 MB/s eta 0:00:01\n",
100
+ " --------------------------- ------------ 276.5/402.6 kB 1.4 MB/s eta 0:00:01\n",
101
+ " --------------------------- ------------ 276.5/402.6 kB 1.4 MB/s eta 0:00:01\n",
102
+ " --------------------------- ------------ 276.5/402.6 kB 1.4 MB/s eta 0:00:01\n",
103
+ " --------------------------- ------------ 276.5/402.6 kB 1.4 MB/s eta 0:00:01\n",
104
+ " --------------------------- ---------- 286.7/402.6 kB 221.2 kB/s eta 0:00:01\n",
105
+ " --------------------------- ---------- 286.7/402.6 kB 221.2 kB/s eta 0:00:01\n",
106
+ " ---------------------------- --------- 307.2/402.6 kB 221.0 kB/s eta 0:00:01\n",
107
+ " ---------------------------- --------- 307.2/402.6 kB 221.0 kB/s eta 0:00:01\n",
108
+ " ----------------------------- -------- 317.4/402.6 kB 216.0 kB/s eta 0:00:01\n",
109
+ " --------------------------------- ---- 358.4/402.6 kB 234.5 kB/s eta 0:00:01\n",
110
+ " -------------------------------------- 402.6/402.6 kB 253.6 kB/s eta 0:00:00\n",
111
+ "Downloading fsspec-2024.6.1-py3-none-any.whl (177 kB)\n",
112
+ " ---------------------------------------- 0.0/177.6 kB ? eta -:--:--\n",
113
+ " ---------------------------------------- 177.6/177.6 kB 5.4 MB/s eta 0:00:00\n",
114
+ "Downloading PyYAML-6.0.1-cp39-cp39-win_amd64.whl (152 kB)\n",
115
+ " ---------------------------------------- 0.0/152.8 kB ? eta -:--:--\n",
116
+ " ---------------- ----------------------- 61.4/152.8 kB ? eta -:--:--\n",
117
+ " ---------------- ----------------------- 61.4/152.8 kB ? eta -:--:--\n",
118
+ " ---------------- ----------------------- 61.4/152.8 kB ? eta -:--:--\n",
119
+ " ---------------- ----------------------- 61.4/152.8 kB ? eta -:--:--\n",
120
+ " ---------------- ----------------------- 61.4/152.8 kB ? eta -:--:--\n",
121
+ " ---------------- ----------------------- 61.4/152.8 kB ? eta -:--:--\n",
122
+ " ---------------- ----------------------- 61.4/152.8 kB ? eta -:--:--\n",
123
+ " ---------------- ----------------------- 61.4/152.8 kB ? eta -:--:--\n",
124
+ " ---------------- ----------------------- 61.4/152.8 kB ? eta -:--:--\n",
125
+ " ---------------- ----------------------- 61.4/152.8 kB ? eta -:--:--\n",
126
+ " ---------------- ----------------------- 61.4/152.8 kB ? eta -:--:--\n",
127
+ " ---------------- ----------------------- 61.4/152.8 kB ? eta -:--:--\n",
128
+ " ---------------- ----------------------- 61.4/152.8 kB ? eta -:--:--\n",
129
+ " ---------------- ----------------------- 61.4/152.8 kB ? eta -:--:--\n",
130
+ " ---------------- ----------------------- 61.4/152.8 kB ? eta -:--:--\n",
131
+ " ---------------- ----------------------- 61.4/152.8 kB ? eta -:--:--\n",
132
+ " ---------------- ----------------------- 61.4/152.8 kB ? eta -:--:--\n",
133
+ " ---------------- ----------------------- 61.4/152.8 kB ? eta -:--:--\n",
134
+ " ---------------- ----------------------- 61.4/152.8 kB ? eta -:--:--\n",
135
+ " ---------------- ----------------------- 61.4/152.8 kB ? eta -:--:--\n",
136
+ " ---------------- ----------------------- 61.4/152.8 kB ? eta -:--:--\n",
137
+ " ---------------- ----------------------- 61.4/152.8 kB ? eta -:--:--\n",
138
+ " ------------------ --------------------- 71.7/152.8 kB 58.7 kB/s eta 0:00:02\n",
139
+ " ------------------------ --------------- 92.2/152.8 kB 74.9 kB/s eta 0:00:01\n",
140
+ " ---------------------------- ---------- 112.6/152.8 kB 91.0 kB/s eta 0:00:01\n",
141
+ " ------------------------------- ------- 122.9/152.8 kB 94.8 kB/s eta 0:00:01\n",
142
+ " -------------------------------------- 152.8/152.8 kB 115.4 kB/s eta 0:00:00\n",
143
+ "Downloading tqdm-4.66.4-py3-none-any.whl (78 kB)\n",
144
+ " ---------------------------------------- 0.0/78.3 kB ? eta -:--:--\n",
145
+ " ---------------------------------------- 78.3/78.3 kB 4.3 MB/s eta 0:00:00\n",
146
+ "Downloading filelock-3.15.4-py3-none-any.whl (16 kB)\n",
147
+ "Installing collected packages: tqdm, pyyaml, fsspec, filelock, huggingface_hub\n",
148
+ "Successfully installed filelock-3.15.4 fsspec-2024.6.1 huggingface_hub-0.23.4 pyyaml-6.0.1 tqdm-4.66.4\n",
149
+ "Note: you may need to restart the kernel to use updated packages.\n"
150
+ ]
151
+ }
152
+ ],
153
+ "source": [
154
+ "pip install huggingface_hub\n"
155
+ ]
156
+ },
157
+ {
158
+ "cell_type": "code",
159
+ "execution_count": 1,
160
+ "metadata": {},
161
+ "outputs": [],
162
+ "source": [
163
+ "import collections\n",
164
+ "import numpy as np\n",
165
+ "import json\n",
166
+ "\n",
167
+ "from keras.preprocessing.text import Tokenizer\n",
168
+ "from keras.utils import pad_sequences\n",
169
+ "from keras.models import Model, Sequential\n",
170
+ "from keras.layers import Input, Dense, Embedding, GRU, LSTM, Bidirectional, Dropout, Activation, TimeDistributed, RepeatVector\n",
171
+ "from keras.optimizers import Adam\n",
172
+ "from keras.losses import sparse_categorical_crossentropy"
173
+ ]
174
+ },
175
+ {
176
+ "cell_type": "markdown",
177
+ "metadata": {},
178
+ "source": [
179
+ "### Verify access to the GPU"
180
+ ]
181
+ },
182
+ {
183
+ "cell_type": "code",
184
+ "execution_count": 2,
185
+ "metadata": {},
186
+ "outputs": [
187
+ {
188
+ "name": "stdout",
189
+ "output_type": "stream",
190
+ "text": [
191
+ "[name: \"/device:CPU:0\"\n",
192
+ "device_type: \"CPU\"\n",
193
+ "memory_limit: 268435456\n",
194
+ "locality {\n",
195
+ "}\n",
196
+ "incarnation: 753238729468120299\n",
197
+ "xla_global_id: -1\n",
198
+ ", name: \"/device:GPU:0\"\n",
199
+ "device_type: \"GPU\"\n",
200
+ "memory_limit: 1733715559\n",
201
+ "locality {\n",
202
+ " bus_id: 1\n",
203
+ " links {\n",
204
+ " }\n",
205
+ "}\n",
206
+ "incarnation: 17911561745832575813\n",
207
+ "physical_device_desc: \"device: 0, name: NVIDIA GeForce RTX 2050, pci bus id: 0000:01:00.0, compute capability: 8.6\"\n",
208
+ "xla_global_id: 416903419\n",
209
+ "]\n"
210
+ ]
211
+ }
212
+ ],
213
+ "source": [
214
+ "from tensorflow.python.client import device_lib\n",
215
+ "print(device_lib.list_local_devices())"
216
+ ]
217
+ },
218
+ {
219
+ "cell_type": "markdown",
220
+ "metadata": {},
221
+ "source": [
222
+ "## Dataset\n",
223
+ "For our machine translation project, we opt for a dataset featuring a limited vocabulary, specifically designed to facilitate a more manageable and efficient training process. Unlike the extensive [WMT](http://www.statmt.org/) datasets, our chosen dataset ensures a quicker training time and demands fewer computational resources. This strategic decision aims to balance the learning experience while still achieving meaningful results within practical time constraints.\n",
224
+ "### Load Data"
225
+ ]
226
+ },
227
+ {
228
+ "cell_type": "code",
229
+ "execution_count": 3,
230
+ "metadata": {},
231
+ "outputs": [],
232
+ "source": [
233
+ "def load_data(path):\n",
234
+ " input_file = path\n",
235
+ " with open(input_file, \"r\") as f:\n",
236
+ " data = f.read()\n",
237
+ " return data.split('\\n')\n",
238
+ "\n",
239
+ "english_sentences = load_data('data/english')\n",
240
+ "french_sentences = load_data('data/french')"
241
+ ]
242
+ },
243
+ {
244
+ "cell_type": "markdown",
245
+ "metadata": {},
246
+ "source": [
247
+ "### Sample Data"
248
+ ]
249
+ },
250
+ {
251
+ "cell_type": "code",
252
+ "execution_count": 4,
253
+ "metadata": {},
254
+ "outputs": [
255
+ {
256
+ "data": {
257
+ "text/plain": [
258
+ "['new jersey is sometimes quiet during autumn , and it is snowy in april .',\n",
259
+ " 'the united states is usually chilly during july , and it is usually freezing in november .',\n",
260
+ " 'california is usually quiet during march , and it is usually hot in june .',\n",
261
+ " 'the united states is sometimes mild during june , and it is cold in september .',\n",
262
+ " 'your least liked fruit is the grape , but my least liked is the apple .']"
263
+ ]
264
+ },
265
+ "execution_count": 4,
266
+ "metadata": {},
267
+ "output_type": "execute_result"
268
+ }
269
+ ],
270
+ "source": [
271
+ "english_sentences[:5]"
272
+ ]
273
+ },
274
+ {
275
+ "cell_type": "markdown",
276
+ "metadata": {},
277
+ "source": [
278
+ "By examining the sentences, it's apparent that they have undergone preprocessing: punctuation has been delimited with spaces, and all the text has been converted to lowercase. This preprocessing serves a crucial purpose in text preparation. Firstly, delimiting punctuation with spaces ensures that each punctuation mark is treated as a separate token, aiding the model in understanding sentence structure. Secondly, converting the entire text to lowercase standardizes the input, preventing the model from distinguishing between words solely based on their casing. This uniformity facilitates more effective training and generalization, enhancing the model's ability to grasp patterns and generate accurate translations."
279
+ ]
280
+ },
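+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The dataset here already ships preprocessed, but as a minimal sketch of what such preprocessing could look like on raw text, a hypothetical helper might lowercase the input and delimit punctuation with spaces:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import re\n",
+ "\n",
+ "def preprocess_raw(sentence):\n",
+ " # Hypothetical helper: lowercase, space out punctuation, normalize whitespace.\n",
+ " sentence = re.sub(r'([.,!?])', r' \\1 ', sentence.lower())\n",
+ " return ' '.join(sentence.split())\n",
+ "\n",
+ "preprocess_raw('New Jersey is sometimes quiet during autumn, and it is snowy in April.')\n",
+ "# -> 'new jersey is sometimes quiet during autumn , and it is snowy in april .'"
+ ]
+ },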
281
+ {
282
+ "cell_type": "markdown",
283
+ "metadata": {},
284
+ "source": [
285
+ "Structure of the Dataset"
286
+ ]
287
+ },
288
+ {
289
+ "cell_type": "code",
290
+ "execution_count": 5,
291
+ "metadata": {},
292
+ "outputs": [
293
+ {
294
+ "name": "stdout",
295
+ "output_type": "stream",
296
+ "text": [
297
+ "1823250 English words.\n",
298
+ "227 unique English words.\n",
299
+ "10 Most common words in the English dataset:\n",
300
+ "\"is\" \",\" \".\" \"in\" \"it\" \"during\" \"the\" \"but\" \"and\" \"sometimes\"\n",
301
+ "\n",
302
+ "1961295 French words.\n",
303
+ "355 unique French words.\n",
304
+ "10 Most common words in the French dataset:\n",
305
+ "\"est\" \".\" \",\" \"en\" \"il\" \"les\" \"mais\" \"et\" \"la\" \"parfois\"\n"
306
+ ]
307
+ }
308
+ ],
309
+ "source": [
310
+ "english_words_counter = collections.Counter([word for sentence in english_sentences for word in sentence.split()])\n",
311
+ "french_words_counter = collections.Counter([word for sentence in french_sentences for word in sentence.split()])\n",
312
+ "\n",
313
+ "print('{} English words.'.format(len([word for sentence in english_sentences for word in sentence.split()])))\n",
314
+ "print('{} unique English words.'.format(len(english_words_counter)))\n",
315
+ "print('10 Most common words in the English dataset:')\n",
316
+ "print('\"' + '\" \"'.join(list(zip(*english_words_counter.most_common(10)))[0]) + '\"')\n",
317
+ "\n",
318
+ "print()\n",
319
+ "print('{} French words.'.format(len([word for sentence in french_sentences for word in sentence.split()])))\n",
320
+ "print('{} unique French words.'.format(len(french_words_counter)))\n",
321
+ "print('10 Most common words in the French dataset:')\n",
322
+ "print('\"' + '\" \"'.join(list(zip(*french_words_counter.most_common(10)))[0]) + '\"')"
323
+ ]
324
+ },
325
+ {
326
+ "cell_type": "markdown",
327
+ "metadata": {},
328
+ "source": [
329
+ "### Preprocess\n",
330
+ "1. Tokenize the words into ids\n",
331
+ "2. Add padding to make all the sequences the same length."
332
+ ]
333
+ },
334
+ {
335
+ "cell_type": "code",
336
+ "execution_count": 6,
337
+ "metadata": {},
338
+ "outputs": [
339
+ {
340
+ "name": "stdout",
341
+ "output_type": "stream",
342
+ "text": [
343
+ "{'the': 1, 'quick': 2, 'a': 3, 'brown': 4, 'fox': 5, 'jumps': 6, 'over': 7, 'lazy': 8, 'dog': 9, 'by': 10, 'jove': 11, 'my': 12, 'study': 13, 'of': 14, 'lexicography': 15, 'won': 16, 'prize': 17, 'this': 18, 'is': 19, 'short': 20, 'sentence': 21}\n",
344
+ "\n",
345
+ "Sequence 1 in x\n",
346
+ " Input: The quick brown fox jumps over the lazy dog .\n",
347
+ " Output: [1, 2, 4, 5, 6, 7, 1, 8, 9]\n",
348
+ "Sequence 2 in x\n",
349
+ " Input: By Jove , my quick study of lexicography won a prize .\n",
350
+ " Output: [10, 11, 12, 2, 13, 14, 15, 16, 3, 17]\n",
351
+ "Sequence 3 in x\n",
352
+ " Input: This is a short sentence .\n",
353
+ " Output: [18, 19, 3, 20, 21]\n"
354
+ ]
355
+ }
356
+ ],
357
+ "source": [
358
+ "def tokenize(x):\n",
359
+ " tokenizer = Tokenizer()\n",
360
+ " tokenizer.fit_on_texts(x)\n",
361
+ " return tokenizer.texts_to_sequences(x), tokenizer\n",
362
+ "\n",
363
+ "text_sentences = [\n",
364
+ " 'The quick brown fox jumps over the lazy dog .',\n",
365
+ " 'By Jove , my quick study of lexicography won a prize .',\n",
366
+ " 'This is a short sentence .']\n",
367
+ "\n",
368
+ "text_tokenized, text_tokenizer = tokenize(text_sentences)\n",
369
+ "print(text_tokenizer.word_index)\n",
370
+ "print()\n",
371
+ "for sample_i, (sent, token_sent) in enumerate(zip(text_sentences, text_tokenized)):\n",
372
+ " print('Sequence {} in x'.format(sample_i + 1))\n",
373
+ " print(' Input: {}'.format(sent))\n",
374
+ " print(' Output: {}'.format(token_sent))"
375
+ ]
376
+ },
377
+ {
378
+ "cell_type": "code",
379
+ "execution_count": 7,
380
+ "metadata": {},
381
+ "outputs": [
382
+ {
383
+ "name": "stdout",
384
+ "output_type": "stream",
385
+ "text": [
386
+ "Sequence 1 in x\n",
387
+ " Input: [1 2 4 5 6 7 1 8 9]\n",
388
+ " Output: [1 2 4 5 6 7 1 8 9 0]\n",
389
+ "Sequence 2 in x\n",
390
+ " Input: [10 11 12 2 13 14 15 16 3 17]\n",
391
+ " Output: [10 11 12 2 13 14 15 16 3 17]\n",
392
+ "Sequence 3 in x\n",
393
+ " Input: [18 19 3 20 21]\n",
394
+ " Output: [18 19 3 20 21 0 0 0 0 0]\n"
395
+ ]
396
+ }
397
+ ],
398
+ "source": [
399
+ "def pad(x, length=None):\n",
400
+ " if length is None:\n",
401
+ " length = max([len(sentence) for sentence in x])\n",
402
+ " return pad_sequences(x, maxlen=length, padding='post')\n",
403
+ "\n",
404
+ "test_pad = pad(text_tokenized)\n",
405
+ "for sample_i, (token_sent, pad_sent) in enumerate(zip(text_tokenized, test_pad)):\n",
406
+ " print('Sequence {} in x'.format(sample_i + 1))\n",
407
+ " print(' Input: {}'.format(np.array(token_sent)))\n",
408
+ " print(' Output: {}'.format(pad_sent))"
409
+ ]
410
+ },
411
+ {
412
+ "cell_type": "code",
413
+ "execution_count": 8,
414
+ "metadata": {},
415
+ "outputs": [
416
+ {
417
+ "name": "stdout",
418
+ "output_type": "stream",
419
+ "text": [
420
+ "Data Preprocessed\n",
421
+ "Max English sentence length: 15\n",
422
+ "Max French sentence length: 21\n",
423
+ "English vocabulary size: 199\n",
424
+ "French vocabulary size: 344\n"
425
+ ]
426
+ }
427
+ ],
428
+ "source": [
429
+ "def preprocess(x,y):\n",
430
+ " preprocess_x, x_tk = tokenize(x)\n",
431
+ " preprocess_y, y_tk = tokenize(y)\n",
432
+ " \n",
433
+ " preprocess_x = pad(preprocess_x)\n",
434
+ " preprocess_y = pad(preprocess_y)\n",
435
+ " \n",
436
+ " preprocess_y = preprocess_y.reshape(*preprocess_y.shape, 1)\n",
437
+ " \n",
438
+ " return preprocess_x, preprocess_y, x_tk, y_tk\n",
439
+ "\n",
440
+ "preproc_english_sentences, preproc_french_sentences, english_tokenizer, french_tokenizer = preprocess(english_sentences, french_sentences)\n",
441
+ "\n",
442
+ "max_english_sequence_length = preproc_english_sentences.shape[1]\n",
443
+ "max_french_sequence_length = preproc_french_sentences.shape[1]\n",
444
+ "english_vocab_size = len(english_tokenizer.word_index)\n",
445
+ "french_vocab_size = len(french_tokenizer.word_index)\n",
446
+ "\n",
447
+ "print('Data Preprocessed')\n",
448
+ "print(\"Max English sentence length:\", max_english_sequence_length)\n",
449
+ "print(\"Max French sentence length:\", max_french_sequence_length)\n",
450
+ "print(\"English vocabulary size:\", english_vocab_size)\n",
451
+ "print(\"French vocabulary size:\", french_vocab_size)"
452
+ ]
453
+ },
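+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Note that the vocabulary sizes reported here (199 English, 344 French) are smaller than the raw unique-word counts above (227 and 355), largely because Keras' `Tokenizer` filters punctuation by default. A quick sketch to confirm, assuming the default `filters` setting:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# The Tokenizer strips these characters by default, so tokens such as ',' and '.'\n",
+ "# from the raw counts above never enter the vocabulary.\n",
+ "print(english_tokenizer.filters)\n",
+ "print(len(english_tokenizer.word_index), len(french_tokenizer.word_index))"
+ ]
+ },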
454
+ {
455
+ "cell_type": "markdown",
456
+ "metadata": {},
457
+ "source": [
458
+ "## Models\n",
459
+ "- Model 1 is a simple RNN\n",
460
+ "- Model 2 is a Bidirectional RNN\n",
461
+ "- Model 3 is an Embedding RNN\n",
462
+ "\n",
463
+ "### Ids Back to Text\n",
464
+ "The neural network will be translating the input to words ids, which isn't the final form we want. We want the French translation. The function `logits_to_text` will bridge the gab between the logits from the neural network to the French translation. You'll be using this function to better understand the output of the neural network."
465
+ ]
466
+ },
467
+ {
468
+ "cell_type": "code",
469
+ "execution_count": 9,
470
+ "metadata": {},
471
+ "outputs": [],
472
+ "source": [
473
+ "def logits_to_text(logits, tokenizer):\n",
474
+ " index_to_words = {id: word for word, id in tokenizer.word_index.items()}\n",
475
+ " index_to_words[0] = '<PAD>'\n",
476
+ " \n",
477
+ " return ' '.join([index_to_words[prediction] for prediction in np.argmax(logits, 1)])"
478
+ ]
479
+ },
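+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As a toy sketch of what `logits_to_text` does, the fake logits below put all probability mass on hand-picked word ids; `argmax` over each time step then recovers the words (ids chosen purely for demonstration):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Toy demo: one time step per row; argmax picks the most likely word id.\n",
+ "toy_logits = np.zeros((3, french_vocab_size + 1))\n",
+ "toy_logits[0, french_tokenizer.word_index['est']] = 1.0\n",
+ "toy_logits[1, french_tokenizer.word_index['en']] = 1.0\n",
+ "toy_logits[2, 0] = 1.0 # id 0 maps to <PAD>\n",
+ "print(logits_to_text(toy_logits, french_tokenizer)) # est en <PAD>"
+ ]
+ },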
480
+ {
481
+ "cell_type": "markdown",
482
+ "metadata": {},
483
+ "source": [
484
+ "### Model 1: RNN\n",
485
+ "![RNN](images/rnn.png)\n",
486
+ "A basic RNN model is a good baseline for sequence data. In this model, you'll build a RNN that translates English to French."
487
+ ]
488
+ },
489
+ {
490
+ "cell_type": "code",
491
+ "execution_count": 10,
492
+ "metadata": {},
493
+ "outputs": [
494
+ {
495
+ "name": "stdout",
496
+ "output_type": "stream",
497
+ "text": [
498
+ "Epoch 1/10\n",
499
+ "108/108 [==============================] - 21s 132ms/step - loss: 1.9204 - accuracy: 0.5445 - val_loss: nan - val_accuracy: 0.6271\n",
500
+ "Epoch 2/10\n",
501
+ "108/108 [==============================] - 13s 119ms/step - loss: 1.2280 - accuracy: 0.6415 - val_loss: nan - val_accuracy: 0.6736\n",
502
+ "Epoch 3/10\n",
503
+ "108/108 [==============================] - 13s 119ms/step - loss: 1.0781 - accuracy: 0.6701 - val_loss: nan - val_accuracy: 0.7027\n",
504
+ "Epoch 4/10\n",
505
+ "108/108 [==============================] - 13s 122ms/step - loss: 0.9893 - accuracy: 0.6861 - val_loss: nan - val_accuracy: 0.7056\n",
506
+ "Epoch 5/10\n",
507
+ "108/108 [==============================] - 13s 122ms/step - loss: 0.9328 - accuracy: 0.6960 - val_loss: nan - val_accuracy: 0.7206\n",
508
+ "Epoch 6/10\n",
509
+ "108/108 [==============================] - 12s 113ms/step - loss: 0.8917 - accuracy: 0.7037 - val_loss: nan - val_accuracy: 0.7074\n",
510
+ "Epoch 7/10\n",
511
+ "108/108 [==============================] - 12s 109ms/step - loss: 0.8539 - accuracy: 0.7123 - val_loss: nan - val_accuracy: 0.7419\n",
512
+ "Epoch 8/10\n",
513
+ "108/108 [==============================] - 12s 114ms/step - loss: 0.8136 - accuracy: 0.7258 - val_loss: nan - val_accuracy: 0.7366\n",
514
+ "Epoch 9/10\n",
515
+ "108/108 [==============================] - 13s 117ms/step - loss: 0.7947 - accuracy: 0.7312 - val_loss: nan - val_accuracy: 0.7469\n",
516
+ "Epoch 10/10\n",
517
+ "108/108 [==============================] - 13s 117ms/step - loss: 0.7671 - accuracy: 0.7396 - val_loss: nan - val_accuracy: 0.7694\n"
518
+ ]
519
+ },
520
+ {
521
+ "data": {
522
+ "text/plain": [
523
+ "<keras.callbacks.History at 0x1d1836779d0>"
524
+ ]
525
+ },
526
+ "execution_count": 10,
527
+ "metadata": {},
528
+ "output_type": "execute_result"
529
+ }
530
+ ],
531
+ "source": [
532
+ "def simple_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):\n",
533
+ " \n",
534
+ " #Hyperparameters\n",
535
+ " learning_rate = 0.005\n",
536
+ " \n",
537
+ " # Build the layers\n",
538
+ " model = Sequential()\n",
539
+ " model.add(GRU(256, input_shape=input_shape[1:], return_sequences=True))\n",
540
+ " model.add(TimeDistributed(Dense(1024, activation='relu')))\n",
541
+ " model.add(Dropout(0.5))\n",
542
+ " model.add(TimeDistributed(Dense(french_vocab_size, activation='softmax')))\n",
543
+ " \n",
544
+ " # Compile model\n",
545
+ " model.compile(loss = sparse_categorical_crossentropy,\n",
546
+ " optimizer = Adam(learning_rate),\n",
547
+ " metrics = ['accuracy'])\n",
548
+ " \n",
549
+ " return model\n",
550
+ "\n",
551
+ "tmp_x = pad(preproc_english_sentences, max_french_sequence_length)\n",
552
+ "tmp_x = tmp_x.reshape((-1, preproc_french_sentences.shape[-2], 1))\n",
553
+ "\n",
554
+ "#Train the neural network\n",
555
+ "simple_rnn_model = simple_model(\n",
556
+ " tmp_x.shape,\n",
557
+ " max_french_sequence_length,\n",
558
+ " english_vocab_size,\n",
559
+ " french_vocab_size)\n",
560
+ "\n",
561
+ "simple_rnn_model.fit(tmp_x, preproc_french_sentences, batch_size=1024, epochs=10, validation_split=0.2)"
562
+ ]
563
+ },
564
+ {
565
+ "cell_type": "code",
566
+ "execution_count": 11,
567
+ "metadata": {},
568
+ "outputs": [
569
+ {
570
+ "name": "stdout",
571
+ "output_type": "stream",
572
+ "text": [
573
+ "Prediciton:\n",
574
+ "1/1 [==============================] - 0s 278ms/step\n",
575
+ "new jersey est parfois calme en mois de il et il est en en <PAD> <PAD> <PAD> <PAD> <PAD> <PAD> <PAD>\n",
576
+ "\n",
577
+ "Correct Translation:\n",
578
+ "[\"new jersey est parfois calme pendant l' automne , et il est neigeux en avril .\"]\n",
579
+ "\n",
580
+ "Original text:\n",
581
+ "['new jersey is sometimes quiet during autumn , and it is snowy in april .']\n"
582
+ ]
583
+ }
584
+ ],
585
+ "source": [
586
+ "# Print prediction(s)\n",
587
+ "print(\"Prediciton:\")\n",
588
+ "print(logits_to_text(simple_rnn_model.predict(tmp_x[:1])[0], french_tokenizer))\n",
589
+ "\n",
590
+ "print(\"\\nCorrect Translation:\")\n",
591
+ "print(french_sentences[:1])\n",
592
+ "\n",
593
+ "print('\\nOriginal text:')\n",
594
+ "print(english_sentences[:1])"
595
+ ]
596
+ },
597
+ {
598
+ "cell_type": "markdown",
599
+ "metadata": {},
600
+ "source": [
601
+ "### Model 2: Bidirectional RNNs\n",
602
+ "![RNN](images/bidirectional.png)\n",
603
+ "One restriction of a RNN is that it can't see the future input, only the past. This is where bidirectional recurrent neural networks come in. They are able to see the future data."
604
+ ]
605
+ },
606
+ {
607
+ "cell_type": "code",
608
+ "execution_count": 12,
609
+ "metadata": {},
610
+ "outputs": [
611
+ {
612
+ "name": "stdout",
613
+ "output_type": "stream",
614
+ "text": [
615
+ "Model: \"sequential_1\"\n",
616
+ "_________________________________________________________________\n",
617
+ " Layer (type) Output Shape Param # \n",
618
+ "=================================================================\n",
619
+ " bidirectional (Bidirectiona (None, 21, 256) 100608 \n",
620
+ " l) \n",
621
+ " \n",
622
+ " time_distributed_2 (TimeDis (None, 21, 1024) 263168 \n",
623
+ " tributed) \n",
624
+ " \n",
625
+ " dropout_1 (Dropout) (None, 21, 1024) 0 \n",
626
+ " \n",
627
+ " time_distributed_3 (TimeDis (None, 21, 344) 352600 \n",
628
+ " tributed) \n",
629
+ " \n",
630
+ "=================================================================\n",
631
+ "Total params: 716,376\n",
632
+ "Trainable params: 716,376\n",
633
+ "Non-trainable params: 0\n",
634
+ "_________________________________________________________________\n",
635
+ "None\n",
636
+ "Epoch 1/10\n",
637
+ "108/108 [==============================] - 15s 121ms/step - loss: 1.7581 - accuracy: 0.5748 - val_loss: nan - val_accuracy: 0.6500\n",
638
+ "Epoch 2/10\n",
639
+ "108/108 [==============================] - 13s 117ms/step - loss: 1.1684 - accuracy: 0.6552 - val_loss: nan - val_accuracy: 0.6823\n",
640
+ "Epoch 3/10\n",
641
+ "108/108 [==============================] - 13s 118ms/step - loss: 1.0394 - accuracy: 0.6766 - val_loss: nan - val_accuracy: 0.6961\n",
642
+ "Epoch 4/10\n",
643
+ "108/108 [==============================] - 13s 118ms/step - loss: 0.9543 - accuracy: 0.6900 - val_loss: nan - val_accuracy: 0.7073\n",
644
+ "Epoch 5/10\n",
645
+ "108/108 [==============================] - 13s 118ms/step - loss: 0.8969 - accuracy: 0.6993 - val_loss: nan - val_accuracy: 0.7155\n",
646
+ "Epoch 6/10\n",
647
+ "108/108 [==============================] - 13s 118ms/step - loss: 0.8589 - accuracy: 0.7062 - val_loss: nan - val_accuracy: 0.7109\n",
648
+ "Epoch 7/10\n",
649
+ "108/108 [==============================] - 13s 118ms/step - loss: 0.8371 - accuracy: 0.7107 - val_loss: nan - val_accuracy: 0.7233\n",
650
+ "Epoch 8/10\n",
651
+ "108/108 [==============================] - 13s 118ms/step - loss: 0.7975 - accuracy: 0.7195 - val_loss: nan - val_accuracy: 0.7446\n",
652
+ "Epoch 9/10\n",
653
+ "108/108 [==============================] - 13s 118ms/step - loss: 0.7670 - accuracy: 0.7282 - val_loss: nan - val_accuracy: 0.7498\n",
654
+ "Epoch 10/10\n",
655
+ "108/108 [==============================] - 13s 117ms/step - loss: 0.7318 - accuracy: 0.7401 - val_loss: nan - val_accuracy: 0.7604\n"
656
+ ]
657
+ },
658
+ {
659
+ "data": {
660
+ "text/plain": [
661
+ "<keras.callbacks.History at 0x1d184e5adf0>"
662
+ ]
663
+ },
664
+ "execution_count": 12,
665
+ "metadata": {},
666
+ "output_type": "execute_result"
667
+ }
668
+ ],
669
+ "source": [
670
+ "def bd_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):\n",
671
+ " \n",
672
+ " #Hyperparameters\n",
673
+ " learning_rate = 0.005\n",
674
+ " \n",
675
+ " # Build the layers\n",
676
+ " model = Sequential()\n",
677
+ " model.add(Bidirectional(GRU(128, return_sequences=True), input_shape=input_shape[1:]))\n",
678
+ " model.add(TimeDistributed(Dense(1024, activation='relu')))\n",
679
+ " model.add(Dropout(0.5))\n",
680
+ " model.add(TimeDistributed(Dense(french_vocab_size, activation='softmax')))\n",
681
+ " \n",
682
+ " # Compile model\n",
683
+ " model.compile(loss = sparse_categorical_crossentropy,\n",
684
+ " optimizer = Adam(learning_rate),\n",
685
+ " metrics = ['accuracy'])\n",
686
+ " \n",
687
+ " return model\n",
688
+ "\n",
689
+ "tmp_x = pad(preproc_english_sentences, max_french_sequence_length)\n",
690
+ "tmp_x = tmp_x.reshape((-1, preproc_french_sentences.shape[-2], 1))\n",
691
+ "\n",
692
+ "# Train the neural network\n",
693
+ "bd_rnn_model = bd_model(\n",
694
+ " tmp_x.shape,\n",
695
+ " max_french_sequence_length,\n",
696
+ " english_vocab_size,\n",
697
+ " french_vocab_size)\n",
698
+ "\n",
699
+ "print(bd_rnn_model.summary())\n",
700
+ "\n",
701
+ "bd_rnn_model.fit(tmp_x, preproc_french_sentences, batch_size=1024, epochs=10, validation_split=0.2)"
702
+ ]
703
+ },
704
+ {
705
+ "cell_type": "code",
706
+ "execution_count": 13,
707
+ "metadata": {},
708
+ "outputs": [
709
+ {
710
+ "name": "stdout",
711
+ "output_type": "stream",
712
+ "text": [
713
+ "Prediciton:\n",
714
+ "1/1 [==============================] - 1s 569ms/step\n",
715
+ "new jersey est parfois chaud en mois de il et est est en en <PAD> <PAD> <PAD> <PAD> <PAD> <PAD> <PAD>\n",
716
+ "\n",
717
+ "Correct Translation:\n",
718
+ "[\"new jersey est parfois calme pendant l' automne , et il est neigeux en avril .\"]\n",
719
+ "\n",
720
+ "Original text:\n",
721
+ "['new jersey is sometimes quiet during autumn , and it is snowy in april .']\n"
722
+ ]
723
+ }
724
+ ],
725
+ "source": [
726
+ "# Print prediction(s)\n",
727
+ "print(\"Prediciton:\")\n",
728
+ "print(logits_to_text(bd_rnn_model.predict(tmp_x[:1])[0], french_tokenizer))\n",
729
+ "\n",
730
+ "print(\"\\nCorrect Translation:\")\n",
731
+ "print(french_sentences[:1])\n",
732
+ "\n",
733
+ "print('\\nOriginal text:')\n",
734
+ "print(english_sentences[:1])"
735
+ ]
736
+ },
737
+ {
738
+ "cell_type": "markdown",
739
+ "metadata": {},
740
+ "source": [
741
+ "### Model 3: Embedding\n",
742
+ "![RNN](images/embedding-words.png)\n",
743
+ "You've turned the words into ids, but there's a better representation of a word. This is called word embeddings. An embedding is a vector representation of the word that is close to similar words in n-dimensional space, where the n represents the size of the embedding vectors."
744
+ ]
745
+ },
746
+ {
747
+ "cell_type": "code",
748
+ "execution_count": 14,
749
+ "metadata": {},
750
+ "outputs": [
751
+ {
752
+ "name": "stdout",
753
+ "output_type": "stream",
754
+ "text": [
755
+ "Model: \"sequential_2\"\n",
756
+ "_________________________________________________________________\n",
757
+ " Layer (type) Output Shape Param # \n",
758
+ "=================================================================\n",
759
+ " embedding (Embedding) (None, 21, 256) 50944 \n",
760
+ " \n",
761
+ " bidirectional_1 (Bidirectio (None, 21, 512) 789504 \n",
762
+ " nal) \n",
763
+ " \n",
764
+ " time_distributed_4 (TimeDis (None, 21, 1024) 525312 \n",
765
+ " tributed) \n",
766
+ " \n",
767
+ " dropout_2 (Dropout) (None, 21, 1024) 0 \n",
768
+ " \n",
769
+ " time_distributed_5 (TimeDis (None, 21, 344) 352600 \n",
770
+ " tributed) \n",
771
+ " \n",
772
+ "=================================================================\n",
773
+ "Total params: 1,718,360\n",
774
+ "Trainable params: 1,718,360\n",
775
+ "Non-trainable params: 0\n",
776
+ "_________________________________________________________________\n",
777
+ "None\n",
778
+ "Epoch 1/10\n",
779
+ "108/108 [==============================] - 21s 168ms/step - loss: 1.3566 - accuracy: 0.6890 - val_loss: nan - val_accuracy: 0.8730\n",
780
+ "Epoch 2/10\n",
781
+ "108/108 [==============================] - 16s 151ms/step - loss: 0.3153 - accuracy: 0.9007 - val_loss: nan - val_accuracy: 0.9377\n",
782
+ "Epoch 3/10\n",
783
+ "108/108 [==============================] - 17s 154ms/step - loss: 0.1827 - accuracy: 0.9428 - val_loss: nan - val_accuracy: 0.9572\n",
784
+ "Epoch 4/10\n",
785
+ "108/108 [==============================] - 17s 154ms/step - loss: 0.1322 - accuracy: 0.9589 - val_loss: nan - val_accuracy: 0.9685\n",
786
+ "Epoch 5/10\n",
787
+ "108/108 [==============================] - 17s 154ms/step - loss: 0.1035 - accuracy: 0.9680 - val_loss: nan - val_accuracy: 0.9734\n",
788
+ "Epoch 6/10\n",
789
+ "108/108 [==============================] - 17s 156ms/step - loss: 0.0864 - accuracy: 0.9734 - val_loss: nan - val_accuracy: 0.9764\n",
790
+ "Epoch 7/10\n",
791
+ "108/108 [==============================] - 17s 156ms/step - loss: 0.0755 - accuracy: 0.9767 - val_loss: nan - val_accuracy: 0.9774\n",
792
+ "Epoch 8/10\n",
793
+ "108/108 [==============================] - 17s 157ms/step - loss: 0.0659 - accuracy: 0.9795 - val_loss: nan - val_accuracy: 0.9805\n",
794
+ "Epoch 9/10\n",
795
+ "108/108 [==============================] - 17s 159ms/step - loss: 0.0604 - accuracy: 0.9812 - val_loss: nan - val_accuracy: 0.9813\n",
796
+ "Epoch 10/10\n",
797
+ "108/108 [==============================] - 17s 158ms/step - loss: 0.0559 - accuracy: 0.9827 - val_loss: nan - val_accuracy: 0.9825\n"
798
+ ]
799
+ },
800
+ {
801
+ "data": {
802
+ "text/plain": [
803
+ "<keras.callbacks.History at 0x1d183c75460>"
804
+ ]
805
+ },
806
+ "execution_count": 14,
807
+ "metadata": {},
808
+ "output_type": "execute_result"
809
+ }
810
+ ],
811
+ "source": [
812
+ "def bidirectional_embed_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):\n",
813
+ " \n",
814
+ " # Hyperparameters\n",
815
+ " learning_rate = 0.005\n",
816
+ " \n",
817
+ " # Build the layers\n",
818
+ " model = Sequential()\n",
819
+ " model.add(Embedding(english_vocab_size, 256, input_length=input_shape[1], input_shape=input_shape[1:]))\n",
820
+ " model.add(Bidirectional(GRU(256, return_sequences=True)))\n",
821
+ " model.add(TimeDistributed(Dense(1024, activation='relu')))\n",
822
+ " model.add(Dropout(0.5))\n",
823
+ " model.add(TimeDistributed(Dense(french_vocab_size, activation='softmax')))\n",
824
+ " \n",
825
+ " # Compile model\n",
826
+ " model.compile(loss = sparse_categorical_crossentropy,\n",
827
+ " optimizer = Adam(learning_rate),\n",
828
+ " metrics = ['accuracy'])\n",
829
+ " \n",
830
+ " return model\n",
831
+ "\n",
832
+ "tmp_x = pad(preproc_english_sentences, max_french_sequence_length)\n",
833
+ "tmp_x = tmp_x.reshape((-1, preproc_french_sentences.shape[-2]))\n",
834
+ "\n",
835
+ "# Build the model\n",
836
+ "embed_rnn_model = bidirectional_embed_model(\n",
837
+ " tmp_x.shape,\n",
838
+ " max_french_sequence_length,\n",
839
+ " english_vocab_size,\n",
840
+ " french_vocab_size)\n",
841
+ "\n",
842
+ "print(embed_rnn_model.summary())\n",
843
+ "\n",
844
+ "embed_rnn_model.fit(tmp_x, preproc_french_sentences, batch_size=1024, epochs=10, validation_split=0.2)\n",
845
+ " "
846
+ ]
847
+ },
848
+ {
849
+ "cell_type": "code",
850
+ "execution_count": 15,
851
+ "metadata": {},
852
+ "outputs": [
853
+ {
854
+ "name": "stdout",
855
+ "output_type": "stream",
856
+ "text": [
857
+ "Prediciton:\n",
858
+ "1/1 [==============================] - 0s 479ms/step\n",
859
+ "new jersey est parfois calme pendant l' automne et il est neigeux en avril <PAD> <PAD> <PAD> <PAD> <PAD> <PAD> <PAD>\n",
860
+ "\n",
861
+ "Correct Translation:\n",
862
+ "[\"new jersey est parfois calme pendant l' automne , et il est neigeux en avril .\"]\n",
863
+ "\n",
864
+ "Original text:\n",
865
+ "['new jersey is sometimes quiet during autumn , and it is snowy in april .']\n"
866
+ ]
867
+ }
868
+ ],
869
+ "source": [
870
+ "# Print prediction(s)\n",
871
+ "print(\"Prediciton:\")\n",
872
+ "print(logits_to_text(embed_rnn_model.predict(tmp_x[:1])[0], french_tokenizer))\n",
873
+ "\n",
874
+ "print(\"\\nCorrect Translation:\")\n",
875
+ "print(french_sentences[:1])\n",
876
+ "\n",
877
+ "print('\\nOriginal text:')\n",
878
+ "print(english_sentences[:1])"
879
+ ]
880
+ },
881
+ {
882
+ "cell_type": "code",
883
+ "execution_count": 16,
884
+ "metadata": {},
885
+ "outputs": [
886
+ {
887
+ "name": "stderr",
888
+ "output_type": "stream",
889
+ "text": [
890
+ "WARNING:absl:Found untraced functions such as gru_cell_5_layer_call_fn, gru_cell_5_layer_call_and_return_conditional_losses, gru_cell_6_layer_call_fn, gru_cell_6_layer_call_and_return_conditional_losses while saving (showing 4 of 4). These functions will not be directly callable after loading.\n"
891
+ ]
892
+ },
893
+ {
894
+ "name": "stdout",
895
+ "output_type": "stream",
896
+ "text": [
897
+ "INFO:tensorflow:Assets written to: english_to_french_model\\assets\n"
898
+ ]
899
+ },
900
+ {
901
+ "name": "stderr",
902
+ "output_type": "stream",
903
+ "text": [
904
+ "INFO:tensorflow:Assets written to: english_to_french_model\\assets\n"
905
+ ]
906
+ }
907
+ ],
908
+ "source": [
909
+ "embed_rnn_model.save('english_to_french_model')\n",
910
+ "# Serialize English Tokenizer to JSON\n",
911
+ "with open('english_tokenizer.json', 'w', encoding='utf8') as f:\n",
912
+ " f.write(json.dumps(english_tokenizer.to_json(), ensure_ascii=False))\n",
913
+ " \n",
914
+ "# Serialize French Tokenizer to JSON\n",
915
+ "with open('french_tokenizer.json', 'w', encoding='utf8') as f:\n",
916
+ " f.write(json.dumps(french_tokenizer.to_json(), ensure_ascii=False))\n",
917
+ " \n",
918
+ "# Save max lengths\n",
919
+ "max_french_sequence_length_json = max_french_sequence_length\n",
920
+ "with open('sequence_length.json', 'w', encoding='utf8') as f:\n",
921
+ " f.write(json.dumps(max_french_sequence_length_json, ensure_ascii=False))"
922
+ ]
923
+ },
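+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A minimal sketch of loading these artifacts back for inference, assuming the same working directory as the save cell above:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from keras.models import load_model\n",
+ "from keras.preprocessing.text import tokenizer_from_json\n",
+ "\n",
+ "# Reload the model and tokenizer saved above (same working directory assumed).\n",
+ "reloaded_model = load_model('english_to_french_model')\n",
+ "with open('english_tokenizer.json', 'r', encoding='utf8') as f:\n",
+ " reloaded_english_tokenizer = tokenizer_from_json(json.load(f))\n",
+ "with open('sequence_length.json', 'r', encoding='utf8') as f:\n",
+ " reloaded_sequence_length = json.load(f)"
+ ]
+ },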
924
+ {
925
+ "cell_type": "code",
926
+ "execution_count": null,
927
+ "metadata": {},
928
+ "outputs": [],
929
+ "source": []
930
+ }
931
+ ],
932
+ "metadata": {
933
+ "kernelspec": {
934
+ "display_name": "Python 3",
935
+ "language": "python",
936
+ "name": "python3"
937
+ },
938
+ "language_info": {
939
+ "codemirror_mode": {
940
+ "name": "ipython",
941
+ "version": 3
942
+ },
943
+ "file_extension": ".py",
944
+ "mimetype": "text/x-python",
945
+ "name": "python",
946
+ "nbconvert_exporter": "python",
947
+ "pygments_lexer": "ipython3",
948
+ "version": "3.9.19"
949
+ }
950
+ },
951
+ "nbformat": 4,
952
+ "nbformat_minor": 1
953
+ }
Task 2/Eng_Spanish.ipynb ADDED
@@ -0,0 +1,726 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {},
6
+ "source": [
7
+ "# Machine Translation Project (English to Spanish)"
8
+ ]
9
+ },
10
+ {
11
+ "cell_type": "code",
12
+ "execution_count": 1,
13
+ "metadata": {},
14
+ "outputs": [],
15
+ "source": [
16
+ "import pathlib\n",
17
+ "import random\n",
18
+ "import string\n",
19
+ "import tensorflow.strings as tf_strings\n",
20
+ "import tensorflow.data as tf_data\n",
21
+ "import re\n",
22
+ "from keras.layers import TextVectorization\n",
23
+ "import keras\n",
24
+ "import tensorflow as tf\n",
25
+ "from keras import layers\n",
26
+ "import json"
27
+ ]
28
+ },
29
+ {
30
+ "cell_type": "markdown",
31
+ "metadata": {},
32
+ "source": [
33
+ "### Verify access to the GPU"
34
+ ]
35
+ },
36
+ {
37
+ "cell_type": "code",
38
+ "execution_count": 2,
39
+ "metadata": {},
40
+ "outputs": [
41
+ {
42
+ "name": "stdout",
43
+ "output_type": "stream",
44
+ "text": [
45
+ "[name: \"/device:CPU:0\"\n",
46
+ "device_type: \"CPU\"\n",
47
+ "memory_limit: 268435456\n",
48
+ "locality {\n",
49
+ "}\n",
50
+ "incarnation: 11075625745611853481\n",
51
+ "xla_global_id: -1\n",
52
+ ", name: \"/device:GPU:0\"\n",
53
+ "device_type: \"GPU\"\n",
54
+ "memory_limit: 1733715559\n",
55
+ "locality {\n",
56
+ " bus_id: 1\n",
57
+ " links {\n",
58
+ " }\n",
59
+ "}\n",
60
+ "incarnation: 17906485139926134931\n",
61
+ "physical_device_desc: \"device: 0, name: NVIDIA GeForce RTX 2050, pci bus id: 0000:01:00.0, compute capability: 8.6\"\n",
62
+ "xla_global_id: 416903419\n",
63
+ "]\n"
64
+ ]
65
+ }
66
+ ],
67
+ "source": [
68
+ "from tensorflow.python.client import device_lib\n",
69
+ "print(device_lib.list_local_devices())"
70
+ ]
71
+ },
72
+ {
73
+ "cell_type": "markdown",
74
+ "metadata": {},
75
+ "source": [
76
+ "### Download and prepare the data\n",
77
+ "source :\"http://storage.googleapis.com/download.tensorflow.org/data/spa-eng.zip\""
78
+ ]
79
+ },
80
+ {
81
+ "cell_type": "code",
82
+ "execution_count": 3,
83
+ "metadata": {},
84
+ "outputs": [],
85
+ "source": [
86
+ "text_file = keras.utils.get_file(\n",
87
+ " fname = \"spa-eng.zip\",\n",
88
+ " origin = \"http://storage.googleapis.com/download.tensorflow.org/data/spa-eng.zip\",\n",
89
+ " extract = True,\n",
90
+ ")\n",
91
+ "\n",
92
+ "text_file = pathlib.Path(text_file).parent / \"spa-eng\" / \"spa.txt\"\n",
93
+ "\n",
94
+ "with open(text_file, \"r\") as f:\n",
95
+ " lines = f.read().split(\"\\n\")[:-1]\n",
96
+ " \n",
97
+ "text_pairs = []\n",
98
+ "\n",
99
+ "for line in lines:\n",
100
+ " eng, spa = line.split(\"\\t\")\n",
101
+ " spa = \"[start] \" + spa + \" [end]\"\n",
102
+ " text_pairs.append((eng, spa))"
103
+ ]
104
+ },
105
+ {
106
+ "cell_type": "code",
107
+ "execution_count": 4,
108
+ "metadata": {},
109
+ "outputs": [],
110
+ "source": [
111
+ "random.shuffle(text_pairs)"
112
+ ]
113
+ },
114
+ {
115
+ "cell_type": "code",
116
+ "execution_count": 5,
117
+ "metadata": {},
118
+ "outputs": [
119
+ {
120
+ "name": "stdout",
121
+ "output_type": "stream",
122
+ "text": [
123
+ "('Tom came here on his own.', '[start] Tom vino aquí por sí solo. [end]')\n",
124
+ "(\"I don't want Tom to see me naked.\", '[start] No quiero que Tom me vea desnuda. [end]')\n",
125
+ "('Stand back!', '[start] ¡Retrocede! [end]')\n",
126
+ "(\"He's quite formal when he meets a stranger.\", '[start] Es muy ceremonioso cuando le presentan una persona desconocida. [end]')\n",
127
+ "(\"I'm stuck in a traffic jam.\", '[start] Estoy atrapado en un atasco. [end]')\n"
128
+ ]
129
+ }
130
+ ],
131
+ "source": [
132
+ "for i in range(5):\n",
133
+ " print(text_pairs[i])"
134
+ ]
135
+ },
136
+ {
137
+ "cell_type": "markdown",
138
+ "metadata": {},
139
+ "source": [
140
+ "Split the dataset into training, validation, and test sets"
141
+ ]
142
+ },
143
+ {
144
+ "cell_type": "code",
145
+ "execution_count": 6,
146
+ "metadata": {},
147
+ "outputs": [
148
+ {
149
+ "name": "stdout",
150
+ "output_type": "stream",
151
+ "text": [
152
+ "118964 total pairs\n",
153
+ "83276 training pairs\n",
154
+ "17844 validation pairs\n",
155
+ "17844 test pairs\n"
156
+ ]
157
+ }
158
+ ],
159
+ "source": [
160
+ "num_val_samples = int(0.15 * len(text_pairs))\n",
161
+ "num_train_samples = len(text_pairs) - 2 * num_val_samples\n",
162
+ "train_pairs = text_pairs[:num_train_samples]\n",
163
+ "val_pairs = text_pairs[num_train_samples:num_train_samples + num_val_samples]\n",
164
+ "test_pairs = text_pairs[num_train_samples + num_val_samples:]\n",
165
+ "\n",
166
+ "print(f\"{len(text_pairs)} total pairs\")\n",
167
+ "print(f\"{len(train_pairs)} training pairs\")\n",
168
+ "print(f\"{len(val_pairs)} validation pairs\")\n",
169
+ "print(f\"{len(test_pairs)} test pairs\")"
170
+ ]
171
+ },
172
+ {
173
+ "cell_type": "code",
174
+ "execution_count": 7,
175
+ "metadata": {},
176
+ "outputs": [],
177
+ "source": [
178
+ "# parameters\n",
179
+ "strip_chars = string.punctuation + \"¿\"\n",
180
+ "strip_chars = strip_chars.replace(\"[\", \"\")\n",
181
+ "strip_chars = strip_chars.replace(\"]\", \"\")\n",
182
+ "\n",
183
+ "vocab_size = 15000\n",
184
+ "sequence_length = 20\n",
185
+ "batch_size = 64"
186
+ ]
187
+ },
188
+ {
189
+ "cell_type": "markdown",
190
+ "metadata": {},
191
+ "source": [
192
+ "## Vectorize the data"
193
+ ]
194
+ },
195
+ {
196
+ "cell_type": "code",
197
+ "execution_count": 8,
198
+ "metadata": {},
199
+ "outputs": [],
200
+ "source": [
201
+ "def custom_standardization(input_string):\n",
202
+ " lowercase = tf_strings.lower(input_string)\n",
203
+ " return tf_strings.regex_replace(lowercase, f\"[{re.escape(strip_chars)}]\", \"\")\n",
204
+ "\n",
205
+ "# vectorization\n",
206
+ "eng_vectorization = TextVectorization(\n",
207
+ " max_tokens = vocab_size,\n",
208
+ " output_mode = \"int\",\n",
209
+ " output_sequence_length = sequence_length,\n",
210
+ ")\n",
211
+ "\n",
212
+ "spa_vectorization = TextVectorization(\n",
213
+ " max_tokens = vocab_size,\n",
214
+ " output_mode = \"int\",\n",
215
+ " output_sequence_length = sequence_length + 1,\n",
216
+ " standardize = custom_standardization,\n",
217
+ ")\n",
218
+ "\n",
219
+ "train_eng_texts = [pair[0] for pair in train_pairs]\n",
220
+ "train_spa_texts = [pair[1] for pair in train_pairs]\n",
221
+ "\n",
222
+ "eng_vectorization.adapt(train_eng_texts)\n",
223
+ "spa_vectorization.adapt(train_spa_texts)\n",
224
+ "\n",
225
+ "# save the vectorization configs and vocabularies\n",
226
+ "eng_vectorization_config = eng_vectorization.get_config()\n",
227
+ "eng_vectorization_config.pop('standardize', None)\n",
228
+ "eng_vocab = eng_vectorization.get_vocabulary()\n",
229
+ "with open('eng_vectorization_config.json', 'w', encoding='utf-8') as f:\n",
230
+ " json.dump(eng_vectorization_config, f)\n",
231
+ " \n",
232
+ "with open('eng_vocab.json', 'w', encoding='utf-8') as f:\n",
233
+ " json.dump(eng_vocab, f)\n",
234
+ " \n",
235
+ "spa_vectorization_config = spa_vectorization.get_config()\n",
236
+ "spa_vectorization_config.pop('standardize', None)\n",
237
+ "spa_vocab = spa_vectorization.get_vocabulary()\n",
238
+ "with open('spa_vectorization_config.json', 'w', encoding='utf-8') as f:\n",
239
+ " json.dump(spa_vectorization_config, f)\n",
240
+ " \n",
241
+ "with open('spa_vocab.json', 'w', encoding='utf-8') as f:\n",
242
+ " json.dump(spa_vocab, f)\n",
243
+ " \n",
244
+ "\n",
245
+ "def format_dataset(eng, spa):\n",
246
+ " eng = eng_vectorization(eng)\n",
247
+ " spa = spa_vectorization(spa)\n",
248
+ " return (\n",
249
+ " {\n",
250
+ " \"encoder_inputs\": eng,\n",
251
+ " \"decoder_inputs\": spa[:, :-1],\n",
252
+ " },\n",
253
+ " spa[:, 1:],\n",
254
+ " )\n",
255
+ " \n",
256
+ "def make_dataset(pairs):\n",
257
+ " eng_texts, spa_texts = zip(*pairs)\n",
258
+ " eng_texts = list(eng_texts)\n",
259
+ " spa_texts = list(spa_texts)\n",
260
+ " dataset = tf_data.Dataset.from_tensor_slices((eng_texts, spa_texts))\n",
261
+ " dataset = dataset.batch(batch_size)\n",
262
+ " dataset = dataset.map(format_dataset)\n",
263
+ " return dataset.cache().shuffle(2048).prefetch(16)\n",
264
+ "\n",
265
+ "train_ds = make_dataset(train_pairs)\n",
266
+ "val_ds = make_dataset(val_pairs)\n",
267
+ " "
268
+ ]
269
+ },
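+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The configs and vocabularies saved above can be restored at inference time. A minimal sketch, assuming the JSON files written in the previous cell; note that `standardize` was popped before saving, so the Spanish layer should be re-created with `standardize = custom_standardization` rather than purely from its saved config."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Hedged sketch: rebuild the English vectorizer from the files saved above.\n",
+ "with open('eng_vectorization_config.json', 'r', encoding='utf-8') as f:\n",
+ "    loaded_eng_config = json.load(f)\n",
+ "loaded_eng_vectorization = TextVectorization.from_config(loaded_eng_config)\n",
+ "with open('eng_vocab.json', 'r', encoding='utf-8') as f:\n",
+ "    loaded_eng_vectorization.set_vocabulary(json.load(f))\n",
+ "# The Spanish layer is rebuilt the same way, but pass\n",
+ "# standardize=custom_standardization when constructing it so that\n",
+ "# inference-time preprocessing matches training."
+ ]
+ },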
270
+ {
271
+ "cell_type": "code",
272
+ "execution_count": 9,
273
+ "metadata": {},
274
+ "outputs": [
275
+ {
276
+ "name": "stdout",
277
+ "output_type": "stream",
278
+ "text": [
279
+ "(64, 20)\n",
280
+ "(64, 20)\n"
281
+ ]
282
+ }
283
+ ],
284
+ "source": [
285
+ "for inputs, targets in train_ds.take(1):\n",
286
+ " print(inputs[\"encoder_inputs\"].shape)\n",
287
+ " print(targets.shape)"
288
+ ]
289
+ },
290
+ {
291
+ "cell_type": "markdown",
292
+ "metadata": {},
293
+ "source": [
294
+ "### Model Architecture\n",
295
+ "![Encoder-Decoder](images/encoder-decoder-context.png)\n",
296
+ "![Encoder-Decoder](images/encoder-decoder-translation.png)\n",
297
+ "![Attention Mechanism](images/attention.png)"
298
+ ]
299
+ },
300
+ {
301
+ "cell_type": "code",
302
+ "execution_count": 10,
303
+ "metadata": {},
304
+ "outputs": [],
305
+ "source": [
306
+ "# Creating an Encoder\n",
307
+ "class TransformerEncoder(layers.Layer):\n",
308
+ " def __init__(self, embed_dim, dense_dim, num_heads, **kwargs):\n",
309
+ " super().__init__(**kwargs)\n",
310
+ " self.embed_dim = embed_dim\n",
311
+ " self.dense_dim = dense_dim\n",
312
+ " self.num_heads = num_heads\n",
313
+ " self.attention = layers.MultiHeadAttention(\n",
314
+ " num_heads = num_heads, key_dim = embed_dim\n",
315
+ " )\n",
316
+ " self.dense_proj = keras.Sequential(\n",
317
+ " [\n",
318
+ " layers.Dense(dense_dim, activation = \"relu\"),\n",
319
+ " layers.Dense(embed_dim),\n",
320
+ " ]\n",
321
+ " )\n",
322
+ " self.layernorm_1 = layers.LayerNormalization()\n",
323
+ " self.layernorm_2 = layers.LayerNormalization()\n",
324
+ " self.supports_masking = True\n",
325
+ " \n",
326
+ " def call(self, inputs, mask=None):\n",
327
+ " if mask is not None:\n",
328
+ " padding_mask = tf.cast(mask[:, None, :], dtype = tf.int32)\n",
329
+ " else:\n",
330
+ " padding_mask = None\n",
331
+ " \n",
332
+ " attention_output = self.attention(\n",
333
+ " query = inputs,\n",
334
+ " value = inputs,\n",
335
+ " key = inputs,\n",
336
+ " attention_mask = padding_mask,\n",
337
+ " )\n",
338
+ " proj_input = self.layernorm_1(inputs + attention_output)\n",
339
+ " proj_output = self.dense_proj(proj_input)\n",
340
+ " return self.layernorm_2(proj_input + proj_output)\n",
341
+ " \n",
342
+ " def get_config(self):\n",
343
+ " config = super().get_config()\n",
344
+ " config.update({\n",
345
+ " \"embed_dim\": self.embed_dim,\n",
346
+ " \"dense_dim\": self.dense_dim,\n",
347
+ " \"num_heads\": self.num_heads,\n",
348
+ " })\n",
349
+ " return config\n",
350
+ " \n",
351
+ "# Creating a Positional Embedding\n",
352
+ "class PositionalEmbedding(layers.Layer):\n",
353
+ " def __init__(self, sequence_length, vocab_size, embed_dim, **kwargs):\n",
354
+ " super().__init__(**kwargs)\n",
355
+ " self.token_embeddings = layers.Embedding(\n",
356
+ " input_dim = vocab_size, output_dim = embed_dim\n",
357
+ " )\n",
358
+ " self.position_embeddings = layers.Embedding(\n",
359
+ " input_dim = sequence_length, output_dim = embed_dim\n",
360
+ " )\n",
361
+ " self.sequence_length = sequence_length\n",
362
+ " self.vocab_size = vocab_size\n",
363
+ " self.embed_dim = embed_dim\n",
364
+ " \n",
365
+ " def call(self, inputs):\n",
366
+ " length = tf.shape(inputs)[-1]\n",
367
+ " positions = tf.range(start = 0, limit = length, delta = 1)\n",
368
+ " embedded_tokens = self.token_embeddings(inputs)\n",
369
+ " embedded_positions = self.position_embeddings(positions)\n",
370
+ " return embedded_tokens + embedded_positions\n",
371
+ " \n",
372
+ " def compute_mask(self, inputs, mask=None):\n",
373
+ " if mask is not None:\n",
374
+ " return tf.not_equal(inputs, 0)\n",
375
+ " else:\n",
376
+ " return None\n",
377
+ " \n",
378
+ " def get_config(self):\n",
379
+ " config = super().get_config()\n",
380
+ " config.update({\n",
381
+ " \"vocab_size\": self.vocab_size,\n",
382
+ " \"sequence_length\": self.sequence_length,\n",
383
+ " \"embed_dim\": self.embed_dim,\n",
384
+ " })\n",
385
+ " return config\n",
386
+ " \n",
387
+ "# Creating a Decoder\n",
388
+ "class TransformerDecoder(layers.Layer):\n",
389
+ " def __init__(self, embed_dim, latent_dim, num_heads, **kwargs):\n",
390
+ " super().__init__(**kwargs)\n",
391
+ " self.embed_dim = embed_dim\n",
392
+ " self.latent_dim = latent_dim\n",
393
+ " self.num_heads = num_heads\n",
394
+ " self.attention_1 = layers.MultiHeadAttention(\n",
395
+ " num_heads = num_heads, key_dim = embed_dim\n",
396
+ " )\n",
397
+ " self.attention_2 = layers.MultiHeadAttention(\n",
398
+ " num_heads = num_heads, key_dim = embed_dim\n",
399
+ " )\n",
400
+ " self.dense_proj = keras.Sequential(\n",
401
+ " [\n",
402
+ " layers.Dense(latent_dim, activation = \"relu\"),\n",
403
+ " layers.Dense(embed_dim),\n",
404
+ " ]\n",
405
+ " )\n",
406
+ " self.layernorm_1 = layers.LayerNormalization()\n",
407
+ " self.layernorm_2 = layers.LayerNormalization()\n",
408
+ " self.layernorm_3 = layers.LayerNormalization()\n",
409
+ " self.supports_masking = True\n",
410
+ " \n",
411
+ " def call(self, inputs, encoder_outputs, mask=None):\n",
412
+ "        causal_mask = self.get_causal_attention_mask(inputs)\n",
413
+ " if mask is not None:\n",
414
+ " padding_mask = tf.cast(mask[:, None, :], dtype = tf.int32)\n",
415
+ "            padding_mask = tf.minimum(padding_mask, causal_mask)\n",
416
+ " else:\n",
417
+ " padding_mask = None\n",
418
+ " \n",
419
+ " attention_output_1 = self.attention_1(\n",
420
+ " query = inputs,\n",
421
+ " value = inputs,\n",
422
+ " key = inputs,\n",
423
+ "            attention_mask = causal_mask,\n",
424
+ " )\n",
425
+ " out_1 = self.layernorm_1(inputs + attention_output_1)\n",
426
+ " \n",
427
+ " attention_output_2 = self.attention_2(\n",
428
+ " query = out_1,\n",
429
+ " value = encoder_outputs,\n",
430
+ " key = encoder_outputs,\n",
431
+ " attention_mask = padding_mask,\n",
432
+ " )\n",
433
+ " \n",
434
+ " out_2 = self.layernorm_2(out_1 + attention_output_2)\n",
435
+ " proj_output = self.dense_proj(out_2)\n",
436
+ " \n",
437
+ " return self.layernorm_3(out_2 + proj_output)\n",
438
+ " \n",
439
+ " def get_causal_attention_mask(self, inputs):\n",
440
+ " input_shape = tf.shape(inputs)\n",
441
+ " batch_size, sequence_length = input_shape[0], input_shape[1]\n",
442
+ " i = tf.range(sequence_length)[:, None]\n",
443
+ " j = tf.range(sequence_length)\n",
444
+ " mask = tf.cast(i >= j, tf.int32)\n",
445
+ " mask = tf.reshape(mask,(1, input_shape[1], input_shape[1]))\n",
446
+ " mult = tf.concat(\n",
447
+ " [\n",
448
+ " tf.expand_dims(batch_size, -1),\n",
449
+ " tf.convert_to_tensor([1, 1]),\n",
450
+ " ],\n",
451
+ " axis = 0,\n",
452
+ " )\n",
453
+ " return tf.tile(mask, mult)\n",
454
+ " \n",
455
+ " def get_config(self):\n",
456
+ " config = super().get_config()\n",
457
+ " config.update({\n",
458
+ " \"embed_dim\": self.embed_dim,\n",
459
+ " \"latent_dim\": self.latent_dim,\n",
460
+ " \"num_heads\": self.num_heads,\n",
461
+ " })\n",
462
+ " return config\n"
463
+ ]
464
+ },
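+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A quick sanity check of `get_causal_attention_mask` (an illustrative addition, not part of the original run): position i may only attend to positions <= i, so each per-example mask is lower-triangular."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Hedged demo: the causal mask is lower-triangular, tiled over the batch.\n",
+ "demo_decoder = TransformerDecoder(embed_dim=8, latent_dim=16, num_heads=2)\n",
+ "demo_inputs = tf.zeros((2, 4), dtype=tf.int64)  # (batch, sequence)\n",
+ "print(demo_decoder.get_causal_attention_mask(demo_inputs)[0])\n",
+ "# Expected: a 4x4 lower-triangular matrix of ones."
+ ]
+ },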
465
+ {
466
+ "cell_type": "code",
467
+ "execution_count": 11,
468
+ "metadata": {},
469
+ "outputs": [],
470
+ "source": [
471
+ "# define embedding dimension, latent dimension, and number of heads\n",
472
+ "embed_dim = 256\n",
473
+ "latent_dim = 2048\n",
474
+ "num_heads = 8\n",
475
+ "\n",
476
+ "# Encoder\n",
477
+ "encoder_inputs = keras.Input(shape = (None,), dtype = \"int64\", name = \"encoder_inputs\")\n",
478
+ "\n",
479
+ "x = PositionalEmbedding(sequence_length, vocab_size, embed_dim)(encoder_inputs)\n",
480
+ "\n",
481
+ "encoder_outputs = TransformerEncoder(embed_dim, latent_dim, num_heads)(x)\n",
482
+ "\n",
483
+ "encoder = keras.Model(encoder_inputs, encoder_outputs, name = \"encoder\")\n",
484
+ "\n",
485
+ "# Decoder\n",
486
+ "decoder_inputs = keras.Input(shape = (None,), dtype = \"int64\", name = \"decoder_inputs\")\n",
487
+ "encoder_seq_inputs = keras.Input(shape = (None, embed_dim), name = \"encoder_seq_inputs\")\n",
488
+ "\n",
489
+ "x = PositionalEmbedding(sequence_length, vocab_size, embed_dim)(decoder_inputs)\n",
490
+ "\n",
491
+ "x = TransformerDecoder(embed_dim, latent_dim, num_heads)(x, encoder_seq_inputs)\n",
492
+ "\n",
493
+ "x = layers.Dropout(0.5)(x)\n",
494
+ "\n",
495
+ "decoder_outputs = layers.Dense(vocab_size, activation = \"softmax\")(x)\n",
496
+ "\n",
497
+ "decoder = keras.Model([decoder_inputs, encoder_seq_inputs], decoder_outputs, name = \"decoder\")\n",
498
+ "\n",
499
+ "# Define the final model\n",
500
+ "decoder_outputs = decoder([decoder_inputs, encoder_outputs])\n",
501
+ "\n",
502
+ "transformer = keras.Model(\n",
503
+ " [encoder_inputs, decoder_inputs], decoder_outputs, name = \"transformer\"\n",
504
+ ")\n"
505
+ ]
506
+ },
507
+ {
508
+ "cell_type": "code",
509
+ "execution_count": null,
510
+ "metadata": {},
511
+ "outputs": [],
512
+ "source": []
513
+ },
514
+ {
515
+ "cell_type": "code",
516
+ "execution_count": 12,
517
+ "metadata": {},
518
+ "outputs": [
519
+ {
520
+ "name": "stdout",
521
+ "output_type": "stream",
522
+ "text": [
523
+ "Model: \"transformer\"\n",
524
+ "__________________________________________________________________________________________________\n",
525
+ " Layer (type) Output Shape Param # Connected to \n",
526
+ "==================================================================================================\n",
527
+ " encoder_inputs (InputLayer) [(None, None)] 0 [] \n",
528
+ " \n",
529
+ " positional_embedding (Position (None, None, 256) 3845120 ['encoder_inputs[0][0]'] \n",
530
+ " alEmbedding) \n",
531
+ " \n",
532
+ " decoder_inputs (InputLayer) [(None, None)] 0 [] \n",
533
+ " \n",
534
+ " transformer_encoder (Transform (None, None, 256) 3155456 ['positional_embedding[0][0]'] \n",
535
+ " erEncoder) \n",
536
+ " \n",
537
+ " decoder (Functional) (None, None, 15000) 12959640 ['decoder_inputs[0][0]', \n",
538
+ " 'transformer_encoder[0][0]'] \n",
539
+ " \n",
540
+ "==================================================================================================\n",
541
+ "Total params: 19,960,216\n",
542
+ "Trainable params: 19,960,216\n",
543
+ "Non-trainable params: 0\n",
544
+ "__________________________________________________________________________________________________\n",
545
+ "Epoch 1/20\n",
546
+ "1302/1302 [==============================] - 186s 138ms/step - loss: 1.9811 - accuracy: 0.7206 - val_loss: 1.6848 - val_accuracy: 0.7416\n",
547
+ "Epoch 2/20\n",
548
+ "1302/1302 [==============================] - 179s 137ms/step - loss: 1.7133 - accuracy: 0.7508 - val_loss: 1.5721 - val_accuracy: 0.7638\n",
549
+ "Epoch 3/20\n",
550
+ "1302/1302 [==============================] - 178s 136ms/step - loss: 1.5370 - accuracy: 0.7759 - val_loss: 1.3673 - val_accuracy: 0.7908\n",
551
+ "Epoch 4/20\n",
552
+ "1302/1302 [==============================] - 178s 137ms/step - loss: 1.3938 - accuracy: 0.7957 - val_loss: 1.2725 - val_accuracy: 0.8077\n",
553
+ "Epoch 5/20\n",
554
+ "1302/1302 [==============================] - 177s 136ms/step - loss: 1.2863 - accuracy: 0.8128 - val_loss: 1.2105 - val_accuracy: 0.8216\n",
555
+ "Epoch 6/20\n",
556
+ "1302/1302 [==============================] - 178s 137ms/step - loss: 1.2217 - accuracy: 0.8268 - val_loss: 1.1603 - val_accuracy: 0.8325\n",
557
+ "Epoch 7/20\n",
558
+ "1302/1302 [==============================] - 178s 137ms/step - loss: 1.1745 - accuracy: 0.8369 - val_loss: 1.1168 - val_accuracy: 0.8407\n",
559
+ "Epoch 8/20\n",
560
+ "1302/1302 [==============================] - 178s 136ms/step - loss: 1.1379 - accuracy: 0.8448 - val_loss: 1.1005 - val_accuracy: 0.8429\n",
561
+ "Epoch 9/20\n",
562
+ "1302/1302 [==============================] - 177s 136ms/step - loss: 1.1092 - accuracy: 0.8506 - val_loss: 1.0816 - val_accuracy: 0.8485\n",
563
+ "Epoch 10/20\n",
564
+ "1302/1302 [==============================] - 178s 136ms/step - loss: 1.0849 - accuracy: 0.8552 - val_loss: 1.0628 - val_accuracy: 0.8515\n",
565
+ "Epoch 11/20\n",
566
+ "1302/1302 [==============================] - 177s 136ms/step - loss: 1.0652 - accuracy: 0.8590 - val_loss: 1.0541 - val_accuracy: 0.8510\n",
567
+ "Epoch 12/20\n",
568
+ "1302/1302 [==============================] - 178s 136ms/step - loss: 1.0469 - accuracy: 0.8622 - val_loss: 1.0370 - val_accuracy: 0.8556\n",
569
+ "Epoch 13/20\n",
570
+ "1302/1302 [==============================] - 178s 136ms/step - loss: 1.0317 - accuracy: 0.8649 - val_loss: 1.0275 - val_accuracy: 0.8577\n",
571
+ "Epoch 14/20\n",
572
+ "1302/1302 [==============================] - 178s 136ms/step - loss: 1.0163 - accuracy: 0.8674 - val_loss: 1.0242 - val_accuracy: 0.8584\n",
573
+ "Epoch 15/20\n",
574
+ "1302/1302 [==============================] - 178s 136ms/step - loss: 1.0016 - accuracy: 0.8696 - val_loss: 1.0176 - val_accuracy: 0.8602\n",
575
+ "Epoch 16/20\n",
576
+ "1302/1302 [==============================] - 178s 136ms/step - loss: 0.9884 - accuracy: 0.8717 - val_loss: 1.0102 - val_accuracy: 0.8606\n",
577
+ "Epoch 17/20\n",
578
+ "1302/1302 [==============================] - 178s 136ms/step - loss: 0.9767 - accuracy: 0.8736 - val_loss: 1.0234 - val_accuracy: 0.8591\n",
579
+ "Epoch 18/20\n",
580
+ "1302/1302 [==============================] - 178s 136ms/step - loss: 0.9665 - accuracy: 0.8752 - val_loss: 0.9950 - val_accuracy: 0.8634\n",
581
+ "Epoch 19/20\n",
582
+ "1302/1302 [==============================] - 178s 136ms/step - loss: 0.9549 - accuracy: 0.8771 - val_loss: 1.0000 - val_accuracy: 0.8621\n",
583
+ "Epoch 20/20\n",
584
+ "1302/1302 [==============================] - 178s 136ms/step - loss: 0.9456 - accuracy: 0.8783 - val_loss: 1.0106 - val_accuracy: 0.8617\n"
585
+ ]
586
+ },
587
+ {
588
+ "data": {
589
+ "text/plain": [
590
+ "<keras.callbacks.History at 0x214f97284c0>"
591
+ ]
592
+ },
593
+ "execution_count": 12,
594
+ "metadata": {},
595
+ "output_type": "execute_result"
596
+ }
597
+ ],
598
+ "source": [
599
+ "epochs = 20\n",
600
+ "\n",
601
+ "transformer.summary()\n",
602
+ "\n",
603
+ "transformer.compile(\n",
604
+ " \"rmsprop\", loss = \"sparse_categorical_crossentropy\", metrics = [\"accuracy\"]\n",
605
+ ")\n",
606
+ "\n",
607
+ "transformer.fit(train_ds, epochs = epochs, validation_data = val_ds)"
608
+ ]
609
+ },
610
+ {
611
+ "cell_type": "code",
612
+ "execution_count": 13,
613
+ "metadata": {},
614
+ "outputs": [
615
+ {
616
+ "name": "stderr",
617
+ "output_type": "stream",
618
+ "text": [
619
+ "WARNING:absl:Found untraced functions such as embedding_layer_call_fn, embedding_layer_call_and_return_conditional_losses, embedding_1_layer_call_fn, embedding_1_layer_call_and_return_conditional_losses, multi_head_attention_layer_call_fn while saving (showing 5 of 60). These functions will not be directly callable after loading.\n"
620
+ ]
621
+ },
622
+ {
623
+ "name": "stdout",
624
+ "output_type": "stream",
625
+ "text": [
626
+ "INFO:tensorflow:Assets written to: transformer_model\\assets\n"
627
+ ]
628
+ },
629
+ {
630
+ "name": "stderr",
631
+ "output_type": "stream",
632
+ "text": [
633
+ "INFO:tensorflow:Assets written to: transformer_model\\assets\n"
634
+ ]
635
+ }
636
+ ],
637
+ "source": [
638
+ "transformer.save(\"transformer_model\")"
639
+ ]
640
+ },
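+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Reloading the SavedModel later requires the custom layer classes. A minimal sketch, assuming the `transformer_model` directory written above:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Hedged sketch: reload the model saved above; the custom layers are\n",
+ "# passed via custom_objects so Keras can rebuild them from their configs.\n",
+ "reloaded_transformer = keras.models.load_model(\n",
+ "    'transformer_model',\n",
+ "    custom_objects={\n",
+ "        'PositionalEmbedding': PositionalEmbedding,\n",
+ "        'TransformerEncoder': TransformerEncoder,\n",
+ "        'TransformerDecoder': TransformerDecoder,\n",
+ "    },\n",
+ ")"
+ ]
+ },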
641
+ {
642
+ "cell_type": "code",
643
+ "execution_count": 14,
644
+ "metadata": {},
645
+ "outputs": [
646
+ {
647
+ "name": "stdout",
648
+ "output_type": "stream",
649
+ "text": [
650
+ "input: her days are numbered\n",
651
+ "translated: [start] sus días están en el [UNK] [end]\n",
652
+ "\n",
653
+ "input: i heard the door close\n",
654
+ "translated: [start] he oído la puerta [end]\n",
655
+ "\n",
656
+ "input: you go first\n",
657
+ "translated: [start] vas al primero [end]\n",
658
+ "\n",
659
+ "input: you used to look up to your father\n",
660
+ "translated: [start] te [UNK] a tu padre [end]\n",
661
+ "\n",
662
+ "input: i am acquainted with the chairman of the committee\n",
663
+ "translated: [start] estoy en el [UNK] de la [UNK] [end]\n",
664
+ "\n"
665
+ ]
666
+ }
667
+ ],
668
+ "source": [
669
+ "spa_vocab = spa_vectorization.get_vocabulary()\n",
670
+ "spa_index_lookup = dict(zip(range(len(spa_vocab)), spa_vocab))\n",
671
+ "max_decoded_sentence_length = sequence_length\n",
672
+ "\n",
673
+ "def decode_sentence(input_sentence):\n",
674
+ " tokenized_input_sentence = eng_vectorization([input_sentence])\n",
675
+ " decoded_sentence = \"[start]\"\n",
676
+ " for i in range(max_decoded_sentence_length):\n",
677
+ " tokenized_target_sentence = spa_vectorization([decoded_sentence])[:, :-1]\n",
678
+ " predictions = transformer([tokenized_input_sentence, tokenized_target_sentence])\n",
679
+ " sampled_token_index = tf.argmax(predictions[0, i, :]).numpy().item(0)\n",
680
+ " sampled_token = spa_index_lookup[sampled_token_index]\n",
681
+ " decoded_sentence += \" \" + sampled_token\n",
682
+ " if sampled_token == \"[end]\":\n",
683
+ " break\n",
684
+ " return decoded_sentence\n",
685
+ "\n",
686
+ "test_eng_texts = [pair[0] for pair in test_pairs]\n",
687
+ "for _ in range(5):\n",
688
+ " input_sentence = random.choice(test_eng_texts)\n",
689
+ " input_sentence = input_sentence.lower()\n",
690
+ " input_sentence = input_sentence.translate(str.maketrans('', '', strip_chars))\n",
691
+ " translated = decode_sentence(input_sentence)\n",
692
+ " print(f\"input: {input_sentence}\")\n",
693
+ " print(f\"translated: {translated}\")\n",
694
+ " print()"
695
+ ]
696
+ },
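+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For an ad-hoc input, the same preprocessing (lowercasing and stripping punctuation) must be applied before calling `decode_sentence`. A usage sketch with an arbitrary example sentence:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Hedged usage example with an arbitrary sentence (not from the test set).\n",
+ "custom_sentence = 'This is my favorite book.'\n",
+ "custom_sentence = custom_sentence.lower().translate(str.maketrans('', '', strip_chars))\n",
+ "print(decode_sentence(custom_sentence))"
+ ]
+ },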
697
+ {
698
+ "cell_type": "code",
699
+ "execution_count": null,
700
+ "metadata": {},
701
+ "outputs": [],
702
+ "source": []
703
+ }
704
+ ],
705
+ "metadata": {
706
+ "kernelspec": {
707
+ "display_name": "base",
708
+ "language": "python",
709
+ "name": "python3"
710
+ },
711
+ "language_info": {
712
+ "codemirror_mode": {
713
+ "name": "ipython",
714
+ "version": 3
715
+ },
716
+ "file_extension": ".py",
717
+ "mimetype": "text/x-python",
718
+ "name": "python",
719
+ "nbconvert_exporter": "python",
720
+ "pygments_lexer": "ipython3",
721
+ "version": "3.9.19"
722
+ }
723
+ },
724
+ "nbformat": 4,
725
+ "nbformat_minor": 2
726
+ }
Task 2/LICENSE ADDED
@@ -0,0 +1,201 @@
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright [yyyy] [name of copyright owner]
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
Task 2/README.md ADDED
@@ -0,0 +1 @@
1
+ # Machine_Translation
Task 2/data/english ADDED
The diff for this file is too large to render.
 
Task 2/data/french ADDED
The diff for this file is too large to render.
 
Task 2/eng_vectorization_config.json ADDED
@@ -0,0 +1 @@
1
+ {"name": "text_vectorization", "trainable": true, "batch_input_shape": [null], "dtype": "string", "max_tokens": 15000, "split": "whitespace", "ngrams": null, "output_mode": "int", "output_sequence_length": 20, "pad_to_max_tokens": false, "sparse": false, "ragged": false, "vocabulary": null, "idf_weights": null}
Task 2/eng_vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
Task 2/english_to_french_model/keras_metadata.pb ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:18727f19a8f2e298e734019630e718acf71e07ef341ce0398a88208498c47dfd
3
+ size 21276
Task 2/english_to_french_model/saved_model.pb ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5ecd4484fbca10b3a8698ba8bfac97dd428bcd51198b27e3df139be775b3fcb3
3
+ size 2021585
Task 2/english_to_french_model/variables/variables.data-00000-of-00001 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c8c32e017aee52f239ca0d0b7a1cd3f334cc53f7b9147d2b3aded881d1ca734f
3
+ size 20633894
Task 2/english_to_french_model/variables/variables.index ADDED
Binary file (2.91 kB).
 
Task 2/english_tokenizer.json ADDED
@@ -0,0 +1 @@
1
+ "{\"class_name\": \"Tokenizer\", \"config\": {\"num_words\": null, \"filters\": \"!\\\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\\t\\n\", \"lower\": true, \"split\": \" \", \"char_level\": false, \"oov_token\": null, \"document_count\": 137861, \"word_counts\": \"{\\\"new\\\": 12197, \\\"jersey\\\": 11225, \\\"is\\\": 205858, \\\"sometimes\\\": 37746, \\\"quiet\\\": 8693, \\\"during\\\": 74933, \\\"autumn\\\": 9004, \\\"and\\\": 59850, \\\"it\\\": 75137, \\\"snowy\\\": 8898, \\\"in\\\": 75525, \\\"april\\\": 8954, \\\"the\\\": 67628, \\\"united\\\": 11270, \\\"states\\\": 11270, \\\"usually\\\": 37507, \\\"chilly\\\": 8770, \\\"july\\\": 8956, \\\"freezing\\\": 8928, \\\"november\\\": 8951, \\\"california\\\": 11250, \\\"march\\\": 9023, \\\"hot\\\": 8639, \\\"june\\\": 9133, \\\"mild\\\": 8743, \\\"cold\\\": 8878, \\\"september\\\": 8958, \\\"your\\\": 9734, \\\"least\\\": 27564, \\\"liked\\\": 14046, \\\"fruit\\\": 27192, \\\"grape\\\": 4848, \\\"but\\\": 63987, \\\"my\\\": 9700, \\\"apple\\\": 4848, \\\"his\\\": 9700, \\\"favorite\\\": 28332, \\\"orange\\\": 4848, \\\"paris\\\": 11334, \\\"relaxing\\\": 8696, \\\"december\\\": 8945, \\\"busy\\\": 8791, \\\"spring\\\": 9102, \\\"never\\\": 37500, \\\"our\\\": 8932, \\\"lemon\\\": 4848, \\\"january\\\": 9090, \\\"warm\\\": 8890, \\\"lime\\\": 4848, \\\"her\\\": 9700, \\\"banana\\\": 4848, \\\"he\\\": 10786, \\\"saw\\\": 648, \\\"a\\\": 1944, \\\"old\\\": 972, \\\"yellow\\\": 972, \\\"truck\\\": 1944, \\\"india\\\": 11277, \\\"rainy\\\": 8761, \\\"that\\\": 2712, \\\"cat\\\": 192, \\\"was\\\": 1867, \\\"most\\\": 14934, \\\"loved\\\": 14166, \\\"animal\\\": 2304, \\\"dislikes\\\": 7314, \\\"grapefruit\\\": 10692, \\\"limes\\\": 5844, \\\"lemons\\\": 5844, \\\"february\\\": 8942, \\\"china\\\": 10953, \\\"pleasant\\\": 8916, \\\"october\\\": 8910, \\\"wonderful\\\": 8808, \\\"nice\\\": 8984, \\\"summer\\\": 8948, \\\"france\\\": 11170, \\\"may\\\": 8995, \\\"grapes\\\": 5844, \\\"mangoes\\\": 5844, \\\"their\\\": 8932, \\\"mango\\\": 4848, \\\"pear\\\": 4848, \\\"august\\\": 8789, \\\"beautiful\\\": 8915, \\\"apples\\\": 5844, \\\"peaches\\\": 5844, \\\"feared\\\": 768, \\\"shark\\\": 192, \\\"wet\\\": 8726, \\\"dry\\\": 8794, \\\"we\\\": 2532, \\\"like\\\": 4588, \\\"oranges\\\": 5844, \\\"they\\\": 3222, \\\"pears\\\": 5844, \\\"she\\\": 10786, \\\"little\\\": 1016, \\\"red\\\": 972, \\\"winter\\\": 9038, \\\"disliked\\\": 648, \\\"rusty\\\": 972, \\\"car\\\": 1944, \\\"strawberries\\\": 5844, \\\"i\\\": 2664, \\\"strawberry\\\": 4848, \\\"bananas\\\": 5844, \\\"going\\\": 666, \\\"to\\\": 5166, \\\"next\\\": 1666, \\\"plan\\\": 714, \\\"visit\\\": 1224, \\\"elephants\\\": 64, \\\"were\\\": 384, \\\"animals\\\": 768, \\\"are\\\": 870, \\\"likes\\\": 7314, \\\"dislike\\\": 4444, \\\"fall\\\": 9134, \\\"driving\\\": 1296, \\\"peach\\\": 4848, \\\"drives\\\": 648, \\\"blue\\\": 972, \\\"you\\\": 2414, \\\"bird\\\": 192, \\\"horses\\\": 64, \\\"mouse\\\": 192, \\\"went\\\": 378, \\\"last\\\": 781, \\\"horse\\\": 192, \\\"automobile\\\": 1944, \\\"dogs\\\": 64, \\\"white\\\": 972, \\\"elephant\\\": 192, \\\"black\\\": 972, \\\"think\\\": 240, \\\"difficult\\\": 260, \\\"translate\\\": 480, \\\"between\\\": 540, \\\"spanish\\\": 312, \\\"portuguese\\\": 312, \\\"big\\\": 1016, \\\"green\\\": 972, \\\"translating\\\": 300, \\\"fun\\\": 260, \\\"where\\\": 12, \\\"dog\\\": 192, \\\"why\\\": 240, \\\"might\\\": 378, \\\"go\\\": 1386, \\\"this\\\": 768, \\\"drove\\\": 648, \\\"shiny\\\": 972, \\\"sharks\\\": 64, \\\"monkey\\\": 192, \\\"how\\\": 67, 
\\\"weather\\\": 33, \\\"lion\\\": 192, \\\"plans\\\": 476, \\\"bear\\\": 192, \\\"rabbit\\\": 192, \\\"it's\\\": 240, \\\"chinese\\\": 312, \\\"when\\\": 144, \\\"eiffel\\\": 57, \\\"tower\\\": 57, \\\"did\\\": 204, \\\"grocery\\\": 57, \\\"store\\\": 57, \\\"wanted\\\": 378, \\\"does\\\": 24, \\\"football\\\": 57, \\\"field\\\": 57, \\\"wants\\\": 252, \\\"didn't\\\": 60, \\\"snake\\\": 192, \\\"snakes\\\": 64, \\\"do\\\": 84, \\\"easy\\\": 260, \\\"thinks\\\": 360, \\\"english\\\": 312, \\\"french\\\": 312, \\\"would\\\": 48, \\\"aren't\\\": 36, \\\"cats\\\": 64, \\\"rabbits\\\": 64, \\\"has\\\": 24, \\\"been\\\": 36, \\\"monkeys\\\": 64, \\\"lake\\\": 57, \\\"bears\\\": 64, \\\"school\\\": 57, \\\"birds\\\": 64, \\\"want\\\": 126, \\\"isn't\\\": 24, \\\"lions\\\": 64, \\\"am\\\": 24, \\\"mice\\\": 64, \\\"have\\\": 12}\", \"word_docs\": \"{\\\"in\\\": 75525, \\\"and\\\": 59850, \\\"new\\\": 12197, \\\"autumn\\\": 9004, \\\"snowy\\\": 8898, \\\"jersey\\\": 11225, \\\"april\\\": 8954, \\\"is\\\": 104561, \\\"during\\\": 74933, \\\"sometimes\\\": 32946, \\\"it\\\": 75137, \\\"quiet\\\": 8693, \\\"july\\\": 8956, \\\"november\\\": 8951, \\\"united\\\": 11270, \\\"states\\\": 11270, \\\"usually\\\": 32846, \\\"freezing\\\": 8928, \\\"chilly\\\": 8770, \\\"the\\\": 41221, \\\"california\\\": 11250, \\\"march\\\": 9023, \\\"hot\\\": 8639, \\\"june\\\": 9133, \\\"september\\\": 8958, \\\"mild\\\": 8743, \\\"cold\\\": 8878, \\\"fruit\\\": 27192, \\\"your\\\": 9734, \\\"least\\\": 14364, \\\"my\\\": 9700, \\\"liked\\\": 7446, \\\"but\\\": 63987, \\\"grape\\\": 4848, \\\"apple\\\": 4848, \\\"favorite\\\": 15132, \\\"his\\\": 9700, \\\"orange\\\": 4848, \\\"december\\\": 8945, \\\"relaxing\\\": 8696, \\\"paris\\\": 11334, \\\"spring\\\": 9102, \\\"busy\\\": 8791, \\\"never\\\": 32834, \\\"our\\\": 8932, \\\"lemon\\\": 4848, \\\"warm\\\": 8890, \\\"january\\\": 9090, \\\"her\\\": 9700, \\\"lime\\\": 4848, \\\"banana\\\": 4848, \\\"he\\\": 10786, \\\"a\\\": 1944, \\\"truck\\\": 1944, \\\"yellow\\\": 972, \\\"saw\\\": 648, \\\"old\\\": 972, \\\"rainy\\\": 8761, \\\"india\\\": 11277, \\\"loved\\\": 7566, \\\"cat\\\": 192, \\\"animal\\\": 2304, \\\"that\\\": 2712, \\\"most\\\": 8334, \\\"was\\\": 1867, \\\"limes\\\": 5844, \\\"dislikes\\\": 7314, \\\"grapefruit\\\": 10692, \\\"lemons\\\": 5844, \\\"february\\\": 8942, \\\"october\\\": 8910, \\\"china\\\": 10953, \\\"pleasant\\\": 8916, \\\"wonderful\\\": 8808, \\\"nice\\\": 8984, \\\"summer\\\": 8948, \\\"france\\\": 11170, \\\"may\\\": 8995, \\\"grapes\\\": 5844, \\\"mangoes\\\": 5844, \\\"pear\\\": 4848, \\\"their\\\": 8932, \\\"mango\\\": 4848, \\\"august\\\": 8789, \\\"beautiful\\\": 8915, \\\"apples\\\": 5844, \\\"peaches\\\": 5844, \\\"shark\\\": 192, \\\"feared\\\": 768, \\\"wet\\\": 8726, \\\"dry\\\": 8794, \\\"we\\\": 2532, \\\"oranges\\\": 5844, \\\"like\\\": 4588, \\\"they\\\": 3222, \\\"pears\\\": 5844, \\\"red\\\": 972, \\\"she\\\": 10786, \\\"little\\\": 1016, \\\"winter\\\": 9038, \\\"rusty\\\": 972, \\\"disliked\\\": 648, \\\"car\\\": 1944, \\\"strawberries\\\": 5844, \\\"i\\\": 2664, \\\"strawberry\\\": 4848, \\\"bananas\\\": 5844, \\\"going\\\": 666, \\\"to\\\": 4170, \\\"next\\\": 1666, \\\"visit\\\": 1224, \\\"plan\\\": 714, \\\"were\\\": 384, \\\"animals\\\": 768, \\\"elephants\\\": 64, \\\"are\\\": 870, \\\"likes\\\": 7314, \\\"dislike\\\": 4444, \\\"fall\\\": 9134, \\\"driving\\\": 1296, \\\"peach\\\": 4848, \\\"drives\\\": 648, \\\"blue\\\": 972, \\\"you\\\": 2414, \\\"bird\\\": 192, \\\"horses\\\": 64, \\\"mouse\\\": 192, 
\\\"went\\\": 378, \\\"last\\\": 781, \\\"horse\\\": 192, \\\"automobile\\\": 1944, \\\"dogs\\\": 64, \\\"white\\\": 972, \\\"elephant\\\": 192, \\\"black\\\": 972, \\\"think\\\": 240, \\\"portuguese\\\": 312, \\\"between\\\": 540, \\\"difficult\\\": 260, \\\"translate\\\": 480, \\\"spanish\\\": 312, \\\"green\\\": 972, \\\"big\\\": 1016, \\\"fun\\\": 260, \\\"translating\\\": 300, \\\"where\\\": 12, \\\"dog\\\": 192, \\\"why\\\": 240, \\\"go\\\": 1386, \\\"might\\\": 378, \\\"this\\\": 768, \\\"drove\\\": 648, \\\"shiny\\\": 972, \\\"sharks\\\": 64, \\\"monkey\\\": 192, \\\"weather\\\": 33, \\\"how\\\": 67, \\\"lion\\\": 192, \\\"plans\\\": 476, \\\"bear\\\": 192, \\\"rabbit\\\": 192, \\\"chinese\\\": 312, \\\"it's\\\": 240, \\\"when\\\": 144, \\\"eiffel\\\": 57, \\\"tower\\\": 57, \\\"store\\\": 57, \\\"did\\\": 204, \\\"grocery\\\": 57, \\\"wanted\\\": 378, \\\"does\\\": 24, \\\"field\\\": 57, \\\"football\\\": 57, \\\"wants\\\": 252, \\\"didn't\\\": 60, \\\"snake\\\": 192, \\\"snakes\\\": 64, \\\"easy\\\": 260, \\\"do\\\": 84, \\\"french\\\": 312, \\\"english\\\": 312, \\\"thinks\\\": 360, \\\"would\\\": 48, \\\"aren't\\\": 36, \\\"cats\\\": 64, \\\"rabbits\\\": 64, \\\"been\\\": 36, \\\"has\\\": 24, \\\"monkeys\\\": 64, \\\"lake\\\": 57, \\\"bears\\\": 64, \\\"school\\\": 57, \\\"birds\\\": 64, \\\"want\\\": 126, \\\"isn't\\\": 24, \\\"lions\\\": 64, \\\"am\\\": 24, \\\"mice\\\": 64, \\\"have\\\": 12}\", \"index_docs\": \"{\\\"2\\\": 75525, \\\"7\\\": 59850, \\\"17\\\": 12197, \\\"39\\\": 9004, \\\"55\\\": 8898, \\\"23\\\": 11225, \\\"44\\\": 8954, \\\"1\\\": 104561, \\\"4\\\": 74933, \\\"8\\\": 32946, \\\"3\\\": 75137, \\\"67\\\": 8693, \\\"43\\\": 8956, \\\"45\\\": 8951, \\\"20\\\": 11270, \\\"21\\\": 11270, \\\"9\\\": 32846, \\\"51\\\": 8928, \\\"62\\\": 8770, \\\"5\\\": 41221, \\\"22\\\": 11250, \\\"38\\\": 9023, \\\"68\\\": 8639, \\\"34\\\": 9133, \\\"42\\\": 8958, \\\"64\\\": 8743, \\\"57\\\": 8878, \\\"13\\\": 27192, \\\"29\\\": 9734, \\\"12\\\": 14364, \\\"30\\\": 9700, \\\"16\\\": 7446, \\\"6\\\": 63987, \\\"82\\\": 4848, \\\"83\\\": 4848, \\\"11\\\": 15132, \\\"31\\\": 9700, \\\"84\\\": 4848, \\\"47\\\": 8945, \\\"66\\\": 8696, \\\"18\\\": 11334, \\\"35\\\": 9102, \\\"60\\\": 8791, \\\"10\\\": 32834, \\\"49\\\": 8932, \\\"85\\\": 4848, \\\"56\\\": 8890, \\\"36\\\": 9090, \\\"32\\\": 9700, \\\"86\\\": 4848, \\\"87\\\": 4848, \\\"26\\\": 10786, \\\"100\\\": 1944, \\\"101\\\": 1944, \\\"112\\\": 972, \\\"127\\\": 648, \\\"111\\\": 972, \\\"63\\\": 8761, \\\"19\\\": 11277, \\\"15\\\": 7566, \\\"153\\\": 192, \\\"99\\\": 2304, \\\"95\\\": 2712, \\\"14\\\": 8334, \\\"104\\\": 1867, \\\"71\\\": 5844, \\\"69\\\": 7314, \\\"28\\\": 10692, \\\"72\\\": 5844, \\\"48\\\": 8942, \\\"54\\\": 8910, \\\"25\\\": 10953, \\\"52\\\": 8916, \\\"58\\\": 8808, \\\"41\\\": 8984, \\\"46\\\": 8948, \\\"24\\\": 11170, \\\"40\\\": 8995, \\\"73\\\": 5844, \\\"74\\\": 5844, \\\"89\\\": 4848, \\\"50\\\": 8932, \\\"88\\\": 4848, \\\"61\\\": 8789, \\\"53\\\": 8915, \\\"75\\\": 5844, \\\"76\\\": 5844, \\\"154\\\": 192, \\\"122\\\": 768, \\\"65\\\": 8726, \\\"59\\\": 8794, \\\"97\\\": 2532, \\\"77\\\": 5844, \\\"92\\\": 4588, \\\"94\\\": 3222, \\\"78\\\": 5844, \\\"113\\\": 972, \\\"27\\\": 10786, \\\"109\\\": 1016, \\\"37\\\": 9038, \\\"114\\\": 972, \\\"128\\\": 648, \\\"102\\\": 1944, \\\"79\\\": 5844, \\\"96\\\": 2664, \\\"90\\\": 4848, \\\"80\\\": 5844, \\\"126\\\": 666, \\\"81\\\": 4170, \\\"105\\\": 1666, \\\"108\\\": 1224, \\\"125\\\": 714, \\\"134\\\": 384, \\\"123\\\": 768, \\\"169\\\": 64, 
\\\"120\\\": 870, \\\"70\\\": 7314, \\\"93\\\": 4444, \\\"33\\\": 9134, \\\"107\\\": 1296, \\\"91\\\": 4848, \\\"129\\\": 648, \\\"115\\\": 972, \\\"98\\\": 2414, \\\"155\\\": 192, \\\"170\\\": 64, \\\"156\\\": 192, \\\"135\\\": 378, \\\"121\\\": 781, \\\"157\\\": 192, \\\"103\\\": 1944, \\\"171\\\": 64, \\\"116\\\": 972, \\\"158\\\": 192, \\\"117\\\": 972, \\\"149\\\": 240, \\\"140\\\": 312, \\\"131\\\": 540, \\\"145\\\": 260, \\\"132\\\": 480, \\\"139\\\": 312, \\\"118\\\": 972, \\\"110\\\": 1016, \\\"146\\\": 260, \\\"144\\\": 300, \\\"198\\\": 12, \\\"159\\\": 192, \\\"150\\\": 240, \\\"106\\\": 1386, \\\"136\\\": 378, \\\"124\\\": 768, \\\"130\\\": 648, \\\"119\\\": 972, \\\"172\\\": 64, \\\"160\\\": 192, \\\"193\\\": 33, \\\"168\\\": 67, \\\"161\\\": 192, \\\"133\\\": 476, \\\"162\\\": 192, \\\"163\\\": 192, \\\"141\\\": 312, \\\"151\\\": 240, \\\"165\\\": 144, \\\"182\\\": 57, \\\"183\\\": 57, \\\"185\\\": 57, \\\"152\\\": 204, \\\"184\\\": 57, \\\"137\\\": 378, \\\"194\\\": 24, \\\"187\\\": 57, \\\"186\\\": 57, \\\"148\\\": 252, \\\"181\\\": 60, \\\"164\\\": 192, \\\"173\\\": 64, \\\"147\\\": 260, \\\"167\\\": 84, \\\"143\\\": 312, \\\"142\\\": 312, \\\"138\\\": 360, \\\"190\\\": 48, \\\"191\\\": 36, \\\"174\\\": 64, \\\"175\\\": 64, \\\"192\\\": 36, \\\"195\\\": 24, \\\"176\\\": 64, \\\"188\\\": 57, \\\"177\\\": 64, \\\"189\\\": 57, \\\"178\\\": 64, \\\"166\\\": 126, \\\"196\\\": 24, \\\"179\\\": 64, \\\"197\\\": 24, \\\"180\\\": 64, \\\"199\\\": 12}\", \"index_word\": \"{\\\"1\\\": \\\"is\\\", \\\"2\\\": \\\"in\\\", \\\"3\\\": \\\"it\\\", \\\"4\\\": \\\"during\\\", \\\"5\\\": \\\"the\\\", \\\"6\\\": \\\"but\\\", \\\"7\\\": \\\"and\\\", \\\"8\\\": \\\"sometimes\\\", \\\"9\\\": \\\"usually\\\", \\\"10\\\": \\\"never\\\", \\\"11\\\": \\\"favorite\\\", \\\"12\\\": \\\"least\\\", \\\"13\\\": \\\"fruit\\\", \\\"14\\\": \\\"most\\\", \\\"15\\\": \\\"loved\\\", \\\"16\\\": \\\"liked\\\", \\\"17\\\": \\\"new\\\", \\\"18\\\": \\\"paris\\\", \\\"19\\\": \\\"india\\\", \\\"20\\\": \\\"united\\\", \\\"21\\\": \\\"states\\\", \\\"22\\\": \\\"california\\\", \\\"23\\\": \\\"jersey\\\", \\\"24\\\": \\\"france\\\", \\\"25\\\": \\\"china\\\", \\\"26\\\": \\\"he\\\", \\\"27\\\": \\\"she\\\", \\\"28\\\": \\\"grapefruit\\\", \\\"29\\\": \\\"your\\\", \\\"30\\\": \\\"my\\\", \\\"31\\\": \\\"his\\\", \\\"32\\\": \\\"her\\\", \\\"33\\\": \\\"fall\\\", \\\"34\\\": \\\"june\\\", \\\"35\\\": \\\"spring\\\", \\\"36\\\": \\\"january\\\", \\\"37\\\": \\\"winter\\\", \\\"38\\\": \\\"march\\\", \\\"39\\\": \\\"autumn\\\", \\\"40\\\": \\\"may\\\", \\\"41\\\": \\\"nice\\\", \\\"42\\\": \\\"september\\\", \\\"43\\\": \\\"july\\\", \\\"44\\\": \\\"april\\\", \\\"45\\\": \\\"november\\\", \\\"46\\\": \\\"summer\\\", \\\"47\\\": \\\"december\\\", \\\"48\\\": \\\"february\\\", \\\"49\\\": \\\"our\\\", \\\"50\\\": \\\"their\\\", \\\"51\\\": \\\"freezing\\\", \\\"52\\\": \\\"pleasant\\\", \\\"53\\\": \\\"beautiful\\\", \\\"54\\\": \\\"october\\\", \\\"55\\\": \\\"snowy\\\", \\\"56\\\": \\\"warm\\\", \\\"57\\\": \\\"cold\\\", \\\"58\\\": \\\"wonderful\\\", \\\"59\\\": \\\"dry\\\", \\\"60\\\": \\\"busy\\\", \\\"61\\\": \\\"august\\\", \\\"62\\\": \\\"chilly\\\", \\\"63\\\": \\\"rainy\\\", \\\"64\\\": \\\"mild\\\", \\\"65\\\": \\\"wet\\\", \\\"66\\\": \\\"relaxing\\\", \\\"67\\\": \\\"quiet\\\", \\\"68\\\": \\\"hot\\\", \\\"69\\\": \\\"dislikes\\\", \\\"70\\\": \\\"likes\\\", \\\"71\\\": \\\"limes\\\", \\\"72\\\": \\\"lemons\\\", \\\"73\\\": \\\"grapes\\\", \\\"74\\\": \\\"mangoes\\\", \\\"75\\\": \\\"apples\\\", 
\\\"76\\\": \\\"peaches\\\", \\\"77\\\": \\\"oranges\\\", \\\"78\\\": \\\"pears\\\", \\\"79\\\": \\\"strawberries\\\", \\\"80\\\": \\\"bananas\\\", \\\"81\\\": \\\"to\\\", \\\"82\\\": \\\"grape\\\", \\\"83\\\": \\\"apple\\\", \\\"84\\\": \\\"orange\\\", \\\"85\\\": \\\"lemon\\\", \\\"86\\\": \\\"lime\\\", \\\"87\\\": \\\"banana\\\", \\\"88\\\": \\\"mango\\\", \\\"89\\\": \\\"pear\\\", \\\"90\\\": \\\"strawberry\\\", \\\"91\\\": \\\"peach\\\", \\\"92\\\": \\\"like\\\", \\\"93\\\": \\\"dislike\\\", \\\"94\\\": \\\"they\\\", \\\"95\\\": \\\"that\\\", \\\"96\\\": \\\"i\\\", \\\"97\\\": \\\"we\\\", \\\"98\\\": \\\"you\\\", \\\"99\\\": \\\"animal\\\", \\\"100\\\": \\\"a\\\", \\\"101\\\": \\\"truck\\\", \\\"102\\\": \\\"car\\\", \\\"103\\\": \\\"automobile\\\", \\\"104\\\": \\\"was\\\", \\\"105\\\": \\\"next\\\", \\\"106\\\": \\\"go\\\", \\\"107\\\": \\\"driving\\\", \\\"108\\\": \\\"visit\\\", \\\"109\\\": \\\"little\\\", \\\"110\\\": \\\"big\\\", \\\"111\\\": \\\"old\\\", \\\"112\\\": \\\"yellow\\\", \\\"113\\\": \\\"red\\\", \\\"114\\\": \\\"rusty\\\", \\\"115\\\": \\\"blue\\\", \\\"116\\\": \\\"white\\\", \\\"117\\\": \\\"black\\\", \\\"118\\\": \\\"green\\\", \\\"119\\\": \\\"shiny\\\", \\\"120\\\": \\\"are\\\", \\\"121\\\": \\\"last\\\", \\\"122\\\": \\\"feared\\\", \\\"123\\\": \\\"animals\\\", \\\"124\\\": \\\"this\\\", \\\"125\\\": \\\"plan\\\", \\\"126\\\": \\\"going\\\", \\\"127\\\": \\\"saw\\\", \\\"128\\\": \\\"disliked\\\", \\\"129\\\": \\\"drives\\\", \\\"130\\\": \\\"drove\\\", \\\"131\\\": \\\"between\\\", \\\"132\\\": \\\"translate\\\", \\\"133\\\": \\\"plans\\\", \\\"134\\\": \\\"were\\\", \\\"135\\\": \\\"went\\\", \\\"136\\\": \\\"might\\\", \\\"137\\\": \\\"wanted\\\", \\\"138\\\": \\\"thinks\\\", \\\"139\\\": \\\"spanish\\\", \\\"140\\\": \\\"portuguese\\\", \\\"141\\\": \\\"chinese\\\", \\\"142\\\": \\\"english\\\", \\\"143\\\": \\\"french\\\", \\\"144\\\": \\\"translating\\\", \\\"145\\\": \\\"difficult\\\", \\\"146\\\": \\\"fun\\\", \\\"147\\\": \\\"easy\\\", \\\"148\\\": \\\"wants\\\", \\\"149\\\": \\\"think\\\", \\\"150\\\": \\\"why\\\", \\\"151\\\": \\\"it's\\\", \\\"152\\\": \\\"did\\\", \\\"153\\\": \\\"cat\\\", \\\"154\\\": \\\"shark\\\", \\\"155\\\": \\\"bird\\\", \\\"156\\\": \\\"mouse\\\", \\\"157\\\": \\\"horse\\\", \\\"158\\\": \\\"elephant\\\", \\\"159\\\": \\\"dog\\\", \\\"160\\\": \\\"monkey\\\", \\\"161\\\": \\\"lion\\\", \\\"162\\\": \\\"bear\\\", \\\"163\\\": \\\"rabbit\\\", \\\"164\\\": \\\"snake\\\", \\\"165\\\": \\\"when\\\", \\\"166\\\": \\\"want\\\", \\\"167\\\": \\\"do\\\", \\\"168\\\": \\\"how\\\", \\\"169\\\": \\\"elephants\\\", \\\"170\\\": \\\"horses\\\", \\\"171\\\": \\\"dogs\\\", \\\"172\\\": \\\"sharks\\\", \\\"173\\\": \\\"snakes\\\", \\\"174\\\": \\\"cats\\\", \\\"175\\\": \\\"rabbits\\\", \\\"176\\\": \\\"monkeys\\\", \\\"177\\\": \\\"bears\\\", \\\"178\\\": \\\"birds\\\", \\\"179\\\": \\\"lions\\\", \\\"180\\\": \\\"mice\\\", \\\"181\\\": \\\"didn't\\\", \\\"182\\\": \\\"eiffel\\\", \\\"183\\\": \\\"tower\\\", \\\"184\\\": \\\"grocery\\\", \\\"185\\\": \\\"store\\\", \\\"186\\\": \\\"football\\\", \\\"187\\\": \\\"field\\\", \\\"188\\\": \\\"lake\\\", \\\"189\\\": \\\"school\\\", \\\"190\\\": \\\"would\\\", \\\"191\\\": \\\"aren't\\\", \\\"192\\\": \\\"been\\\", \\\"193\\\": \\\"weather\\\", \\\"194\\\": \\\"does\\\", \\\"195\\\": \\\"has\\\", \\\"196\\\": \\\"isn't\\\", \\\"197\\\": \\\"am\\\", \\\"198\\\": \\\"where\\\", \\\"199\\\": \\\"have\\\"}\", \"word_index\": \"{\\\"is\\\": 1, \\\"in\\\": 2, \\\"it\\\": 3, \\\"during\\\": 4, 
\\\"the\\\": 5, \\\"but\\\": 6, \\\"and\\\": 7, \\\"sometimes\\\": 8, \\\"usually\\\": 9, \\\"never\\\": 10, \\\"favorite\\\": 11, \\\"least\\\": 12, \\\"fruit\\\": 13, \\\"most\\\": 14, \\\"loved\\\": 15, \\\"liked\\\": 16, \\\"new\\\": 17, \\\"paris\\\": 18, \\\"india\\\": 19, \\\"united\\\": 20, \\\"states\\\": 21, \\\"california\\\": 22, \\\"jersey\\\": 23, \\\"france\\\": 24, \\\"china\\\": 25, \\\"he\\\": 26, \\\"she\\\": 27, \\\"grapefruit\\\": 28, \\\"your\\\": 29, \\\"my\\\": 30, \\\"his\\\": 31, \\\"her\\\": 32, \\\"fall\\\": 33, \\\"june\\\": 34, \\\"spring\\\": 35, \\\"january\\\": 36, \\\"winter\\\": 37, \\\"march\\\": 38, \\\"autumn\\\": 39, \\\"may\\\": 40, \\\"nice\\\": 41, \\\"september\\\": 42, \\\"july\\\": 43, \\\"april\\\": 44, \\\"november\\\": 45, \\\"summer\\\": 46, \\\"december\\\": 47, \\\"february\\\": 48, \\\"our\\\": 49, \\\"their\\\": 50, \\\"freezing\\\": 51, \\\"pleasant\\\": 52, \\\"beautiful\\\": 53, \\\"october\\\": 54, \\\"snowy\\\": 55, \\\"warm\\\": 56, \\\"cold\\\": 57, \\\"wonderful\\\": 58, \\\"dry\\\": 59, \\\"busy\\\": 60, \\\"august\\\": 61, \\\"chilly\\\": 62, \\\"rainy\\\": 63, \\\"mild\\\": 64, \\\"wet\\\": 65, \\\"relaxing\\\": 66, \\\"quiet\\\": 67, \\\"hot\\\": 68, \\\"dislikes\\\": 69, \\\"likes\\\": 70, \\\"limes\\\": 71, \\\"lemons\\\": 72, \\\"grapes\\\": 73, \\\"mangoes\\\": 74, \\\"apples\\\": 75, \\\"peaches\\\": 76, \\\"oranges\\\": 77, \\\"pears\\\": 78, \\\"strawberries\\\": 79, \\\"bananas\\\": 80, \\\"to\\\": 81, \\\"grape\\\": 82, \\\"apple\\\": 83, \\\"orange\\\": 84, \\\"lemon\\\": 85, \\\"lime\\\": 86, \\\"banana\\\": 87, \\\"mango\\\": 88, \\\"pear\\\": 89, \\\"strawberry\\\": 90, \\\"peach\\\": 91, \\\"like\\\": 92, \\\"dislike\\\": 93, \\\"they\\\": 94, \\\"that\\\": 95, \\\"i\\\": 96, \\\"we\\\": 97, \\\"you\\\": 98, \\\"animal\\\": 99, \\\"a\\\": 100, \\\"truck\\\": 101, \\\"car\\\": 102, \\\"automobile\\\": 103, \\\"was\\\": 104, \\\"next\\\": 105, \\\"go\\\": 106, \\\"driving\\\": 107, \\\"visit\\\": 108, \\\"little\\\": 109, \\\"big\\\": 110, \\\"old\\\": 111, \\\"yellow\\\": 112, \\\"red\\\": 113, \\\"rusty\\\": 114, \\\"blue\\\": 115, \\\"white\\\": 116, \\\"black\\\": 117, \\\"green\\\": 118, \\\"shiny\\\": 119, \\\"are\\\": 120, \\\"last\\\": 121, \\\"feared\\\": 122, \\\"animals\\\": 123, \\\"this\\\": 124, \\\"plan\\\": 125, \\\"going\\\": 126, \\\"saw\\\": 127, \\\"disliked\\\": 128, \\\"drives\\\": 129, \\\"drove\\\": 130, \\\"between\\\": 131, \\\"translate\\\": 132, \\\"plans\\\": 133, \\\"were\\\": 134, \\\"went\\\": 135, \\\"might\\\": 136, \\\"wanted\\\": 137, \\\"thinks\\\": 138, \\\"spanish\\\": 139, \\\"portuguese\\\": 140, \\\"chinese\\\": 141, \\\"english\\\": 142, \\\"french\\\": 143, \\\"translating\\\": 144, \\\"difficult\\\": 145, \\\"fun\\\": 146, \\\"easy\\\": 147, \\\"wants\\\": 148, \\\"think\\\": 149, \\\"why\\\": 150, \\\"it's\\\": 151, \\\"did\\\": 152, \\\"cat\\\": 153, \\\"shark\\\": 154, \\\"bird\\\": 155, \\\"mouse\\\": 156, \\\"horse\\\": 157, \\\"elephant\\\": 158, \\\"dog\\\": 159, \\\"monkey\\\": 160, \\\"lion\\\": 161, \\\"bear\\\": 162, \\\"rabbit\\\": 163, \\\"snake\\\": 164, \\\"when\\\": 165, \\\"want\\\": 166, \\\"do\\\": 167, \\\"how\\\": 168, \\\"elephants\\\": 169, \\\"horses\\\": 170, \\\"dogs\\\": 171, \\\"sharks\\\": 172, \\\"snakes\\\": 173, \\\"cats\\\": 174, \\\"rabbits\\\": 175, \\\"monkeys\\\": 176, \\\"bears\\\": 177, \\\"birds\\\": 178, \\\"lions\\\": 179, \\\"mice\\\": 180, \\\"didn't\\\": 181, \\\"eiffel\\\": 182, \\\"tower\\\": 183, \\\"grocery\\\": 184, 
\\\"store\\\": 185, \\\"football\\\": 186, \\\"field\\\": 187, \\\"lake\\\": 188, \\\"school\\\": 189, \\\"would\\\": 190, \\\"aren't\\\": 191, \\\"been\\\": 192, \\\"weather\\\": 193, \\\"does\\\": 194, \\\"has\\\": 195, \\\"isn't\\\": 196, \\\"am\\\": 197, \\\"where\\\": 198, \\\"have\\\": 199}\"}}"
Task 2/french_tokenizer.json ADDED
@@ -0,0 +1 @@
+ "{\"class_name\": \"Tokenizer\", \"config\": {\"num_words\": null, \"filters\": \"!\\\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\\t\\n\", \"lower\": true, \"split\": \" \", \"char_level\": false, \"oov_token\": null, \"document_count\": 137861, \"word_counts\": \"{\\\"new\\\": 11047, \\\"jersey\\\": 11052, \\\"est\\\": 196821, \\\"parfois\\\": 37746, \\\"calme\\\": 7256, \\\"pendant\\\": 10741, \\\"l'\\\": 32917, \\\"automne\\\": 14727, \\\"et\\\": 59851, \\\"il\\\": 84115, \\\"neigeux\\\": 1867, \\\"en\\\": 105768, \\\"avril\\\": 8954, \\\"les\\\": 65255, \\\"\\\\u00e9tats\\\": 11267, \\\"unis\\\": 11270, \\\"g\\\\u00e9n\\\\u00e9ralement\\\": 31292, \\\"froid\\\": 16794, \\\"juillet\\\": 8956, \\\"g\\\\u00e8le\\\": 3622, \\\"habituellement\\\": 6215, \\\"novembre\\\": 8951, \\\"california\\\": 3061, \\\"mars\\\": 9023, \\\"chaud\\\": 16405, \\\"juin\\\": 9133, \\\"l\\\\u00e9g\\\\u00e8re\\\": 63, \\\"fait\\\": 2916, \\\"septembre\\\": 8958, \\\"votre\\\": 9368, \\\"moins\\\": 27557, \\\"aim\\\\u00e9\\\": 25852, \\\"fruit\\\": 23626, \\\"le\\\": 35306, \\\"raisin\\\": 4852, \\\"mais\\\": 63987, \\\"mon\\\": 9403, \\\"la\\\": 49861, \\\"pomme\\\": 4848, \\\"son\\\": 16496, \\\"pr\\\\u00e9f\\\\u00e9r\\\\u00e9\\\": 23305, \\\"l'orange\\\": 4848, \\\"paris\\\": 11334, \\\"relaxant\\\": 8458, \\\"d\\\\u00e9cembre\\\": 8945, \\\"occup\\\\u00e9\\\": 7782, \\\"au\\\": 25738, \\\"printemps\\\": 9100, \\\"jamais\\\": 37215, \\\"chaude\\\": 1124, \\\"notre\\\": 8319, \\\"citron\\\": 4848, \\\"janvier\\\": 9090, \\\"chaux\\\": 4848, \\\"des\\\": 2435, \\\"fruits\\\": 3566, \\\"banane\\\": 4848, \\\"a\\\": 1356, \\\"vu\\\": 645, \\\"un\\\": 698, \\\"vieux\\\": 325, \\\"camion\\\": 1944, \\\"jaune\\\": 972, \\\"inde\\\": 11277, \\\"pluvieux\\\": 7658, \\\"ce\\\": 1572, \\\"chat\\\": 192, \\\"\\\\u00e9tait\\\": 1198, \\\"animal\\\": 2248, \\\"plus\\\": 14934, \\\"n'aime\\\": 3131, \\\"pamplemousse\\\": 10140, \\\"citrons\\\": 11679, \\\"verts\\\": 5835, \\\"californie\\\": 8189, \\\"ne\\\": 2715, \\\"f\\\\u00e9vrier\\\": 8942, \\\"gel\\\": 4886, \\\"chine\\\": 10936, \\\"agr\\\\u00e9able\\\": 17751, \\\"octobre\\\": 8911, \\\"merveilleux\\\": 8704, \\\"doux\\\": 8458, \\\"tranquille\\\": 1437, \\\"\\\\u00e0\\\": 13870, \\\"l'automne\\\": 3411, \\\"\\\\u00e9t\\\\u00e9\\\": 8999, \\\"france\\\": 11170, \\\"mois\\\": 14350, \\\"de\\\": 15070, \\\"mai\\\": 8995, \\\"frisquet\\\": 834, \\\"d\\\\u00e9teste\\\": 3743, \\\"raisins\\\": 5780, \\\"mangues\\\": 5774, \\\"leur\\\": 7855, \\\"mangue\\\": 4899, \\\"poire\\\": 4848, \\\"ao\\\\u00fbt\\\": 8789, \\\"beau\\\": 6387, \\\"pommes\\\": 5844, \\\"p\\\\u00eaches\\\": 5844, \\\"redout\\\\u00e9\\\": 576, \\\"que\\\": 667, \\\"requin\\\": 192, \\\"humide\\\": 8446, \\\"d'\\\": 5100, \\\"sec\\\": 7957, \\\"enneig\\\\u00e9e\\\": 4008, \\\"nous\\\": 2520, \\\"aimons\\\": 1111, \\\"oranges\\\": 5844, \\\"ils\\\": 3221, \\\"aiment\\\": 1126, \\\"poires\\\": 5844, \\\"elle\\\": 12080, \\\"petit\\\": 324, \\\"rouge\\\": 972, \\\"cher\\\": 1308, \\\"aim\\\\u00e9e\\\": 105, \\\"neige\\\": 3016, \\\"trop\\\": 173, \\\"monde\\\": 173, \\\"hiver\\\": 9038, \\\"sont\\\": 1018, \\\"n'aimait\\\": 561, \\\"pas\\\": 4495, \\\"une\\\": 1278, \\\"voiture\\\": 3510, \\\"rouill\\\\u00e9e\\\": 486, \\\"fraises\\\": 5844, \\\"cours\\\": 1927, \\\"j'aime\\\": 966, \\\"fraise\\\": 4848, \\\"bananes\\\": 5844, \\\"va\\\": 355, \\\"aux\\\": 392, \\\"prochain\\\": 1666, \\\"je\\\": 1548, \\\"pr\\\\u00e9vois\\\": 233, \\\"visiter\\\": 908, \\\"belle\\\": 2726, \\\"\\\\u00e9l\\\\u00e9phants\\\": 64, 
\\\"\\\\u00e9taient\\\": 357, \\\"ses\\\": 402, \\\"animaux\\\": 768, \\\"redout\\\\u00e9s\\\": 190, \\\"vont\\\": 168, \\\"aime\\\": 8870, \\\"pr\\\\u00e9f\\\\u00e9r\\\\u00e9e\\\": 770, \\\"n'aiment\\\": 1111, \\\"i\\\": 150, \\\"comme\\\": 259, \\\"conduit\\\": 1706, \\\"p\\\\u00eache\\\": 4848, \\\"nouvelle\\\": 648, \\\"bleue\\\": 504, \\\"vous\\\": 2541, \\\"aimez\\\": 1053, \\\"cet\\\": 286, \\\"oiseau\\\": 128, \\\"pamplemousses\\\": 552, \\\"pleut\\\": 562, \\\"magnifique\\\": 104, \\\"favori\\\": 3857, \\\"vos\\\": 225, \\\"aim\\\\u00e9s\\\": 237, \\\"chevaux\\\": 64, \\\"n'aimez\\\": 1094, \\\"n'aimons\\\": 97, \\\"souris\\\": 256, \\\"d\\\\u00e9testons\\\": 1001, \\\"all\\\\u00e9\\\": 187, \\\"dernier\\\": 757, \\\"conduisait\\\": 673, \\\"petite\\\": 615, \\\"glaciales\\\": 307, \\\"cheval\\\": 192, \\\"vieille\\\": 647, \\\"chiens\\\": 64, \\\"pr\\\\u00e9f\\\\u00e9r\\\\u00e9s\\\": 383, \\\"blanche\\\": 579, \\\"occup\\\\u00e9e\\\": 836, \\\"nos\\\": 613, \\\"l'\\\\u00e9l\\\\u00e9phant\\\": 64, \\\"nouveau\\\": 502, \\\"noire\\\": 602, \\\"pluies\\\": 367, \\\"pense\\\": 540, \\\"qu'il\\\": 393, \\\"difficile\\\": 260, \\\"traduire\\\": 501, \\\"entre\\\": 540, \\\"espagnol\\\": 312, \\\"portugais\\\": 312, \\\"bleu\\\": 468, \\\"rouill\\\\u00e9\\\": 454, \\\"aimait\\\": 707, \\\"grande\\\": 459, \\\"verte\\\": 628, \\\"traduction\\\": 277, \\\"amusant\\\": 260, \\\"cette\\\": 1239, \\\"vert\\\": 344, \\\"grand\\\": 81, \\\"blanc\\\": 393, \\\"volant\\\": 165, \\\"gros\\\": 258, \\\"o\\\\u00f9\\\": 12, \\\"chien\\\": 192, \\\"leurs\\\": 1072, \\\"pourquoi\\\": 240, \\\"l'automobile\\\": 100, \\\"pourrait\\\": 252, \\\"se\\\": 461, \\\"rendre\\\": 350, \\\"pr\\\\u00e9voyons\\\": 232, \\\"maillot\\\": 173, \\\"grosse\\\": 185, \\\"brillant\\\": 587, \\\"pr\\\\u00e9voient\\\": 75, \\\"mouill\\\\u00e9e\\\": 7, \\\"lui\\\": 70, \\\"d\\\\u00e9tendre\\\": 111, \\\"automobile\\\": 278, \\\"pourraient\\\": 126, \\\"aller\\\": 1180, \\\"mes\\\": 297, \\\"s\\\\u00e8che\\\": 837, \\\"l'oiseau\\\": 64, \\\"pluie\\\": 174, \\\"requins\\\": 64, \\\"noir\\\": 370, \\\"singe\\\": 192, \\\"d\\\\u00e9testait\\\": 87, \\\"comment\\\": 67, \\\"temps\\\": 33, \\\"dans\\\": 12, \\\"lion\\\": 192, \\\"pr\\\\u00e9voit\\\": 75, \\\"ours\\\": 192, \\\"porcelaine\\\": 17, \\\"cl\\\\u00e9mentes\\\": 200, \\\"pla\\\\u00eet\\\": 13, \\\"proches\\\": 20, \\\"brillante\\\": 385, \\\"lapin\\\": 192, \\\"l'ours\\\": 64, \\\"chinois\\\": 312, \\\"quand\\\": 144, \\\"tour\\\": 57, \\\"eiffel\\\": 57, \\\"allons\\\": 45, \\\"l'\\\\u00e9picerie\\\": 57, \\\"voulait\\\": 252, \\\"c\\\\u00e9page\\\": 60, \\\"t\\\": 18, \\\"terrain\\\": 57, \\\"football\\\": 57, \\\"du\\\": 39, \\\"veut\\\": 252, \\\"\\\\u00e9l\\\\u00e9phant\\\": 128, \\\"gel\\\\u00e9\\\": 94, \\\"bien\\\": 77, \\\"enneig\\\\u00e9\\\": 7, \\\"gel\\\\u00e9s\\\": 5, \\\"serpent\\\": 192, \\\"all\\\\u00e9s\\\": 150, \\\"all\\\\u00e9e\\\": 150, \\\"envisage\\\": 360, \\\"peu\\\": 41, \\\"mouill\\\\u00e9\\\": 273, \\\"serpents\\\": 64, \\\"pensez\\\": 60, \\\"facile\\\": 260, \\\"anglais\\\": 312, \\\"fran\\\\u00e7ais\\\": 312, \\\"voulez\\\": 12, \\\"grandes\\\": 16, \\\"avez\\\": 162, \\\"aimeraient\\\": 12, \\\"allez\\\": 45, \\\"chats\\\": 64, \\\"lapins\\\": 64, \\\"visite\\\": 68, \\\"ont\\\": 194, \\\"intention\\\": 206, \\\"n'est\\\": 47, \\\"derni\\\\u00e8re\\\": 24, \\\"voulaient\\\": 126, \\\"singes\\\": 64, \\\"\\\\u00eates\\\": 24, \\\"qu'elle\\\": 26, \\\"vers\\\": 76, \\\"lac\\\": 57, \\\"pousse\\\": 41, \\\"d\\\\u00e9testez\\\": 17, 
\\\"manguiers\\\": 19, \\\"grands\\\": 9, \\\"l'\\\\u00e9cole\\\": 57, \\\"l'animal\\\": 56, \\\"at\\\": 32, \\\"oiseaux\\\": 64, \\\"ressort\\\": 2, \\\"petits\\\": 10, \\\"n'a\\\": 12, \\\"veulent\\\": 126, \\\"rouille\\\": 32, \\\"frais\\\": 20, \\\"limes\\\": 9, \\\"lions\\\": 64, \\\"douce\\\": 14, \\\"envisagent\\\": 9, \\\"petites\\\": 26, \\\"vais\\\": 24, \\\"durant\\\": 14, \\\"c'est\\\": 17, \\\"cong\\\\u00e9lation\\\": 14, \\\"allions\\\": 1, \\\"voudrait\\\": 24, \\\"d\\\\u00e9tend\\\": 2, \\\"trouv\\\\u00e9\\\": 1, \\\"pr\\\\u00e9f\\\\u00e9r\\\\u00e9es\\\": 16, \\\"conduite\\\": 6, \\\"grosses\\\": 8, \\\"b\\\\u00e9nigne\\\": 8, \\\"avons\\\": 19, \\\"sur\\\": 28, \\\"redout\\\\u00e9e\\\": 2, \\\"etats\\\": 3, \\\"moindres\\\": 7, \\\"n'\\\\u00eates\\\": 3, \\\"vit\\\": 3, \\\"as\\\": 1, \\\"tu\\\": 2, \\\"qui\\\": 2, \\\"faire\\\": 1, \\\"traduis\\\": 2, \\\"favoris\\\": 1, \\\"souvent\\\": 1, \\\"es\\\": 1, \\\"appr\\\\u00e9ci\\\\u00e9\\\": 2, \\\"moteur\\\": 1, \\\"tout\\\": 4}\", \"word_docs\": \"{\\\"automne\\\": 14398, \\\"new\\\": 11047, \\\"calme\\\": 7256, \\\"parfois\\\": 32946, \\\"il\\\": 84114, \\\"et\\\": 59851, \\\"jersey\\\": 11052, \\\"l'\\\": 28828, \\\"est\\\": 104279, \\\"avril\\\": 8954, \\\"neigeux\\\": 1867, \\\"en\\\": 72048, \\\"pendant\\\": 10741, \\\"g\\\\u00e9n\\\\u00e9ralement\\\": 28101, \\\"g\\\\u00e8le\\\": 3622, \\\"juillet\\\": 8956, \\\"habituellement\\\": 6112, \\\"unis\\\": 11270, \\\"les\\\": 33404, \\\"froid\\\": 16323, \\\"novembre\\\": 8951, \\\"\\\\u00e9tats\\\": 11267, \\\"california\\\": 3061, \\\"juin\\\": 9133, \\\"mars\\\": 9023, \\\"chaud\\\": 15908, \\\"septembre\\\": 8958, \\\"l\\\\u00e9g\\\\u00e8re\\\": 63, \\\"fait\\\": 2916, \\\"pomme\\\": 4848, \\\"la\\\": 39038, \\\"fruit\\\": 23626, \\\"votre\\\": 9368, \\\"raisin\\\": 4852, \\\"mon\\\": 9403, \\\"le\\\": 28877, \\\"mais\\\": 63987, \\\"moins\\\": 14357, \\\"aim\\\\u00e9\\\": 14038, \\\"pr\\\\u00e9f\\\\u00e9r\\\\u00e9\\\": 14722, \\\"l'orange\\\": 4848, \\\"son\\\": 15184, \\\"d\\\\u00e9cembre\\\": 8945, \\\"paris\\\": 11334, \\\"relaxant\\\": 8458, \\\"chaude\\\": 1124, \\\"au\\\": 24675, \\\"printemps\\\": 9100, \\\"jamais\\\": 32618, \\\"occup\\\\u00e9\\\": 7782, \\\"notre\\\": 8319, \\\"citron\\\": 4848, \\\"janvier\\\": 9090, \\\"des\\\": 2435, \\\"chaux\\\": 4848, \\\"banane\\\": 4848, \\\"fruits\\\": 3566, \\\"vu\\\": 645, \\\"un\\\": 698, \\\"camion\\\": 1944, \\\"vieux\\\": 325, \\\"a\\\": 1356, \\\"jaune\\\": 972, \\\"pluvieux\\\": 7658, \\\"inde\\\": 11277, \\\"plus\\\": 8334, \\\"chat\\\": 192, \\\"animal\\\": 2248, \\\"ce\\\": 1572, \\\"\\\\u00e9tait\\\": 1198, \\\"pamplemousse\\\": 10140, \\\"n'aime\\\": 3131, \\\"citrons\\\": 10568, \\\"verts\\\": 5835, \\\"californie\\\": 8189, \\\"ne\\\": 2707, \\\"gel\\\": 4886, \\\"f\\\\u00e9vrier\\\": 8942, \\\"chine\\\": 10936, \\\"octobre\\\": 8910, \\\"agr\\\\u00e9able\\\": 17151, \\\"merveilleux\\\": 8704, \\\"doux\\\": 8458, \\\"tranquille\\\": 1437, \\\"l'automne\\\": 3411, \\\"\\\\u00e0\\\": 13381, \\\"\\\\u00e9t\\\\u00e9\\\": 8999, \\\"france\\\": 11170, \\\"de\\\": 14740, \\\"mai\\\": 8995, \\\"frisquet\\\": 834, \\\"mois\\\": 14311, \\\"raisins\\\": 5780, \\\"d\\\\u00e9teste\\\": 3743, \\\"mangues\\\": 5774, \\\"mangue\\\": 4899, \\\"leur\\\": 7855, \\\"poire\\\": 4848, \\\"ao\\\\u00fbt\\\": 8789, \\\"beau\\\": 6387, \\\"pommes\\\": 5844, \\\"p\\\\u00eaches\\\": 5844, \\\"redout\\\\u00e9\\\": 576, \\\"que\\\": 667, \\\"requin\\\": 192, \\\"humide\\\": 8446, \\\"d'\\\": 5100, \\\"sec\\\": 7957, 
\\\"enneig\\\\u00e9e\\\": 4008, \\\"nous\\\": 2520, \\\"aimons\\\": 1111, \\\"oranges\\\": 5844, \\\"poires\\\": 5844, \\\"aiment\\\": 1126, \\\"ils\\\": 3221, \\\"petit\\\": 324, \\\"rouge\\\": 972, \\\"elle\\\": 12080, \\\"cher\\\": 1308, \\\"aim\\\\u00e9e\\\": 105, \\\"monde\\\": 173, \\\"neige\\\": 3016, \\\"trop\\\": 173, \\\"hiver\\\": 9038, \\\"sont\\\": 1018, \\\"une\\\": 1278, \\\"voiture\\\": 3510, \\\"rouill\\\\u00e9e\\\": 486, \\\"pas\\\": 4495, \\\"n'aimait\\\": 561, \\\"fraises\\\": 5844, \\\"cours\\\": 1927, \\\"j'aime\\\": 966, \\\"fraise\\\": 4848, \\\"bananes\\\": 5844, \\\"va\\\": 355, \\\"prochain\\\": 1666, \\\"aux\\\": 392, \\\"pr\\\\u00e9vois\\\": 233, \\\"je\\\": 1548, \\\"visiter\\\": 908, \\\"belle\\\": 2725, \\\"\\\\u00e9taient\\\": 357, \\\"ses\\\": 402, \\\"redout\\\\u00e9s\\\": 190, \\\"animaux\\\": 768, \\\"\\\\u00e9l\\\\u00e9phants\\\": 64, \\\"vont\\\": 168, \\\"aime\\\": 8870, \\\"pr\\\\u00e9f\\\\u00e9r\\\\u00e9e\\\": 770, \\\"n'aiment\\\": 1111, \\\"comme\\\": 259, \\\"i\\\": 150, \\\"conduit\\\": 1706, \\\"p\\\\u00eache\\\": 4848, \\\"bleue\\\": 504, \\\"nouvelle\\\": 648, \\\"vous\\\": 2541, \\\"aimez\\\": 1053, \\\"oiseau\\\": 128, \\\"cet\\\": 286, \\\"pamplemousses\\\": 552, \\\"pleut\\\": 562, \\\"magnifique\\\": 104, \\\"favori\\\": 3857, \\\"vos\\\": 225, \\\"chevaux\\\": 64, \\\"aim\\\\u00e9s\\\": 237, \\\"n'aimez\\\": 1094, \\\"n'aimons\\\": 97, \\\"souris\\\": 256, \\\"d\\\\u00e9testons\\\": 1001, \\\"all\\\\u00e9\\\": 187, \\\"dernier\\\": 757, \\\"petite\\\": 615, \\\"conduisait\\\": 673, \\\"glaciales\\\": 307, \\\"cheval\\\": 192, \\\"vieille\\\": 647, \\\"chiens\\\": 64, \\\"pr\\\\u00e9f\\\\u00e9r\\\\u00e9s\\\": 383, \\\"blanche\\\": 579, \\\"occup\\\\u00e9e\\\": 836, \\\"nos\\\": 613, \\\"l'\\\\u00e9l\\\\u00e9phant\\\": 64, \\\"nouveau\\\": 502, \\\"noire\\\": 602, \\\"pluies\\\": 367, \\\"difficile\\\": 260, \\\"portugais\\\": 312, \\\"qu'il\\\": 393, \\\"entre\\\": 540, \\\"pense\\\": 540, \\\"traduire\\\": 501, \\\"espagnol\\\": 312, \\\"rouill\\\\u00e9\\\": 454, \\\"bleu\\\": 468, \\\"aimait\\\": 707, \\\"verte\\\": 628, \\\"grande\\\": 459, \\\"traduction\\\": 277, \\\"amusant\\\": 260, \\\"cette\\\": 1239, \\\"vert\\\": 344, \\\"grand\\\": 81, \\\"blanc\\\": 393, \\\"gros\\\": 258, \\\"volant\\\": 165, \\\"o\\\\u00f9\\\": 12, \\\"chien\\\": 192, \\\"leurs\\\": 1072, \\\"pourquoi\\\": 240, \\\"l'automobile\\\": 100, \\\"se\\\": 461, \\\"rendre\\\": 350, \\\"pourrait\\\": 252, \\\"pr\\\\u00e9voyons\\\": 232, \\\"maillot\\\": 173, \\\"grosse\\\": 185, \\\"brillant\\\": 587, \\\"pr\\\\u00e9voient\\\": 75, \\\"mouill\\\\u00e9e\\\": 7, \\\"lui\\\": 70, \\\"d\\\\u00e9tendre\\\": 111, \\\"automobile\\\": 278, \\\"aller\\\": 1180, \\\"pourraient\\\": 126, \\\"mes\\\": 297, \\\"s\\\\u00e8che\\\": 837, \\\"l'oiseau\\\": 64, \\\"pluie\\\": 174, \\\"requins\\\": 64, \\\"noir\\\": 370, \\\"singe\\\": 192, \\\"d\\\\u00e9testait\\\": 87, \\\"temps\\\": 33, \\\"dans\\\": 12, \\\"comment\\\": 67, \\\"lion\\\": 192, \\\"pr\\\\u00e9voit\\\": 75, \\\"ours\\\": 192, \\\"porcelaine\\\": 17, \\\"cl\\\\u00e9mentes\\\": 200, \\\"pla\\\\u00eet\\\": 13, \\\"proches\\\": 20, \\\"brillante\\\": 385, \\\"lapin\\\": 192, \\\"l'ours\\\": 64, \\\"chinois\\\": 312, \\\"tour\\\": 57, \\\"eiffel\\\": 57, \\\"quand\\\": 144, \\\"allons\\\": 45, \\\"l'\\\\u00e9picerie\\\": 57, \\\"voulait\\\": 252, \\\"c\\\\u00e9page\\\": 60, \\\"football\\\": 57, \\\"t\\\": 18, \\\"terrain\\\": 57, \\\"du\\\": 39, \\\"veut\\\": 252, \\\"\\\\u00e9l\\\\u00e9phant\\\": 128, 
\\\"gel\\\\u00e9\\\": 94, \\\"bien\\\": 77, \\\"enneig\\\\u00e9\\\": 7, \\\"gel\\\\u00e9s\\\": 5, \\\"serpent\\\": 192, \\\"all\\\\u00e9s\\\": 150, \\\"all\\\\u00e9e\\\": 150, \\\"envisage\\\": 360, \\\"peu\\\": 41, \\\"mouill\\\\u00e9\\\": 273, \\\"serpents\\\": 64, \\\"facile\\\": 260, \\\"pensez\\\": 60, \\\"anglais\\\": 312, \\\"fran\\\\u00e7ais\\\": 312, \\\"voulez\\\": 12, \\\"grandes\\\": 16, \\\"avez\\\": 162, \\\"aimeraient\\\": 12, \\\"allez\\\": 45, \\\"chats\\\": 64, \\\"lapins\\\": 64, \\\"visite\\\": 68, \\\"ont\\\": 194, \\\"intention\\\": 206, \\\"n'est\\\": 47, \\\"derni\\\\u00e8re\\\": 24, \\\"voulaient\\\": 126, \\\"singes\\\": 64, \\\"\\\\u00eates\\\": 24, \\\"qu'elle\\\": 26, \\\"vers\\\": 76, \\\"lac\\\": 57, \\\"pousse\\\": 41, \\\"d\\\\u00e9testez\\\": 17, \\\"manguiers\\\": 19, \\\"grands\\\": 9, \\\"l'\\\\u00e9cole\\\": 57, \\\"l'animal\\\": 56, \\\"at\\\": 32, \\\"oiseaux\\\": 64, \\\"ressort\\\": 2, \\\"petits\\\": 10, \\\"n'a\\\": 12, \\\"veulent\\\": 126, \\\"rouille\\\": 32, \\\"frais\\\": 20, \\\"limes\\\": 9, \\\"lions\\\": 64, \\\"douce\\\": 14, \\\"envisagent\\\": 9, \\\"petites\\\": 26, \\\"vais\\\": 24, \\\"durant\\\": 14, \\\"c'est\\\": 17, \\\"cong\\\\u00e9lation\\\": 14, \\\"allions\\\": 1, \\\"voudrait\\\": 24, \\\"d\\\\u00e9tend\\\": 2, \\\"trouv\\\\u00e9\\\": 1, \\\"pr\\\\u00e9f\\\\u00e9r\\\\u00e9es\\\": 16, \\\"conduite\\\": 6, \\\"grosses\\\": 8, \\\"b\\\\u00e9nigne\\\": 8, \\\"avons\\\": 19, \\\"sur\\\": 28, \\\"redout\\\\u00e9e\\\": 2, \\\"etats\\\": 3, \\\"moindres\\\": 7, \\\"n'\\\\u00eates\\\": 3, \\\"vit\\\": 3, \\\"tu\\\": 2, \\\"as\\\": 1, \\\"qui\\\": 2, \\\"faire\\\": 1, \\\"traduis\\\": 2, \\\"favoris\\\": 1, \\\"souvent\\\": 1, \\\"es\\\": 1, \\\"appr\\\\u00e9ci\\\\u00e9\\\": 1, \\\"moteur\\\": 1, \\\"tout\\\": 4}\", \"index_docs\": \"{\\\"24\\\": 14398, \\\"35\\\": 11047, \\\"67\\\": 7256, \\\"8\\\": 32946, \\\"3\\\": 84114, \\\"6\\\": 59851, \\\"34\\\": 11052, \\\"11\\\": 28828, \\\"1\\\": 104279, \\\"50\\\": 8954, \\\"112\\\": 1867, \\\"2\\\": 72048, \\\"37\\\": 10741, \\\"12\\\": 28101, \\\"95\\\": 3622, \\\"49\\\": 8956, \\\"69\\\": 6112, \\\"31\\\": 11270, \\\"4\\\": 33404, \\\"19\\\": 16323, \\\"51\\\": 8951, \\\"32\\\": 11267, \\\"101\\\": 3061, \\\"41\\\": 9133, \\\"45\\\": 9023, \\\"21\\\": 15908, \\\"48\\\": 8958, \\\"269\\\": 63, \\\"103\\\": 2916, \\\"83\\\": 4848, \\\"7\\\": 39038, \\\"16\\\": 23626, \\\"40\\\": 9368, \\\"82\\\": 4852, \\\"39\\\": 9403, \\\"10\\\": 28877, \\\"5\\\": 63987, \\\"13\\\": 14357, \\\"14\\\": 14038, \\\"17\\\": 14722, \\\"84\\\": 4848, \\\"20\\\": 15184, \\\"52\\\": 8945, \\\"29\\\": 11334, \\\"58\\\": 8458, \\\"125\\\": 1124, \\\"15\\\": 24675, \\\"42\\\": 9100, \\\"9\\\": 32618, \\\"65\\\": 7782, \\\"61\\\": 8319, \\\"85\\\": 4848, \\\"43\\\": 9090, \\\"108\\\": 2435, \\\"86\\\": 4848, \\\"87\\\": 4848, \\\"96\\\": 3566, \\\"149\\\": 645, \\\"144\\\": 698, \\\"110\\\": 1944, \\\"183\\\": 325, \\\"118\\\": 1356, \\\"133\\\": 972, \\\"66\\\": 7658, \\\"30\\\": 11277, \\\"23\\\": 8334, \\\"213\\\": 192, \\\"109\\\": 2248, \\\"115\\\": 1572, \\\"122\\\": 1198, \\\"38\\\": 10140, \\\"100\\\": 3131, \\\"28\\\": 10568, \\\"76\\\": 5835, \\\"62\\\": 8189, \\\"105\\\": 2707, \\\"81\\\": 4886, \\\"53\\\": 8942, \\\"36\\\": 10936, \\\"54\\\": 8910, \\\"18\\\": 17151, \\\"57\\\": 8704, \\\"59\\\": 8458, \\\"117\\\": 1437, \\\"98\\\": 3411, \\\"26\\\": 13381, \\\"46\\\": 8999, \\\"33\\\": 11170, \\\"22\\\": 14740, \\\"47\\\": 8995, \\\"139\\\": 834, \\\"25\\\": 14311, \\\"77\\\": 5780, \\\"94\\\": 3743, 
\\\"78\\\": 5774, \\\"80\\\": 4899, \\\"64\\\": 7855, \\\"88\\\": 4848, \\\"56\\\": 8789, \\\"68\\\": 6387, \\\"70\\\": 5844, \\\"71\\\": 5844, \\\"156\\\": 576, \\\"146\\\": 667, \\\"214\\\": 192, \\\"60\\\": 8446, \\\"79\\\": 5100, \\\"63\\\": 7957, \\\"92\\\": 4008, \\\"107\\\": 2520, \\\"126\\\": 1111, \\\"72\\\": 5844, \\\"73\\\": 5844, \\\"124\\\": 1126, \\\"99\\\": 3221, \\\"184\\\": 324, \\\"134\\\": 972, \\\"27\\\": 12080, \\\"119\\\": 1308, \\\"242\\\": 105, \\\"227\\\": 173, \\\"102\\\": 3016, \\\"226\\\": 173, \\\"44\\\": 9038, \\\"131\\\": 1018, \\\"120\\\": 1278, \\\"97\\\": 3510, \\\"165\\\": 486, \\\"91\\\": 4495, \\\"158\\\": 561, \\\"74\\\": 5844, \\\"111\\\": 1927, \\\"135\\\": 966, \\\"89\\\": 4848, \\\"75\\\": 5844, \\\"180\\\": 355, \\\"114\\\": 1666, \\\"173\\\": 392, \\\"207\\\": 233, \\\"116\\\": 1548, \\\"136\\\": 908, \\\"104\\\": 2725, \\\"179\\\": 357, \\\"170\\\": 402, \\\"222\\\": 190, \\\"141\\\": 768, \\\"256\\\": 64, \\\"229\\\": 168, \\\"55\\\": 8870, \\\"140\\\": 770, \\\"127\\\": 1111, \\\"199\\\": 259, \\\"232\\\": 150, \\\"113\\\": 1706, \\\"90\\\": 4848, \\\"162\\\": 504, \\\"147\\\": 648, \\\"106\\\": 2541, \\\"130\\\": 1053, \\\"236\\\": 128, \\\"192\\\": 286, \\\"159\\\": 552, \\\"157\\\": 562, \\\"243\\\": 104, \\\"93\\\": 3857, \\\"209\\\": 225, \\\"257\\\": 64, \\\"206\\\": 237, \\\"128\\\": 1094, \\\"245\\\": 97, \\\"201\\\": 256, \\\"132\\\": 1001, \\\"223\\\": 187, \\\"142\\\": 757, \\\"151\\\": 615, \\\"145\\\": 673, \\\"190\\\": 307, \\\"215\\\": 192, \\\"148\\\": 647, \\\"258\\\": 64, \\\"175\\\": 383, \\\"155\\\": 579, \\\"138\\\": 836, \\\"152\\\": 613, \\\"259\\\": 64, \\\"163\\\": 502, \\\"153\\\": 602, \\\"177\\\": 367, \\\"196\\\": 260, \\\"186\\\": 312, \\\"171\\\": 393, \\\"161\\\": 540, \\\"160\\\": 540, \\\"164\\\": 501, \\\"185\\\": 312, \\\"169\\\": 454, \\\"166\\\": 468, \\\"143\\\": 707, \\\"150\\\": 628, \\\"168\\\": 459, \\\"194\\\": 277, \\\"197\\\": 260, \\\"121\\\": 1239, \\\"182\\\": 344, \\\"248\\\": 81, \\\"172\\\": 393, \\\"200\\\": 258, \\\"230\\\": 165, \\\"310\\\": 12, \\\"216\\\": 192, \\\"129\\\": 1072, \\\"205\\\": 240, \\\"244\\\": 100, \\\"167\\\": 461, \\\"181\\\": 350, \\\"202\\\": 252, \\\"208\\\": 232, \\\"228\\\": 173, \\\"224\\\": 185, \\\"154\\\": 587, \\\"251\\\": 75, \\\"321\\\": 7, \\\"253\\\": 70, \\\"241\\\": 111, \\\"193\\\": 278, \\\"123\\\": 1180, \\\"238\\\": 126, \\\"191\\\": 297, \\\"137\\\": 837, \\\"260\\\": 64, \\\"225\\\": 174, \\\"261\\\": 64, \\\"176\\\": 370, \\\"217\\\": 192, \\\"247\\\": 87, \\\"286\\\": 33, \\\"311\\\": 12, \\\"255\\\": 67, \\\"218\\\": 192, \\\"252\\\": 75, \\\"219\\\": 192, \\\"301\\\": 17, \\\"211\\\": 200, \\\"309\\\": 13, \\\"296\\\": 20, \\\"174\\\": 385, \\\"220\\\": 192, \\\"262\\\": 64, \\\"187\\\": 312, \\\"272\\\": 57, \\\"273\\\": 57, \\\"235\\\": 144, \\\"281\\\": 45, \\\"274\\\": 57, \\\"203\\\": 252, \\\"270\\\": 60, \\\"276\\\": 57, \\\"300\\\": 18, \\\"275\\\": 57, \\\"285\\\": 39, \\\"204\\\": 252, \\\"237\\\": 128, \\\"246\\\": 94, \\\"249\\\": 77, \\\"322\\\": 7, \\\"325\\\": 5, \\\"221\\\": 192, \\\"233\\\": 150, \\\"234\\\": 150, \\\"178\\\": 360, \\\"283\\\": 41, \\\"195\\\": 273, \\\"263\\\": 64, \\\"198\\\": 260, \\\"271\\\": 60, \\\"188\\\": 312, \\\"189\\\": 312, \\\"312\\\": 12, \\\"304\\\": 16, \\\"231\\\": 162, \\\"313\\\": 12, \\\"282\\\": 45, \\\"264\\\": 64, \\\"265\\\": 64, \\\"254\\\": 68, \\\"212\\\": 194, \\\"210\\\": 206, \\\"280\\\": 47, \\\"292\\\": 24, \\\"239\\\": 126, \\\"266\\\": 64, \\\"293\\\": 24, \\\"290\\\": 26, 
\\\"250\\\": 76, \\\"277\\\": 57, \\\"284\\\": 41, \\\"302\\\": 17, \\\"298\\\": 19, \\\"316\\\": 9, \\\"278\\\": 57, \\\"279\\\": 56, \\\"287\\\": 32, \\\"267\\\": 64, \\\"330\\\": 2, \\\"315\\\": 10, \\\"314\\\": 12, \\\"240\\\": 126, \\\"288\\\": 32, \\\"297\\\": 20, \\\"317\\\": 9, \\\"268\\\": 64, \\\"306\\\": 14, \\\"318\\\": 9, \\\"291\\\": 26, \\\"294\\\": 24, \\\"307\\\": 14, \\\"303\\\": 17, \\\"308\\\": 14, \\\"337\\\": 1, \\\"295\\\": 24, \\\"331\\\": 2, \\\"338\\\": 1, \\\"305\\\": 16, \\\"324\\\": 6, \\\"319\\\": 8, \\\"320\\\": 8, \\\"299\\\": 19, \\\"289\\\": 28, \\\"332\\\": 2, \\\"327\\\": 3, \\\"323\\\": 7, \\\"328\\\": 3, \\\"329\\\": 3, \\\"333\\\": 2, \\\"339\\\": 1, \\\"334\\\": 2, \\\"340\\\": 1, \\\"335\\\": 2, \\\"341\\\": 1, \\\"342\\\": 1, \\\"343\\\": 1, \\\"336\\\": 1, \\\"344\\\": 1, \\\"326\\\": 4}\", \"index_word\": \"{\\\"1\\\": \\\"est\\\", \\\"2\\\": \\\"en\\\", \\\"3\\\": \\\"il\\\", \\\"4\\\": \\\"les\\\", \\\"5\\\": \\\"mais\\\", \\\"6\\\": \\\"et\\\", \\\"7\\\": \\\"la\\\", \\\"8\\\": \\\"parfois\\\", \\\"9\\\": \\\"jamais\\\", \\\"10\\\": \\\"le\\\", \\\"11\\\": \\\"l'\\\", \\\"12\\\": \\\"g\\\\u00e9n\\\\u00e9ralement\\\", \\\"13\\\": \\\"moins\\\", \\\"14\\\": \\\"aim\\\\u00e9\\\", \\\"15\\\": \\\"au\\\", \\\"16\\\": \\\"fruit\\\", \\\"17\\\": \\\"pr\\\\u00e9f\\\\u00e9r\\\\u00e9\\\", \\\"18\\\": \\\"agr\\\\u00e9able\\\", \\\"19\\\": \\\"froid\\\", \\\"20\\\": \\\"son\\\", \\\"21\\\": \\\"chaud\\\", \\\"22\\\": \\\"de\\\", \\\"23\\\": \\\"plus\\\", \\\"24\\\": \\\"automne\\\", \\\"25\\\": \\\"mois\\\", \\\"26\\\": \\\"\\\\u00e0\\\", \\\"27\\\": \\\"elle\\\", \\\"28\\\": \\\"citrons\\\", \\\"29\\\": \\\"paris\\\", \\\"30\\\": \\\"inde\\\", \\\"31\\\": \\\"unis\\\", \\\"32\\\": \\\"\\\\u00e9tats\\\", \\\"33\\\": \\\"france\\\", \\\"34\\\": \\\"jersey\\\", \\\"35\\\": \\\"new\\\", \\\"36\\\": \\\"chine\\\", \\\"37\\\": \\\"pendant\\\", \\\"38\\\": \\\"pamplemousse\\\", \\\"39\\\": \\\"mon\\\", \\\"40\\\": \\\"votre\\\", \\\"41\\\": \\\"juin\\\", \\\"42\\\": \\\"printemps\\\", \\\"43\\\": \\\"janvier\\\", \\\"44\\\": \\\"hiver\\\", \\\"45\\\": \\\"mars\\\", \\\"46\\\": \\\"\\\\u00e9t\\\\u00e9\\\", \\\"47\\\": \\\"mai\\\", \\\"48\\\": \\\"septembre\\\", \\\"49\\\": \\\"juillet\\\", \\\"50\\\": \\\"avril\\\", \\\"51\\\": \\\"novembre\\\", \\\"52\\\": \\\"d\\\\u00e9cembre\\\", \\\"53\\\": \\\"f\\\\u00e9vrier\\\", \\\"54\\\": \\\"octobre\\\", \\\"55\\\": \\\"aime\\\", \\\"56\\\": \\\"ao\\\\u00fbt\\\", \\\"57\\\": \\\"merveilleux\\\", \\\"58\\\": \\\"relaxant\\\", \\\"59\\\": \\\"doux\\\", \\\"60\\\": \\\"humide\\\", \\\"61\\\": \\\"notre\\\", \\\"62\\\": \\\"californie\\\", \\\"63\\\": \\\"sec\\\", \\\"64\\\": \\\"leur\\\", \\\"65\\\": \\\"occup\\\\u00e9\\\", \\\"66\\\": \\\"pluvieux\\\", \\\"67\\\": \\\"calme\\\", \\\"68\\\": \\\"beau\\\", \\\"69\\\": \\\"habituellement\\\", \\\"70\\\": \\\"pommes\\\", \\\"71\\\": \\\"p\\\\u00eaches\\\", \\\"72\\\": \\\"oranges\\\", \\\"73\\\": \\\"poires\\\", \\\"74\\\": \\\"fraises\\\", \\\"75\\\": \\\"bananes\\\", \\\"76\\\": \\\"verts\\\", \\\"77\\\": \\\"raisins\\\", \\\"78\\\": \\\"mangues\\\", \\\"79\\\": \\\"d'\\\", \\\"80\\\": \\\"mangue\\\", \\\"81\\\": \\\"gel\\\", \\\"82\\\": \\\"raisin\\\", \\\"83\\\": \\\"pomme\\\", \\\"84\\\": \\\"l'orange\\\", \\\"85\\\": \\\"citron\\\", \\\"86\\\": \\\"chaux\\\", \\\"87\\\": \\\"banane\\\", \\\"88\\\": \\\"poire\\\", \\\"89\\\": \\\"fraise\\\", \\\"90\\\": \\\"p\\\\u00eache\\\", \\\"91\\\": \\\"pas\\\", \\\"92\\\": \\\"enneig\\\\u00e9e\\\", \\\"93\\\": \\\"favori\\\", 
\\\"94\\\": \\\"d\\\\u00e9teste\\\", \\\"95\\\": \\\"g\\\\u00e8le\\\", \\\"96\\\": \\\"fruits\\\", \\\"97\\\": \\\"voiture\\\", \\\"98\\\": \\\"l'automne\\\", \\\"99\\\": \\\"ils\\\", \\\"100\\\": \\\"n'aime\\\", \\\"101\\\": \\\"california\\\", \\\"102\\\": \\\"neige\\\", \\\"103\\\": \\\"fait\\\", \\\"104\\\": \\\"belle\\\", \\\"105\\\": \\\"ne\\\", \\\"106\\\": \\\"vous\\\", \\\"107\\\": \\\"nous\\\", \\\"108\\\": \\\"des\\\", \\\"109\\\": \\\"animal\\\", \\\"110\\\": \\\"camion\\\", \\\"111\\\": \\\"cours\\\", \\\"112\\\": \\\"neigeux\\\", \\\"113\\\": \\\"conduit\\\", \\\"114\\\": \\\"prochain\\\", \\\"115\\\": \\\"ce\\\", \\\"116\\\": \\\"je\\\", \\\"117\\\": \\\"tranquille\\\", \\\"118\\\": \\\"a\\\", \\\"119\\\": \\\"cher\\\", \\\"120\\\": \\\"une\\\", \\\"121\\\": \\\"cette\\\", \\\"122\\\": \\\"\\\\u00e9tait\\\", \\\"123\\\": \\\"aller\\\", \\\"124\\\": \\\"aiment\\\", \\\"125\\\": \\\"chaude\\\", \\\"126\\\": \\\"aimons\\\", \\\"127\\\": \\\"n'aiment\\\", \\\"128\\\": \\\"n'aimez\\\", \\\"129\\\": \\\"leurs\\\", \\\"130\\\": \\\"aimez\\\", \\\"131\\\": \\\"sont\\\", \\\"132\\\": \\\"d\\\\u00e9testons\\\", \\\"133\\\": \\\"jaune\\\", \\\"134\\\": \\\"rouge\\\", \\\"135\\\": \\\"j'aime\\\", \\\"136\\\": \\\"visiter\\\", \\\"137\\\": \\\"s\\\\u00e8che\\\", \\\"138\\\": \\\"occup\\\\u00e9e\\\", \\\"139\\\": \\\"frisquet\\\", \\\"140\\\": \\\"pr\\\\u00e9f\\\\u00e9r\\\\u00e9e\\\", \\\"141\\\": \\\"animaux\\\", \\\"142\\\": \\\"dernier\\\", \\\"143\\\": \\\"aimait\\\", \\\"144\\\": \\\"un\\\", \\\"145\\\": \\\"conduisait\\\", \\\"146\\\": \\\"que\\\", \\\"147\\\": \\\"nouvelle\\\", \\\"148\\\": \\\"vieille\\\", \\\"149\\\": \\\"vu\\\", \\\"150\\\": \\\"verte\\\", \\\"151\\\": \\\"petite\\\", \\\"152\\\": \\\"nos\\\", \\\"153\\\": \\\"noire\\\", \\\"154\\\": \\\"brillant\\\", \\\"155\\\": \\\"blanche\\\", \\\"156\\\": \\\"redout\\\\u00e9\\\", \\\"157\\\": \\\"pleut\\\", \\\"158\\\": \\\"n'aimait\\\", \\\"159\\\": \\\"pamplemousses\\\", \\\"160\\\": \\\"pense\\\", \\\"161\\\": \\\"entre\\\", \\\"162\\\": \\\"bleue\\\", \\\"163\\\": \\\"nouveau\\\", \\\"164\\\": \\\"traduire\\\", \\\"165\\\": \\\"rouill\\\\u00e9e\\\", \\\"166\\\": \\\"bleu\\\", \\\"167\\\": \\\"se\\\", \\\"168\\\": \\\"grande\\\", \\\"169\\\": \\\"rouill\\\\u00e9\\\", \\\"170\\\": \\\"ses\\\", \\\"171\\\": \\\"qu'il\\\", \\\"172\\\": \\\"blanc\\\", \\\"173\\\": \\\"aux\\\", \\\"174\\\": \\\"brillante\\\", \\\"175\\\": \\\"pr\\\\u00e9f\\\\u00e9r\\\\u00e9s\\\", \\\"176\\\": \\\"noir\\\", \\\"177\\\": \\\"pluies\\\", \\\"178\\\": \\\"envisage\\\", \\\"179\\\": \\\"\\\\u00e9taient\\\", \\\"180\\\": \\\"va\\\", \\\"181\\\": \\\"rendre\\\", \\\"182\\\": \\\"vert\\\", \\\"183\\\": \\\"vieux\\\", \\\"184\\\": \\\"petit\\\", \\\"185\\\": \\\"espagnol\\\", \\\"186\\\": \\\"portugais\\\", \\\"187\\\": \\\"chinois\\\", \\\"188\\\": \\\"anglais\\\", \\\"189\\\": \\\"fran\\\\u00e7ais\\\", \\\"190\\\": \\\"glaciales\\\", \\\"191\\\": \\\"mes\\\", \\\"192\\\": \\\"cet\\\", \\\"193\\\": \\\"automobile\\\", \\\"194\\\": \\\"traduction\\\", \\\"195\\\": \\\"mouill\\\\u00e9\\\", \\\"196\\\": \\\"difficile\\\", \\\"197\\\": \\\"amusant\\\", \\\"198\\\": \\\"facile\\\", \\\"199\\\": \\\"comme\\\", \\\"200\\\": \\\"gros\\\", \\\"201\\\": \\\"souris\\\", \\\"202\\\": \\\"pourrait\\\", \\\"203\\\": \\\"voulait\\\", \\\"204\\\": \\\"veut\\\", \\\"205\\\": \\\"pourquoi\\\", \\\"206\\\": \\\"aim\\\\u00e9s\\\", \\\"207\\\": \\\"pr\\\\u00e9vois\\\", \\\"208\\\": \\\"pr\\\\u00e9voyons\\\", \\\"209\\\": \\\"vos\\\", \\\"210\\\": \\\"intention\\\", 
\\\"211\\\": \\\"cl\\\\u00e9mentes\\\", \\\"212\\\": \\\"ont\\\", \\\"213\\\": \\\"chat\\\", \\\"214\\\": \\\"requin\\\", \\\"215\\\": \\\"cheval\\\", \\\"216\\\": \\\"chien\\\", \\\"217\\\": \\\"singe\\\", \\\"218\\\": \\\"lion\\\", \\\"219\\\": \\\"ours\\\", \\\"220\\\": \\\"lapin\\\", \\\"221\\\": \\\"serpent\\\", \\\"222\\\": \\\"redout\\\\u00e9s\\\", \\\"223\\\": \\\"all\\\\u00e9\\\", \\\"224\\\": \\\"grosse\\\", \\\"225\\\": \\\"pluie\\\", \\\"226\\\": \\\"trop\\\", \\\"227\\\": \\\"monde\\\", \\\"228\\\": \\\"maillot\\\", \\\"229\\\": \\\"vont\\\", \\\"230\\\": \\\"volant\\\", \\\"231\\\": \\\"avez\\\", \\\"232\\\": \\\"i\\\", \\\"233\\\": \\\"all\\\\u00e9s\\\", \\\"234\\\": \\\"all\\\\u00e9e\\\", \\\"235\\\": \\\"quand\\\", \\\"236\\\": \\\"oiseau\\\", \\\"237\\\": \\\"\\\\u00e9l\\\\u00e9phant\\\", \\\"238\\\": \\\"pourraient\\\", \\\"239\\\": \\\"voulaient\\\", \\\"240\\\": \\\"veulent\\\", \\\"241\\\": \\\"d\\\\u00e9tendre\\\", \\\"242\\\": \\\"aim\\\\u00e9e\\\", \\\"243\\\": \\\"magnifique\\\", \\\"244\\\": \\\"l'automobile\\\", \\\"245\\\": \\\"n'aimons\\\", \\\"246\\\": \\\"gel\\\\u00e9\\\", \\\"247\\\": \\\"d\\\\u00e9testait\\\", \\\"248\\\": \\\"grand\\\", \\\"249\\\": \\\"bien\\\", \\\"250\\\": \\\"vers\\\", \\\"251\\\": \\\"pr\\\\u00e9voient\\\", \\\"252\\\": \\\"pr\\\\u00e9voit\\\", \\\"253\\\": \\\"lui\\\", \\\"254\\\": \\\"visite\\\", \\\"255\\\": \\\"comment\\\", \\\"256\\\": \\\"\\\\u00e9l\\\\u00e9phants\\\", \\\"257\\\": \\\"chevaux\\\", \\\"258\\\": \\\"chiens\\\", \\\"259\\\": \\\"l'\\\\u00e9l\\\\u00e9phant\\\", \\\"260\\\": \\\"l'oiseau\\\", \\\"261\\\": \\\"requins\\\", \\\"262\\\": \\\"l'ours\\\", \\\"263\\\": \\\"serpents\\\", \\\"264\\\": \\\"chats\\\", \\\"265\\\": \\\"lapins\\\", \\\"266\\\": \\\"singes\\\", \\\"267\\\": \\\"oiseaux\\\", \\\"268\\\": \\\"lions\\\", \\\"269\\\": \\\"l\\\\u00e9g\\\\u00e8re\\\", \\\"270\\\": \\\"c\\\\u00e9page\\\", \\\"271\\\": \\\"pensez\\\", \\\"272\\\": \\\"tour\\\", \\\"273\\\": \\\"eiffel\\\", \\\"274\\\": \\\"l'\\\\u00e9picerie\\\", \\\"275\\\": \\\"terrain\\\", \\\"276\\\": \\\"football\\\", \\\"277\\\": \\\"lac\\\", \\\"278\\\": \\\"l'\\\\u00e9cole\\\", \\\"279\\\": \\\"l'animal\\\", \\\"280\\\": \\\"n'est\\\", \\\"281\\\": \\\"allons\\\", \\\"282\\\": \\\"allez\\\", \\\"283\\\": \\\"peu\\\", \\\"284\\\": \\\"pousse\\\", \\\"285\\\": \\\"du\\\", \\\"286\\\": \\\"temps\\\", \\\"287\\\": \\\"at\\\", \\\"288\\\": \\\"rouille\\\", \\\"289\\\": \\\"sur\\\", \\\"290\\\": \\\"qu'elle\\\", \\\"291\\\": \\\"petites\\\", \\\"292\\\": \\\"derni\\\\u00e8re\\\", \\\"293\\\": \\\"\\\\u00eates\\\", \\\"294\\\": \\\"vais\\\", \\\"295\\\": \\\"voudrait\\\", \\\"296\\\": \\\"proches\\\", \\\"297\\\": \\\"frais\\\", \\\"298\\\": \\\"manguiers\\\", \\\"299\\\": \\\"avons\\\", \\\"300\\\": \\\"t\\\", \\\"301\\\": \\\"porcelaine\\\", \\\"302\\\": \\\"d\\\\u00e9testez\\\", \\\"303\\\": \\\"c'est\\\", \\\"304\\\": \\\"grandes\\\", \\\"305\\\": \\\"pr\\\\u00e9f\\\\u00e9r\\\\u00e9es\\\", \\\"306\\\": \\\"douce\\\", \\\"307\\\": \\\"durant\\\", \\\"308\\\": \\\"cong\\\\u00e9lation\\\", \\\"309\\\": \\\"pla\\\\u00eet\\\", \\\"310\\\": \\\"o\\\\u00f9\\\", \\\"311\\\": \\\"dans\\\", \\\"312\\\": \\\"voulez\\\", \\\"313\\\": \\\"aimeraient\\\", \\\"314\\\": \\\"n'a\\\", \\\"315\\\": \\\"petits\\\", \\\"316\\\": \\\"grands\\\", \\\"317\\\": \\\"limes\\\", \\\"318\\\": \\\"envisagent\\\", \\\"319\\\": \\\"grosses\\\", \\\"320\\\": \\\"b\\\\u00e9nigne\\\", \\\"321\\\": \\\"mouill\\\\u00e9e\\\", \\\"322\\\": \\\"enneig\\\\u00e9\\\", \\\"323\\\": 
\\\"moindres\\\", \\\"324\\\": \\\"conduite\\\", \\\"325\\\": \\\"gel\\\\u00e9s\\\", \\\"326\\\": \\\"tout\\\", \\\"327\\\": \\\"etats\\\", \\\"328\\\": \\\"n'\\\\u00eates\\\", \\\"329\\\": \\\"vit\\\", \\\"330\\\": \\\"ressort\\\", \\\"331\\\": \\\"d\\\\u00e9tend\\\", \\\"332\\\": \\\"redout\\\\u00e9e\\\", \\\"333\\\": \\\"tu\\\", \\\"334\\\": \\\"qui\\\", \\\"335\\\": \\\"traduis\\\", \\\"336\\\": \\\"appr\\\\u00e9ci\\\\u00e9\\\", \\\"337\\\": \\\"allions\\\", \\\"338\\\": \\\"trouv\\\\u00e9\\\", \\\"339\\\": \\\"as\\\", \\\"340\\\": \\\"faire\\\", \\\"341\\\": \\\"favoris\\\", \\\"342\\\": \\\"souvent\\\", \\\"343\\\": \\\"es\\\", \\\"344\\\": \\\"moteur\\\"}\", \"word_index\": \"{\\\"est\\\": 1, \\\"en\\\": 2, \\\"il\\\": 3, \\\"les\\\": 4, \\\"mais\\\": 5, \\\"et\\\": 6, \\\"la\\\": 7, \\\"parfois\\\": 8, \\\"jamais\\\": 9, \\\"le\\\": 10, \\\"l'\\\": 11, \\\"g\\\\u00e9n\\\\u00e9ralement\\\": 12, \\\"moins\\\": 13, \\\"aim\\\\u00e9\\\": 14, \\\"au\\\": 15, \\\"fruit\\\": 16, \\\"pr\\\\u00e9f\\\\u00e9r\\\\u00e9\\\": 17, \\\"agr\\\\u00e9able\\\": 18, \\\"froid\\\": 19, \\\"son\\\": 20, \\\"chaud\\\": 21, \\\"de\\\": 22, \\\"plus\\\": 23, \\\"automne\\\": 24, \\\"mois\\\": 25, \\\"\\\\u00e0\\\": 26, \\\"elle\\\": 27, \\\"citrons\\\": 28, \\\"paris\\\": 29, \\\"inde\\\": 30, \\\"unis\\\": 31, \\\"\\\\u00e9tats\\\": 32, \\\"france\\\": 33, \\\"jersey\\\": 34, \\\"new\\\": 35, \\\"chine\\\": 36, \\\"pendant\\\": 37, \\\"pamplemousse\\\": 38, \\\"mon\\\": 39, \\\"votre\\\": 40, \\\"juin\\\": 41, \\\"printemps\\\": 42, \\\"janvier\\\": 43, \\\"hiver\\\": 44, \\\"mars\\\": 45, \\\"\\\\u00e9t\\\\u00e9\\\": 46, \\\"mai\\\": 47, \\\"septembre\\\": 48, \\\"juillet\\\": 49, \\\"avril\\\": 50, \\\"novembre\\\": 51, \\\"d\\\\u00e9cembre\\\": 52, \\\"f\\\\u00e9vrier\\\": 53, \\\"octobre\\\": 54, \\\"aime\\\": 55, \\\"ao\\\\u00fbt\\\": 56, \\\"merveilleux\\\": 57, \\\"relaxant\\\": 58, \\\"doux\\\": 59, \\\"humide\\\": 60, \\\"notre\\\": 61, \\\"californie\\\": 62, \\\"sec\\\": 63, \\\"leur\\\": 64, \\\"occup\\\\u00e9\\\": 65, \\\"pluvieux\\\": 66, \\\"calme\\\": 67, \\\"beau\\\": 68, \\\"habituellement\\\": 69, \\\"pommes\\\": 70, \\\"p\\\\u00eaches\\\": 71, \\\"oranges\\\": 72, \\\"poires\\\": 73, \\\"fraises\\\": 74, \\\"bananes\\\": 75, \\\"verts\\\": 76, \\\"raisins\\\": 77, \\\"mangues\\\": 78, \\\"d'\\\": 79, \\\"mangue\\\": 80, \\\"gel\\\": 81, \\\"raisin\\\": 82, \\\"pomme\\\": 83, \\\"l'orange\\\": 84, \\\"citron\\\": 85, \\\"chaux\\\": 86, \\\"banane\\\": 87, \\\"poire\\\": 88, \\\"fraise\\\": 89, \\\"p\\\\u00eache\\\": 90, \\\"pas\\\": 91, \\\"enneig\\\\u00e9e\\\": 92, \\\"favori\\\": 93, \\\"d\\\\u00e9teste\\\": 94, \\\"g\\\\u00e8le\\\": 95, \\\"fruits\\\": 96, \\\"voiture\\\": 97, \\\"l'automne\\\": 98, \\\"ils\\\": 99, \\\"n'aime\\\": 100, \\\"california\\\": 101, \\\"neige\\\": 102, \\\"fait\\\": 103, \\\"belle\\\": 104, \\\"ne\\\": 105, \\\"vous\\\": 106, \\\"nous\\\": 107, \\\"des\\\": 108, \\\"animal\\\": 109, \\\"camion\\\": 110, \\\"cours\\\": 111, \\\"neigeux\\\": 112, \\\"conduit\\\": 113, \\\"prochain\\\": 114, \\\"ce\\\": 115, \\\"je\\\": 116, \\\"tranquille\\\": 117, \\\"a\\\": 118, \\\"cher\\\": 119, \\\"une\\\": 120, \\\"cette\\\": 121, \\\"\\\\u00e9tait\\\": 122, \\\"aller\\\": 123, \\\"aiment\\\": 124, \\\"chaude\\\": 125, \\\"aimons\\\": 126, \\\"n'aiment\\\": 127, \\\"n'aimez\\\": 128, \\\"leurs\\\": 129, \\\"aimez\\\": 130, \\\"sont\\\": 131, \\\"d\\\\u00e9testons\\\": 132, \\\"jaune\\\": 133, \\\"rouge\\\": 134, \\\"j'aime\\\": 135, \\\"visiter\\\": 136, 
\\\"s\\\\u00e8che\\\": 137, \\\"occup\\\\u00e9e\\\": 138, \\\"frisquet\\\": 139, \\\"pr\\\\u00e9f\\\\u00e9r\\\\u00e9e\\\": 140, \\\"animaux\\\": 141, \\\"dernier\\\": 142, \\\"aimait\\\": 143, \\\"un\\\": 144, \\\"conduisait\\\": 145, \\\"que\\\": 146, \\\"nouvelle\\\": 147, \\\"vieille\\\": 148, \\\"vu\\\": 149, \\\"verte\\\": 150, \\\"petite\\\": 151, \\\"nos\\\": 152, \\\"noire\\\": 153, \\\"brillant\\\": 154, \\\"blanche\\\": 155, \\\"redout\\\\u00e9\\\": 156, \\\"pleut\\\": 157, \\\"n'aimait\\\": 158, \\\"pamplemousses\\\": 159, \\\"pense\\\": 160, \\\"entre\\\": 161, \\\"bleue\\\": 162, \\\"nouveau\\\": 163, \\\"traduire\\\": 164, \\\"rouill\\\\u00e9e\\\": 165, \\\"bleu\\\": 166, \\\"se\\\": 167, \\\"grande\\\": 168, \\\"rouill\\\\u00e9\\\": 169, \\\"ses\\\": 170, \\\"qu'il\\\": 171, \\\"blanc\\\": 172, \\\"aux\\\": 173, \\\"brillante\\\": 174, \\\"pr\\\\u00e9f\\\\u00e9r\\\\u00e9s\\\": 175, \\\"noir\\\": 176, \\\"pluies\\\": 177, \\\"envisage\\\": 178, \\\"\\\\u00e9taient\\\": 179, \\\"va\\\": 180, \\\"rendre\\\": 181, \\\"vert\\\": 182, \\\"vieux\\\": 183, \\\"petit\\\": 184, \\\"espagnol\\\": 185, \\\"portugais\\\": 186, \\\"chinois\\\": 187, \\\"anglais\\\": 188, \\\"fran\\\\u00e7ais\\\": 189, \\\"glaciales\\\": 190, \\\"mes\\\": 191, \\\"cet\\\": 192, \\\"automobile\\\": 193, \\\"traduction\\\": 194, \\\"mouill\\\\u00e9\\\": 195, \\\"difficile\\\": 196, \\\"amusant\\\": 197, \\\"facile\\\": 198, \\\"comme\\\": 199, \\\"gros\\\": 200, \\\"souris\\\": 201, \\\"pourrait\\\": 202, \\\"voulait\\\": 203, \\\"veut\\\": 204, \\\"pourquoi\\\": 205, \\\"aim\\\\u00e9s\\\": 206, \\\"pr\\\\u00e9vois\\\": 207, \\\"pr\\\\u00e9voyons\\\": 208, \\\"vos\\\": 209, \\\"intention\\\": 210, \\\"cl\\\\u00e9mentes\\\": 211, \\\"ont\\\": 212, \\\"chat\\\": 213, \\\"requin\\\": 214, \\\"cheval\\\": 215, \\\"chien\\\": 216, \\\"singe\\\": 217, \\\"lion\\\": 218, \\\"ours\\\": 219, \\\"lapin\\\": 220, \\\"serpent\\\": 221, \\\"redout\\\\u00e9s\\\": 222, \\\"all\\\\u00e9\\\": 223, \\\"grosse\\\": 224, \\\"pluie\\\": 225, \\\"trop\\\": 226, \\\"monde\\\": 227, \\\"maillot\\\": 228, \\\"vont\\\": 229, \\\"volant\\\": 230, \\\"avez\\\": 231, \\\"i\\\": 232, \\\"all\\\\u00e9s\\\": 233, \\\"all\\\\u00e9e\\\": 234, \\\"quand\\\": 235, \\\"oiseau\\\": 236, \\\"\\\\u00e9l\\\\u00e9phant\\\": 237, \\\"pourraient\\\": 238, \\\"voulaient\\\": 239, \\\"veulent\\\": 240, \\\"d\\\\u00e9tendre\\\": 241, \\\"aim\\\\u00e9e\\\": 242, \\\"magnifique\\\": 243, \\\"l'automobile\\\": 244, \\\"n'aimons\\\": 245, \\\"gel\\\\u00e9\\\": 246, \\\"d\\\\u00e9testait\\\": 247, \\\"grand\\\": 248, \\\"bien\\\": 249, \\\"vers\\\": 250, \\\"pr\\\\u00e9voient\\\": 251, \\\"pr\\\\u00e9voit\\\": 252, \\\"lui\\\": 253, \\\"visite\\\": 254, \\\"comment\\\": 255, \\\"\\\\u00e9l\\\\u00e9phants\\\": 256, \\\"chevaux\\\": 257, \\\"chiens\\\": 258, \\\"l'\\\\u00e9l\\\\u00e9phant\\\": 259, \\\"l'oiseau\\\": 260, \\\"requins\\\": 261, \\\"l'ours\\\": 262, \\\"serpents\\\": 263, \\\"chats\\\": 264, \\\"lapins\\\": 265, \\\"singes\\\": 266, \\\"oiseaux\\\": 267, \\\"lions\\\": 268, \\\"l\\\\u00e9g\\\\u00e8re\\\": 269, \\\"c\\\\u00e9page\\\": 270, \\\"pensez\\\": 271, \\\"tour\\\": 272, \\\"eiffel\\\": 273, \\\"l'\\\\u00e9picerie\\\": 274, \\\"terrain\\\": 275, \\\"football\\\": 276, \\\"lac\\\": 277, \\\"l'\\\\u00e9cole\\\": 278, \\\"l'animal\\\": 279, \\\"n'est\\\": 280, \\\"allons\\\": 281, \\\"allez\\\": 282, \\\"peu\\\": 283, \\\"pousse\\\": 284, \\\"du\\\": 285, \\\"temps\\\": 286, \\\"at\\\": 287, \\\"rouille\\\": 288, \\\"sur\\\": 289, 
\\\"qu'elle\\\": 290, \\\"petites\\\": 291, \\\"derni\\\\u00e8re\\\": 292, \\\"\\\\u00eates\\\": 293, \\\"vais\\\": 294, \\\"voudrait\\\": 295, \\\"proches\\\": 296, \\\"frais\\\": 297, \\\"manguiers\\\": 298, \\\"avons\\\": 299, \\\"t\\\": 300, \\\"porcelaine\\\": 301, \\\"d\\\\u00e9testez\\\": 302, \\\"c'est\\\": 303, \\\"grandes\\\": 304, \\\"pr\\\\u00e9f\\\\u00e9r\\\\u00e9es\\\": 305, \\\"douce\\\": 306, \\\"durant\\\": 307, \\\"cong\\\\u00e9lation\\\": 308, \\\"pla\\\\u00eet\\\": 309, \\\"o\\\\u00f9\\\": 310, \\\"dans\\\": 311, \\\"voulez\\\": 312, \\\"aimeraient\\\": 313, \\\"n'a\\\": 314, \\\"petits\\\": 315, \\\"grands\\\": 316, \\\"limes\\\": 317, \\\"envisagent\\\": 318, \\\"grosses\\\": 319, \\\"b\\\\u00e9nigne\\\": 320, \\\"mouill\\\\u00e9e\\\": 321, \\\"enneig\\\\u00e9\\\": 322, \\\"moindres\\\": 323, \\\"conduite\\\": 324, \\\"gel\\\\u00e9s\\\": 325, \\\"tout\\\": 326, \\\"etats\\\": 327, \\\"n'\\\\u00eates\\\": 328, \\\"vit\\\": 329, \\\"ressort\\\": 330, \\\"d\\\\u00e9tend\\\": 331, \\\"redout\\\\u00e9e\\\": 332, \\\"tu\\\": 333, \\\"qui\\\": 334, \\\"traduis\\\": 335, \\\"appr\\\\u00e9ci\\\\u00e9\\\": 336, \\\"allions\\\": 337, \\\"trouv\\\\u00e9\\\": 338, \\\"as\\\": 339, \\\"faire\\\": 340, \\\"favoris\\\": 341, \\\"souvent\\\": 342, \\\"es\\\": 343, \\\"moteur\\\": 344}\"}}"
Task 2/images/attention.png ADDED
Task 2/images/bidirectional.png ADDED
Task 2/images/embedding-words.png ADDED
Task 2/images/encoder-decoder-context.png ADDED
Task 2/images/encoder-decoder-translation.png ADDED
Task 2/images/rnn.png ADDED
Task 2/sequence_length.json ADDED
@@ -0,0 +1 @@
+ 21
Task 2/spa_vectorization_config.json ADDED
@@ -0,0 +1 @@
+ {"name": "text_vectorization_1", "trainable": true, "batch_input_shape": [null], "dtype": "string", "max_tokens": 15000, "split": "whitespace", "ngrams": null, "output_mode": "int", "output_sequence_length": 21, "pad_to_max_tokens": false, "sparse": false, "ragged": false, "vocabulary": null, "idf_weights": null}
Task 2/spa_vocab.json ADDED
The diff for this file is too large to render. See raw diff
Task 2/transformer_model/keras_metadata.pb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c57e5b1c49a163881bb486d7ab440406af67981fb4d43c1b293d4f3e8be60b6b
+ size 58739
Task 2/transformer_model/saved_model.pb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:08e90ad65260d2a5f6b86eb2222070bfff801829a65b959df096fdf5609f4979
+ size 1305169
Task 2/transformer_model/variables/variables.data-00000-of-00001 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0fd0c4d41a936b3fc1f51305eec565e34f2eaac945ab5bdaee783e2cb6064ae6
+ size 159724466
Task 2/transformer_model/variables/variables.index ADDED
Binary file (7.6 kB).
Task 2/word detection.py ADDED
@@ -0,0 +1,218 @@
+ # Import libraries
+ import tkinter as tk
+ from tkinter import ttk, messagebox
+
+ from keras.layers import TextVectorization
+ import re
+ import tensorflow.strings as tf_strings
+ import json
+ import string
+ from keras.models import load_model
+ import tensorflow as tf
+ from keras.preprocessing.text import tokenizer_from_json
+ from keras.utils import pad_sequences
+ import numpy as np
+ import difflib
+
+ # English to Spanish translation
+ strip_chars = string.punctuation + "¿"
+ strip_chars = strip_chars.replace("[", "")
+ strip_chars = strip_chars.replace("]", "")
+
+ def custom_standardization(input_string):
+     lowercase = tf_strings.lower(input_string)
+     return tf_strings.regex_replace(lowercase, f"[{re.escape(strip_chars)}]", "")
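+ # Quick illustration of the standardizer (values are indicative):
+ #     custom_standardization(tf.constant("¿Cómo estás?"))  -> "cómo estás"
+ # i.e. lowercased text with punctuation (including "¿") stripped out.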
+
+ # Load the English vectorization layer configuration
+ with open('eng_vectorization_config.json') as json_file:
+     eng_vectorization_config = json.load(json_file)
+
+ # Recreate the English vectorization layer, passing the custom standardizer
+ # at construction time (assigning it after construction does not take
+ # effect in Keras 2)
+ eng_vectorization = TextVectorization(
+     max_tokens=eng_vectorization_config['max_tokens'],
+     output_mode=eng_vectorization_config['output_mode'],
+     output_sequence_length=eng_vectorization_config['output_sequence_length'],
+     standardize=custom_standardization
+ )
+
+ # Load the Spanish vectorization layer configuration
+ with open('spa_vectorization_config.json') as json_file:
+     spa_vectorization_config = json.load(json_file)
+
+ # Recreate the Spanish vectorization layer with the same custom standardization
+ spa_vectorization = TextVectorization(
+     max_tokens=spa_vectorization_config['max_tokens'],
+     output_mode=spa_vectorization_config['output_mode'],
+     output_sequence_length=spa_vectorization_config['output_sequence_length'],
+     standardize=custom_standardization
+ )
+
+ # Load and set the English vocabulary
+ with open('eng_vocab.json') as json_file:
+     eng_vocab = json.load(json_file)
+ eng_vectorization.set_vocabulary(eng_vocab)
+
+ # Load and set the Spanish vocabulary
+ with open('spa_vocab.json') as json_file:
+     spa_vocab = json.load(json_file)
+ spa_vectorization.set_vocabulary(spa_vocab)
+
+ # Load the English-to-Spanish transformer model
+ transformer = load_model('transformer_model')
+
+ spa_index_lookup = dict(zip(range(len(spa_vocab)), spa_vocab))
+ max_decoded_sentence_length = 20
+
+ # Initialize list to track incorrect words
+ incorrect_words = []
+
+ def beam_search_decode(input_sentence, beam_width=3):
+     tokenized_input_sentence = eng_vectorization([input_sentence])
+     decoded_sentences = [("[start]", 0.0)]
+
+     for i in range(max_decoded_sentence_length):
+         all_candidates = []
+         for decoded_sentence, score in decoded_sentences:
+             tokenized_target_sentence = spa_vectorization([decoded_sentence])[:, :-1]
+             predictions = transformer([tokenized_input_sentence, tokenized_target_sentence])
+             top_k = tf.math.top_k(predictions[0, i, :], k=beam_width)
+
+             for j in range(beam_width):
+                 predicted_token_index = top_k.indices[j].numpy()
+                 predicted_token = spa_index_lookup[predicted_token_index]
+                 candidate = (decoded_sentence + " " + predicted_token, score + top_k.values[j].numpy())
+                 all_candidates.append(candidate)
+
+         # Keep only the beam_width highest-scoring partial translations
+         ordered = sorted(all_candidates, key=lambda x: x[1], reverse=True)
+         decoded_sentences = ordered[:beam_width]
+
+         # Stop once every surviving beam has produced the end token
+         if all(sentence[0].endswith("[end]") for sentence in decoded_sentences):
+             break
+
+     return decoded_sentences[0][0]
+
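+ # Scoring note for beam_search_decode above: candidate scores add the raw
+ # values returned by top_k. If the transformer's output layer is a softmax,
+ # the conventional beam-search score would accumulate log-probabilities
+ # instead, e.g. (illustrative one-line change):
+ #     score + np.log(top_k.values[j].numpy() + 1e-9)
+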
+ # English to French translation
+ # Load the French model
+ model = load_model('english_to_french_model')
+
+ # Load the tokenizers
+ with open('english_tokenizer.json') as f:
+     data = json.load(f)
+     english_tokenizer = tokenizer_from_json(data)
+
+ with open('french_tokenizer.json') as f:
+     data = json.load(f)
+     french_tokenizer = tokenizer_from_json(data)
+
+ # Load the maximum sequence length (21 for this model, see sequence_length.json)
+ with open('sequence_length.json') as f:
+     max_length = json.load(f)
+
+ def pad(x, length=None):
+     return pad_sequences(x, maxlen=length, padding='post')
+
+ def translate_to_french(english_sentence):
+     english_sentence = english_sentence.lower()
+     english_sentence = re.sub(r'[.?!,]', '', english_sentence)
+     english_sentence = english_tokenizer.texts_to_sequences([english_sentence])
+     english_sentence = pad(english_sentence, max_length)
+     english_sentence = english_sentence.reshape((-1, max_length))
+
+     french_sentence = model.predict(english_sentence)[0]
+     french_sentence = [np.argmax(word) for word in french_sentence]
+     french_sentence = french_tokenizer.sequences_to_texts([french_sentence])[0]
+
+     return french_sentence
+
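+ # Example (illustrative; the exact output depends on the trained weights, and
+ # inputs should use words present in english_tokenizer.json):
+ #     translate_to_french("he saw a old yellow truck")
+ #     # -> something like "il a vu un vieux camion jaune"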
+ def get_word_suggestions(word, vocab):
+     # Suggest up to three close matches from the known vocabulary
+     return difflib.get_close_matches(word, vocab, n=3, cutoff=0.6)
+
+ def check_and_correct_sentence(sentence, vocab):
+     words = sentence.split()
+     incorrect_words.clear()
+     for word in words:
+         if word not in vocab:
+             suggestions = get_word_suggestions(word, vocab)
+             incorrect_words.append((word, suggestions))
+
+     if incorrect_words:
+         message = f"Incorrect word(s) detected: {', '.join([w[0] for w in incorrect_words])}\n"
+         for word, suggestions in incorrect_words:
+             message += f"Suggestions for '{word}': {', '.join(suggestions) if suggestions else 'No suggestions available'}\n"
+         # Only block the translation when two or more words are unknown
+         if len(incorrect_words) >= 2:
+             messagebox.showerror("Error", message)
+             return False
+     return True
+
+ def translate_to_spanish(english_sentence):
+     if not check_and_correct_sentence(english_sentence, eng_vocab):
+         return ""
+     spanish_sentence = beam_search_decode(english_sentence)
+     return spanish_sentence.replace("[start]", "").replace("[end]", "").strip()
+
+ # Handle a translation request based on the selected language
+ def handle_translate():
+     selected_language = language_var.get()
+     english_sentence = text_input.get("1.0", "end-1c").strip()
+
+     if not english_sentence:
+         messagebox.showwarning("Warning", "Please enter a sentence to translate.")
+         return
+
+     if selected_language == "French":
+         translation = translate_to_french(english_sentence)
+     elif selected_language == "Spanish":
+         translation = translate_to_spanish(english_sentence)
+     else:
+         # Guard against an empty selection so `translation` is always defined
+         messagebox.showwarning("Warning", "Please select a language to translate to.")
+         return
+
+     translation_output.delete("1.0", "end")
+     translation_output.insert("end", f"{selected_language} translation: {translation}")
+
+ # Set up the main window
+ root = tk.Tk()
+ root.title("Language Translator")
+ root.geometry("550x600")
+
+ # Font configuration
+ font_style = "Times New Roman"
+ font_size = 14
+
+ # Frame for input
+ input_frame = tk.Frame(root)
+ input_frame.pack(pady=10)
+
+ # Heading for input
+ input_heading = tk.Label(input_frame, text="Enter the text to be translated", font=(font_style, font_size, 'bold'))
+ input_heading.pack()
+
+ # Text input for the English sentence
+ text_input = tk.Text(input_frame, height=5, width=50, font=(font_style, font_size))
+ text_input.pack()
+
+ # Language selection
+ language_var = tk.StringVar()
+ language_label = tk.Label(root, text="Select the language to translate to", font=(font_style, font_size, 'bold'))
+ language_label.pack()
+ language_select = ttk.Combobox(root, textvariable=language_var, values=["French", "Spanish"], font=(font_style, font_size), state="readonly")
+ language_select.pack()
+
+ # Submit button
+ submit_button = ttk.Button(root, text="Translate", command=handle_translate)
+ submit_button.pack(pady=10)
+
+ # Frame for output
+ output_frame = tk.Frame(root)
+ output_frame.pack(pady=10)
+
+ # Heading for output
+ output_heading = tk.Label(output_frame, text="Translation: ", font=(font_style, font_size, 'bold'))
+ output_heading.pack()
+
+ # Text output for translations
+ translation_output = tk.Text(output_frame, height=10, width=50, font=(font_style, font_size))
+ translation_output.pack()
+
+ # Run the application
+ root.mainloop()