productizationlabs
committed on
Commit
·
55d0a48
1
Parent(s):
f7a6ae3
Upload app.py
Browse files
app.py
CHANGED
@@ -1,64 +1,19 @@
|
|
1 |
-
|
2 |
-
import numpy as np
|
3 |
-
|
4 |
-
from nltk.corpus import stopwords
|
5 |
from nltk.tokenize import word_tokenize
|
6 |
from nltk.stem.wordnet import WordNetLemmatizer
|
7 |
-
|
8 |
-
|
9 |
-
nltk.download('punkt')
|
10 |
-
nltk.download('wordnet')
|
11 |
-
nltk.download('stopwords')
|
12 |
-
|
13 |
-
# Import the dataset
|
14 |
-
df = pd.read_csv('Hotel_Reviews.csv')
|
15 |
-
df['countries'] = df.Hotel_Address.apply(lambda x: x.split(' ')[-1])
|
16 |
-
|
17 |
-
# Define the function to recommend hotels
|
18 |
def Input_your_destination_and_description(location,description):
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
f_set=set()
|
31 |
-
for fs in filtered_sen:
|
32 |
-
f_set.add(lemm.lemmatize(fs))
|
33 |
-
|
34 |
-
# Defining a new variable that takes in the location inputted and bring out the features defined below
|
35 |
-
country_feat = df[df['countries']==location.lower()]
|
36 |
-
country_feat = country_feat.set_index(np.arange(country_feat.shape[0]))
|
37 |
-
cos=[];
|
38 |
-
for i in range(country_feat.shape[0]):
|
39 |
-
temp_tokens=word_tokenize(country_feat['Tags'][i])
|
40 |
-
temp1_set={w for w in temp_tokens if not w in sw}
|
41 |
-
temp_set=set()
|
42 |
-
for se in temp1_set:
|
43 |
-
temp_set.add(lemm.lemmatize(se))
|
44 |
-
rvector = temp_set.intersection(f_set)
|
45 |
-
cos.append(len(rvector))
|
46 |
-
country_feat['similarity']=cos
|
47 |
-
country_feat=country_feat.sort_values(by='similarity',ascending=False)
|
48 |
-
country_feat.drop_duplicates(subset='Hotel_Name',keep='first',inplace=True)
|
49 |
-
country_feat.sort_values('Average_Score',ascending=False,inplace=True)
|
50 |
-
country_feat.reset_index(inplace=True)
|
51 |
-
return country_feat[['Hotel_Name','Average_Score','Hotel_Address']].head(10)
|
52 |
-
|
53 |
-
# Create the input interface
|
54 |
-
inputs = [gr.inputs.Textbox(label="Location"),
|
55 |
-
gr.inputs.Textbox(label="Purpose of Travel")]
|
56 |
-
|
57 |
-
# Create the output interface
|
58 |
-
outputs=gr.outputs.Dataframe(label="Hotel Recommendations",type="pandas")
|
59 |
-
|
60 |
-
# Create the interface
|
61 |
-
gr.Interface(fn=Input_your_destination_and_description,
|
62 |
-
inputs=inputs,
|
63 |
-
outputs=outputs,theme=gr.themes.Default(primary_hue="slate")).launch()
|
64 |
-
|
|
|
1 |
+
_A='countries'
|
2 |
+
import gradio as gr,numpy as np,pandas as pd
|
3 |
+
from nltk.corpus import stopwords
|
|
|
4 |
from nltk.tokenize import word_tokenize
|
5 |
from nltk.stem.wordnet import WordNetLemmatizer
|
6 |
+
# Load the hotel-reviews dataset once at import time and derive each row's
# country as the final whitespace-separated token of the hotel address.
df = pd.read_csv('Hotel_Reviews.csv')
df[_A] = df.Hotel_Address.apply(lambda addr: addr.rsplit(' ', 1)[-1])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
8 |
def Input_your_destination_and_description(location, description):
    """Recommend up to 10 hotels in *location* that best match *description*.

    Similarity is the count of lemmatized, stopword-free tokens shared
    between the user's description and each hotel's ``Tags`` field.
    Results are de-duplicated by hotel name and ordered by average score.

    Parameters
    ----------
    location : str
        Country name; compared case-insensitively against the derived
        ``countries`` column of the module-level ``df``.
    description : str
        Free-text purpose of travel.

    Returns
    -------
    pandas.DataFrame
        Top 10 rows with ``Hotel_Name``, ``Average_Score``, ``Hotel_Address``.
    """
    # NOTE(review): mutates the shared module-level df on every call.
    # Lowercasing is idempotent so repeat calls are safe, but this is a
    # side effect callers should be aware of.
    df[_A] = df[_A].str.lower()
    df['Tags'] = df['Tags'].str.lower()

    # Build the set of lemmatized description terms. Using a set for the
    # stopwords gives O(1) membership tests (the original used the raw
    # list returned by stopwords.words, O(n) per token).
    stop_words = set(stopwords.words('english'))
    lemm = WordNetLemmatizer()
    desc_terms = {
        lemm.lemmatize(tok)
        for tok in word_tokenize(description.lower())
        if tok not in stop_words
    }

    # Restrict to hotels in the requested country, reindexed 0..n-1 so the
    # positional Tags lookup below is valid.
    country_feat = df[df[_A] == location.lower()]
    country_feat = country_feat.set_index(np.arange(country_feat.shape[0]))

    # Score each hotel by overlap between its tag terms and the description.
    similarity = []
    for i in range(country_feat.shape[0]):
        tag_terms = {
            lemm.lemmatize(tok)
            for tok in word_tokenize(country_feat['Tags'][i])
            if tok not in stop_words
        }
        similarity.append(len(tag_terms & desc_terms))

    country_feat['similarity'] = similarity
    country_feat = country_feat.sort_values(by='similarity', ascending=False)
    # Keep the best-matching row per hotel, then rank by average score.
    country_feat.drop_duplicates(subset='Hotel_Name', keep='first', inplace=True)
    country_feat.sort_values('Average_Score', ascending=False, inplace=True)
    country_feat.reset_index(inplace=True)
    return country_feat[['Hotel_Name', 'Average_Score', 'Hotel_Address']].head(10)
|
17 |
+
# Gradio front end: two free-text inputs (destination country and purpose
# of travel) feeding the recommender; results shown as a pandas dataframe.
inputs = [
    gr.inputs.Textbox(label='Location'),
    gr.inputs.Textbox(label='Purpose of Travel'),
]
outputs = gr.outputs.Dataframe(label='Hotel Recommendations', type='pandas')
gr.Interface(
    fn=Input_your_destination_and_description,
    inputs=inputs,
    outputs=outputs,
    theme=gr.themes.Default(primary_hue='slate'),
).launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|