path (string, 13-17 chars) | screenshot_names (sequence, 1-873 items) | code (string, 0-40.4k chars) | cell_type (1 class) |
---|---|---|---|
128015173/cell_13 | [
"text_plain_output_5.png",
"text_plain_output_4.png",
"text_plain_output_3.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from typing import List
tokenizer = AutoTokenizer.from_pretrained('juierror/flan-t5-text2sql-with-schema')
model = AutoModelForSeq2SeqLM.from_pretrained('juierror/flan-t5-text2sql-with-schema')
def prepare_input(question: str, table: List[str]):
    table_prefix = 'table:'
    question_prefix = 'question:'
    join_table = ','.join(table)
    inputs = f'{question_prefix} {question} {table_prefix} {join_table}'
    input_ids = tokenizer(inputs, max_length=700, return_tensors='pt').input_ids
    return input_ids
def inference(question: str, table: List[str]) -> str:
    input_data = prepare_input(question=question, table=table)
    input_data = input_data.to(model.device)
    outputs = model.generate(inputs=input_data, num_beams=10, top_k=10, max_length=700)
    result = tokenizer.decode(token_ids=outputs[0], skip_special_tokens=True)
    return result
print(inference(question='pivnice is the only settlement in vojvodina with slovaks as the largest ethnic groupIs it true?', table=['Settlement', 'Cyrillic Name', 'Other Names', 'Type', 'Population (2011)', 'Largest ethnic group (2002)', 'Dominant religion (2002)'])) | code |
128015173/cell_9 | [
"text_plain_output_1.png"
] | from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from typing import List
tokenizer = AutoTokenizer.from_pretrained('juierror/flan-t5-text2sql-with-schema')
model = AutoModelForSeq2SeqLM.from_pretrained('juierror/flan-t5-text2sql-with-schema')
def prepare_input(question: str, table: List[str]):
    table_prefix = 'table:'
    question_prefix = 'question:'
    join_table = ','.join(table)
    inputs = f'{question_prefix} {question} {table_prefix} {join_table}'
    input_ids = tokenizer(inputs, max_length=700, return_tensors='pt').input_ids
    return input_ids
def inference(question: str, table: List[str]) -> str:
    input_data = prepare_input(question=question, table=table)
    input_data = input_data.to(model.device)
    outputs = model.generate(inputs=input_data, num_beams=10, top_k=10, max_length=700)
    result = tokenizer.decode(token_ids=outputs[0], skip_special_tokens=True)
    return result
print(inference(question='here were twelve occasions where the length was sixty minutes. Is it true?', table=['round', 'circuit', 'date', 'length', 'pole position', 'gt3 winner', 'gt4 winner'])) | code |
128015173/cell_25 | [
"text_plain_output_1.png"
] | from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from typing import List
tokenizer = AutoTokenizer.from_pretrained('juierror/flan-t5-text2sql-with-schema')
model = AutoModelForSeq2SeqLM.from_pretrained('juierror/flan-t5-text2sql-with-schema')
def prepare_input(question: str, table: List[str]):
    table_prefix = 'table:'
    question_prefix = 'question:'
    join_table = ','.join(table)
    inputs = f'{question_prefix} {question} {table_prefix} {join_table}'
    input_ids = tokenizer(inputs, max_length=700, return_tensors='pt').input_ids
    return input_ids
def inference(question: str, table: List[str]) -> str:
    input_data = prepare_input(question=question, table=table)
    input_data = input_data.to(model.device)
    outputs = model.generate(inputs=input_data, num_beams=10, top_k=10, max_length=700)
    result = tokenizer.decode(token_ids=outputs[0], skip_special_tokens=True)
    return result
print(inference(question='Chris Pratt has been in his current profession since the turn of the century.Is it true?', table=['title', 'Birth Name', 'Born', 'Residence', 'Occupation', 'Years active', 'Spouse(s)', 'Partner(s)', 'Children'])) | code |
128015173/cell_34 | [
"text_plain_output_1.png"
] | from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from typing import List
tokenizer = AutoTokenizer.from_pretrained('juierror/flan-t5-text2sql-with-schema')
model = AutoModelForSeq2SeqLM.from_pretrained('juierror/flan-t5-text2sql-with-schema')
def prepare_input(question: str, table: List[str]):
    table_prefix = 'table:'
    question_prefix = 'question:'
    join_table = ','.join(table)
    inputs = f'{question_prefix} {question} {table_prefix} {join_table}'
    input_ids = tokenizer(inputs, max_length=700, return_tensors='pt').input_ids
    return input_ids
def inference(question: str, table: List[str]) -> str:
    input_data = prepare_input(question=question, table=table)
    input_data = input_data.to(model.device)
    outputs = model.generate(inputs=input_data, num_beams=10, top_k=10, max_length=700)
    result = tokenizer.decode(token_ids=outputs[0], skip_special_tokens=True)
    return result
print(inference(question='Kara-Khanid Khanate was disestablished in the second decade of the 12th century.Is it true?', table=['title', 'Capital', 'Common languages', 'Government', 'Khagan, Khan', 'Established ', 'Disestablished '])) | code |
128015173/cell_33 | [
"text_plain_output_1.png"
] | from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from typing import List
tokenizer = AutoTokenizer.from_pretrained('juierror/flan-t5-text2sql-with-schema')
model = AutoModelForSeq2SeqLM.from_pretrained('juierror/flan-t5-text2sql-with-schema')
def prepare_input(question: str, table: List[str]):
    table_prefix = 'table:'
    question_prefix = 'question:'
    join_table = ','.join(table)
    inputs = f'{question_prefix} {question} {table_prefix} {join_table}'
    input_ids = tokenizer(inputs, max_length=700, return_tensors='pt').input_ids
    return input_ids
def inference(question: str, table: List[str]) -> str:
    input_data = prepare_input(question=question, table=table)
    input_data = input_data.to(model.device)
    outputs = model.generate(inputs=input_data, num_beams=10, top_k=10, max_length=700)
    result = tokenizer.decode(token_ids=outputs[0], skip_special_tokens=True)
    return result
print(inference(question='The only government that Kara-Khanid Khanate ever had was a monarchy.', table=['title', 'Capital', 'Common languages', 'Government', 'Khagan, Khan', 'Established ', 'Disestablished '])) | code |
128015173/cell_44 | [
"text_plain_output_1.png"
] | from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from typing import List
tokenizer = AutoTokenizer.from_pretrained('juierror/flan-t5-text2sql-with-schema')
model = AutoModelForSeq2SeqLM.from_pretrained('juierror/flan-t5-text2sql-with-schema')
def prepare_input(question: str, table: List[str]):
    table_prefix = 'table:'
    question_prefix = 'question:'
    join_table = ','.join(table)
    inputs = f'{question_prefix} {question} {table_prefix} {join_table}'
    input_ids = tokenizer(inputs, max_length=700, return_tensors='pt').input_ids
    return input_ids
def inference(question: str, table: List[str]) -> str:
    input_data = prepare_input(question=question, table=table)
    input_data = input_data.to(model.device)
    outputs = model.generate(inputs=input_data, num_beams=10, top_k=10, max_length=700)
    result = tokenizer.decode(token_ids=outputs[0], skip_special_tokens=True)
    return result
print(inference(question='jim mccrerey be first elect in 1988', table=['district', 'incumbent', 'party', 'first elected', 'result', 'candidates'])) | code |
128015173/cell_40 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from typing import List
tokenizer = AutoTokenizer.from_pretrained('juierror/flan-t5-text2sql-with-schema')
model = AutoModelForSeq2SeqLM.from_pretrained('juierror/flan-t5-text2sql-with-schema')
def prepare_input(question: str, table: List[str]):
    table_prefix = 'table:'
    question_prefix = 'question:'
    join_table = ','.join(table)
    inputs = f'{question_prefix} {question} {table_prefix} {join_table}'
    input_ids = tokenizer(inputs, max_length=700, return_tensors='pt').input_ids
    return input_ids
def inference(question: str, table: List[str]) -> str:
    input_data = prepare_input(question=question, table=table)
    input_data = input_data.to(model.device)
    outputs = model.generate(inputs=input_data, num_beams=10, top_k=10, max_length=700)
    result = tokenizer.decode(token_ids=outputs[0], skip_special_tokens=True)
    return result
print(inference(question='In 1982 , the redskin lose to the dallas cowboy. They scored only 10 total points. The cowboys scored 24 score of 24', table=['week', 'date', 'opponent', 'result', 'game site', 'record', 'attendance'])) | code |
128015173/cell_29 | [
"text_plain_output_1.png"
] | from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from typing import List
tokenizer = AutoTokenizer.from_pretrained('juierror/flan-t5-text2sql-with-schema')
model = AutoModelForSeq2SeqLM.from_pretrained('juierror/flan-t5-text2sql-with-schema')
def prepare_input(question: str, table: List[str]):
    table_prefix = 'table:'
    question_prefix = 'question:'
    join_table = ','.join(table)
    inputs = f'{question_prefix} {question} {table_prefix} {join_table}'
    input_ids = tokenizer(inputs, max_length=700, return_tensors='pt').input_ids
    return input_ids
def inference(question: str, table: List[str]) -> str:
    input_data = prepare_input(question=question, table=table)
    input_data = input_data.to(model.device)
    outputs = model.generate(inputs=input_data, num_beams=10, top_k=10, max_length=700)
    result = tokenizer.decode(token_ids=outputs[0], skip_special_tokens=True)
    return result
print(inference(question='Chris Pratt and Anna Faris have one son together.Is it true?', table=['title', 'Birth Name', 'Born', 'Residence', 'Occupation', 'Years active', 'Spouse(s)', 'Partner(s)', 'Children'])) | code |
128015173/cell_39 | [
"text_plain_output_1.png"
] | from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from typing import List
tokenizer = AutoTokenizer.from_pretrained('juierror/flan-t5-text2sql-with-schema')
model = AutoModelForSeq2SeqLM.from_pretrained('juierror/flan-t5-text2sql-with-schema')
def prepare_input(question: str, table: List[str]):
    table_prefix = 'table:'
    question_prefix = 'question:'
    join_table = ','.join(table)
    inputs = f'{question_prefix} {question} {table_prefix} {join_table}'
    input_ids = tokenizer(inputs, max_length=700, return_tensors='pt').input_ids
    return input_ids
def inference(question: str, table: List[str]) -> str:
    input_data = prepare_input(question=question, table=table)
    input_data = input_data.to(model.device)
    outputs = model.generate(inputs=input_data, num_beams=10, top_k=10, max_length=700)
    result = tokenizer.decode(token_ids=outputs[0], skip_special_tokens=True)
    return result
print(inference(question='in 1982 , the redskin lose to the dallas cowboy. They scored only 10 total points. The cowboys scored 24.Is it true?', table=['week', 'date', 'opponent', 'result', 'game site', 'record', 'attendance'])) | code |
128015173/cell_26 | [
"text_plain_output_1.png"
] | from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from typing import List
tokenizer = AutoTokenizer.from_pretrained('juierror/flan-t5-text2sql-with-schema')
model = AutoModelForSeq2SeqLM.from_pretrained('juierror/flan-t5-text2sql-with-schema')
def prepare_input(question: str, table: List[str]):
    table_prefix = 'table:'
    question_prefix = 'question:'
    join_table = ','.join(table)
    inputs = f'{question_prefix} {question} {table_prefix} {join_table}'
    input_ids = tokenizer(inputs, max_length=700, return_tensors='pt').input_ids
    return input_ids
def inference(question: str, table: List[str]) -> str:
    input_data = prepare_input(question=question, table=table)
    input_data = input_data.to(model.device)
    outputs = model.generate(inputs=input_data, num_beams=10, top_k=10, max_length=700)
    result = tokenizer.decode(token_ids=outputs[0], skip_special_tokens=True)
    return result
print(inference(question='Chris Pratt was born in Los Angeles where he currently resides. Is it true?', table=['title', 'Birth Name', 'Born', 'Residence', 'Occupation', 'Years active', 'Spouse(s)', 'Partner(s)', 'Children'])) | code |
128015173/cell_48 | [
"text_plain_output_1.png"
] | from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from typing import List
tokenizer = AutoTokenizer.from_pretrained('juierror/flan-t5-text2sql-with-schema')
model = AutoModelForSeq2SeqLM.from_pretrained('juierror/flan-t5-text2sql-with-schema')
def prepare_input(question: str, table: List[str]):
    table_prefix = 'table:'
    question_prefix = 'question:'
    join_table = ','.join(table)
    inputs = f'{question_prefix} {question} {table_prefix} {join_table}'
    input_ids = tokenizer(inputs, max_length=700, return_tensors='pt').input_ids
    return input_ids
def inference(question: str, table: List[str]) -> str:
    input_data = prepare_input(question=question, table=table)
    input_data = input_data.to(model.device)
    outputs = model.generate(inputs=input_data, num_beams=10, top_k=10, max_length=700)
    result = tokenizer.decode(token_ids=outputs[0], skip_special_tokens=True)
    return result
def infer(prompt):
    input_data = tokenizer(prompt, max_length=700, return_tensors='pt').input_ids
    outputs = model.generate(inputs=input_data, num_beams=10, top_k=10, max_length=700)
    result = tokenizer.decode(token_ids=outputs[0], skip_special_tokens=True)
    return result
prompt1 = 'Task Description: In this task, your goal is to generate a SQL query using the input sentence and table schema given. The generated SQL query should be able to verify the given sentence without requiring any additional knowledge.\n\nExample 1:\nTable Schema: \'Author\', \'Publication date\', \'Genre\', \'Publisher\', \'Pages\', \'ISBN\'\nInput: The book "To Kill a Mockingbird" was written by Harper Lee, published by J. B. Lippincott Company in 1960, and has 281 pages.\nOutput: SELECT * FROM table WHERE title = "To Kill a Mockingbird" AND Author = "Harper Lee" AND Publisher = "J. B. Lippincott Company" AND Publication date = "1960" AND Pages = 281\n\nExample 2:\nTable Schema: \'Player\', \'Team\', \'Position\', \'Number\', \'Age\', \'Height\', \'Weight\', \'Nationality\'\nInput: Lionel Messi, who is from Argentina, plays for Paris Saint-Germain and wears the number 30 jersey.\nOutput: SELECT * FROM table WHERE Player = "Lionel Messi" AND Nationality = "Argentina" AND Team = "Paris Saint-Germain" AND Number = 30\n\nExample 3:\nTable Schema: \'Movie Title\', \'Director\', \'Lead Actor\', \'Lead Actress\', \'Release Year\', \'Budget\', \'Box Office Collection\'\nInput: The movie "Forrest Gump" was directed by Robert Zemeckis, starred Tom Hanks and Robin Wright, was released in 1994, and had a budget of $55 million.\nOutput: SELECT * FROM table WHERE "Movie Title" = "Forrest Gump" AND Director = "Robert Zemeckis" AND "Lead Actor" = "Tom Hanks" AND "Lead Actress" = "Robin Wright" AND "Release Year" = 1994 AND Budget = "$55 million"\n\nNow complete the following:\nTable Schema: \'title\', \'Birth Name\', \'Born\', \'Residence\', \'Occupation\', \'Years active\', \'Spouse(s)\', \'Partner(s)\', \'Children\'\nInput: Chris Pratt has been in his current profession since the turn of the century.\nOutput: \n'
print(infer(prompt1)) | code |
128015173/cell_19 | [
"text_plain_output_1.png"
] | from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from typing import List
tokenizer = AutoTokenizer.from_pretrained('juierror/flan-t5-text2sql-with-schema')
model = AutoModelForSeq2SeqLM.from_pretrained('juierror/flan-t5-text2sql-with-schema')
def prepare_input(question: str, table: List[str]):
    table_prefix = 'table:'
    question_prefix = 'question:'
    join_table = ','.join(table)
    inputs = f'{question_prefix} {question} {table_prefix} {join_table}'
    input_ids = tokenizer(inputs, max_length=700, return_tensors='pt').input_ids
    return input_ids
def inference(question: str, table: List[str]) -> str:
    input_data = prepare_input(question=question, table=table)
    input_data = input_data.to(model.device)
    outputs = model.generate(inputs=input_data, num_beams=10, top_k=10, max_length=700)
    result = tokenizer.decode(token_ids=outputs[0], skip_special_tokens=True)
    return result
print(inference(question='in alberta greens , the year 2008 was the only year were over 50 candidates were nominated', table=['election', 'of candidates nominated', 'of seats won', 'of total votes', '% of popular vote'])) | code |
128015173/cell_49 | [
"text_plain_output_1.png"
] | from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from typing import List
tokenizer = AutoTokenizer.from_pretrained('juierror/flan-t5-text2sql-with-schema')
model = AutoModelForSeq2SeqLM.from_pretrained('juierror/flan-t5-text2sql-with-schema')
def prepare_input(question: str, table: List[str]):
    table_prefix = 'table:'
    question_prefix = 'question:'
    join_table = ','.join(table)
    inputs = f'{question_prefix} {question} {table_prefix} {join_table}'
    input_ids = tokenizer(inputs, max_length=700, return_tensors='pt').input_ids
    return input_ids
def inference(question: str, table: List[str]) -> str:
    input_data = prepare_input(question=question, table=table)
    input_data = input_data.to(model.device)
    outputs = model.generate(inputs=input_data, num_beams=10, top_k=10, max_length=700)
    result = tokenizer.decode(token_ids=outputs[0], skip_special_tokens=True)
    return result
def infer(prompt):
    input_data = tokenizer(prompt, max_length=700, return_tensors='pt').input_ids
    outputs = model.generate(inputs=input_data, num_beams=10, top_k=10, max_length=700)
    result = tokenizer.decode(token_ids=outputs[0], skip_special_tokens=True)
    return result
prompt2 = "Task Description: In this task, your goal is to generate a SQL query using the input sentence and table schema given. The generated SQL query should be able to verify the given sentence without requiring any additional knowledge.\n\nNow complete the following:\nTable Schema: 'title', 'Birth Name', 'Born', 'Residence', 'Occupation', 'Years active', 'Spouse(s)', 'Partner(s)', 'Children'\nInput: Chris Pratt has been in his current profession since the turn of the century.\nOutput:\n"
print(infer(prompt2)) | code |
128015173/cell_32 | [
"text_plain_output_1.png"
] | from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from typing import List
tokenizer = AutoTokenizer.from_pretrained('juierror/flan-t5-text2sql-with-schema')
model = AutoModelForSeq2SeqLM.from_pretrained('juierror/flan-t5-text2sql-with-schema')
def prepare_input(question: str, table: List[str]):
    table_prefix = 'table:'
    question_prefix = 'question:'
    join_table = ','.join(table)
    inputs = f'{question_prefix} {question} {table_prefix} {join_table}'
    input_ids = tokenizer(inputs, max_length=700, return_tensors='pt').input_ids
    return input_ids
def inference(question: str, table: List[str]) -> str:
    input_data = prepare_input(question=question, table=table)
    input_data = input_data.to(model.device)
    outputs = model.generate(inputs=input_data, num_beams=10, top_k=10, max_length=700)
    result = tokenizer.decode(token_ids=outputs[0], skip_special_tokens=True)
    return result
print(inference(question='Kara-Khanid Khanate had many people that were Arabic.Is it true?', table=['title', 'Capital', 'Common languages', 'Government', 'Khagan, Khan', 'Established ', 'Disestablished '])) | code |
128015173/cell_28 | [
"text_plain_output_1.png"
] | from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from typing import List
tokenizer = AutoTokenizer.from_pretrained('juierror/flan-t5-text2sql-with-schema')
model = AutoModelForSeq2SeqLM.from_pretrained('juierror/flan-t5-text2sql-with-schema')
def prepare_input(question: str, table: List[str]):
    table_prefix = 'table:'
    question_prefix = 'question:'
    join_table = ','.join(table)
    inputs = f'{question_prefix} {question} {table_prefix} {join_table}'
    input_ids = tokenizer(inputs, max_length=700, return_tensors='pt').input_ids
    return input_ids
def inference(question: str, table: List[str]) -> str:
    input_data = prepare_input(question=question, table=table)
    input_data = input_data.to(model.device)
    outputs = model.generate(inputs=input_data, num_beams=10, top_k=10, max_length=700)
    result = tokenizer.decode(token_ids=outputs[0], skip_special_tokens=True)
    return result
print(inference(question='Chris Pratt broke up with Anna Faris because of Katherine Schwarzenegger.Is it true?', table=['title', 'Birth Name', 'Born', 'Residence', 'Occupation', 'Years active', 'Spouse(s)', 'Partner(s)', 'Children'])) | code |
128015173/cell_8 | [
"text_plain_output_1.png"
] | from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from typing import List
tokenizer = AutoTokenizer.from_pretrained('juierror/flan-t5-text2sql-with-schema')
model = AutoModelForSeq2SeqLM.from_pretrained('juierror/flan-t5-text2sql-with-schema')
def prepare_input(question: str, table: List[str]):
    table_prefix = 'table:'
    question_prefix = 'question:'
    join_table = ','.join(table)
    inputs = f'{question_prefix} {question} {table_prefix} {join_table}'
    input_ids = tokenizer(inputs, max_length=700, return_tensors='pt').input_ids
    return input_ids
def inference(question: str, table: List[str]) -> str:
    input_data = prepare_input(question=question, table=table)
    input_data = input_data.to(model.device)
    outputs = model.generate(inputs=input_data, num_beams=10, top_k=10, max_length=700)
    result = tokenizer.decode(token_ids=outputs[0], skip_special_tokens=True)
    return result
print(inference(question='here were twelve occasions where the length was sixty minutes', table=['round', 'circuit', 'date', 'length', 'pole position', 'gt3 winner', 'gt4 winner'])) | code |
128015173/cell_38 | [
"text_plain_output_1.png"
] | from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from typing import List
tokenizer = AutoTokenizer.from_pretrained('juierror/flan-t5-text2sql-with-schema')
model = AutoModelForSeq2SeqLM.from_pretrained('juierror/flan-t5-text2sql-with-schema')
def prepare_input(question: str, table: List[str]):
    table_prefix = 'table:'
    question_prefix = 'question:'
    join_table = ','.join(table)
    inputs = f'{question_prefix} {question} {table_prefix} {join_table}'
    input_ids = tokenizer(inputs, max_length=700, return_tensors='pt').input_ids
    return input_ids
def inference(question: str, table: List[str]) -> str:
    input_data = prepare_input(question=question, table=table)
    input_data = input_data.to(model.device)
    outputs = model.generate(inputs=input_data, num_beams=10, top_k=10, max_length=700)
    result = tokenizer.decode(token_ids=outputs[0], skip_special_tokens=True)
    return result
print(inference(question='n the 1982 , the washington redskins beat the new orleans saint 27 to 10 , with their win score match a win against the new york giant earlier in the season', table=['week', 'date', 'opponent', 'result', 'game site', 'record', 'attendance'])) | code |
128015173/cell_3 | [
"text_plain_output_1.png"
] | from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
tokenizer = AutoTokenizer.from_pretrained('juierror/flan-t5-text2sql-with-schema')
model = AutoModelForSeq2SeqLM.from_pretrained('juierror/flan-t5-text2sql-with-schema') | code |
128015173/cell_35 | [
"text_plain_output_1.png"
] | from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from typing import List
tokenizer = AutoTokenizer.from_pretrained('juierror/flan-t5-text2sql-with-schema')
model = AutoModelForSeq2SeqLM.from_pretrained('juierror/flan-t5-text2sql-with-schema')
def prepare_input(question: str, table: List[str]):
    table_prefix = 'table:'
    question_prefix = 'question:'
    join_table = ','.join(table)
    inputs = f'{question_prefix} {question} {table_prefix} {join_table}'
    input_ids = tokenizer(inputs, max_length=700, return_tensors='pt').input_ids
    return input_ids
def inference(question: str, table: List[str]) -> str:
    input_data = prepare_input(question=question, table=table)
    input_data = input_data.to(model.device)
    outputs = model.generate(inputs=input_data, num_beams=10, top_k=10, max_length=700)
    result = tokenizer.decode(token_ids=outputs[0], skip_special_tokens=True)
    return result
print(inference(question='Tengrism was the religion of the Kara-Khanid Khanate for longer than Islam was.Is it true?', table=['title', 'Capital', 'Common languages', 'Government', 'Khagan, Khan', 'Established ', 'Disestablished '])) | code |
128015173/cell_43 | [
"text_plain_output_1.png"
] | from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from typing import List
tokenizer = AutoTokenizer.from_pretrained('juierror/flan-t5-text2sql-with-schema')
model = AutoModelForSeq2SeqLM.from_pretrained('juierror/flan-t5-text2sql-with-schema')
def prepare_input(question: str, table: List[str]):
    table_prefix = 'table:'
    question_prefix = 'question:'
    join_table = ','.join(table)
    inputs = f'{question_prefix} {question} {table_prefix} {join_table}'
    input_ids = tokenizer(inputs, max_length=700, return_tensors='pt').input_ids
    return input_ids
def inference(question: str, table: List[str]) -> str:
    input_data = prepare_input(question=question, table=table)
    input_data = input_data.to(model.device)
    outputs = model.generate(inputs=input_data, num_beams=10, top_k=10, max_length=700)
    result = tokenizer.decode(token_ids=outputs[0], skip_special_tokens=True)
    return result
print(inference(question='richard baker be louisanas 6th district incumbent', table=['district', 'incumbent', 'party', 'first elected', 'result', 'candidates'])) | code |
128015173/cell_14 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from typing import List
tokenizer = AutoTokenizer.from_pretrained('juierror/flan-t5-text2sql-with-schema')
model = AutoModelForSeq2SeqLM.from_pretrained('juierror/flan-t5-text2sql-with-schema')
def prepare_input(question: str, table: List[str]):
    table_prefix = 'table:'
    question_prefix = 'question:'
    join_table = ','.join(table)
    inputs = f'{question_prefix} {question} {table_prefix} {join_table}'
    input_ids = tokenizer(inputs, max_length=700, return_tensors='pt').input_ids
    return input_ids
def inference(question: str, table: List[str]) -> str:
    input_data = prepare_input(question=question, table=table)
    input_data = input_data.to(model.device)
    outputs = model.generate(inputs=input_data, num_beams=10, top_k=10, max_length=700)
    result = tokenizer.decode(token_ids=outputs[0], skip_special_tokens=True)
    return result
print(inference(question='pivnice is the only settlement in vojvodina with slovaks as the largest ethnic group.', table=['Settlement', 'Cyrillic Name', 'Other Names', 'Type', 'Population (2011)', 'Largest ethnic group (2002)', 'Dominant religion (2002)'])) | code |
128015173/cell_27 | [
"text_plain_output_1.png"
] | from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from typing import List
tokenizer = AutoTokenizer.from_pretrained('juierror/flan-t5-text2sql-with-schema')
model = AutoModelForSeq2SeqLM.from_pretrained('juierror/flan-t5-text2sql-with-schema')
def prepare_input(question: str, table: List[str]):
    table_prefix = 'table:'
    question_prefix = 'question:'
    join_table = ','.join(table)
    inputs = f'{question_prefix} {question} {table_prefix} {join_table}'
    input_ids = tokenizer(inputs, max_length=700, return_tensors='pt').input_ids
    return input_ids
def inference(question: str, table: List[str]) -> str:
    input_data = prepare_input(question=question, table=table)
    input_data = input_data.to(model.device)
    outputs = model.generate(inputs=input_data, num_beams=10, top_k=10, max_length=700)
    result = tokenizer.decode(token_ids=outputs[0], skip_special_tokens=True)
    return result
print(inference(question='Chris Pratt and Anna Faris have one son together.', table=['title', 'Birth Name', 'Born', 'Residence', 'Occupation', 'Years active', 'Spouse(s)', 'Partner(s)', 'Children'])) | code |
128015173/cell_36 | [
"text_plain_output_1.png"
] | from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from typing import List
tokenizer = AutoTokenizer.from_pretrained('juierror/flan-t5-text2sql-with-schema')
model = AutoModelForSeq2SeqLM.from_pretrained('juierror/flan-t5-text2sql-with-schema')
def prepare_input(question: str, table: List[str]):
    table_prefix = 'table:'
    question_prefix = 'question:'
    join_table = ','.join(table)
    inputs = f'{question_prefix} {question} {table_prefix} {join_table}'
    input_ids = tokenizer(inputs, max_length=700, return_tensors='pt').input_ids
    return input_ids
def inference(question: str, table: List[str]) -> str:
    input_data = prepare_input(question=question, table=table)
    input_data = input_data.to(model.device)
    outputs = model.generate(inputs=input_data, num_beams=10, top_k=10, max_length=700)
    result = tokenizer.decode(token_ids=outputs[0], skip_special_tokens=True)
    return result
print(inference(question='Tengrism was the religion of the Kara-Khanid Khanate. It was for longer than Islam was.Is it true?', table=['title', 'Capital', 'Common languages', 'Government', 'Khagan, Khan', 'Established ', 'Disestablished '])) | code |
90137233/cell_34 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import GaussianNB
train_y.value_counts()
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer(stop_words='english')
train_x_vector = tfidf.fit_transform(train_x)
test_x_vector = tfidf.transform(test_x)
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
gnb.fit(train_x_vector.toarray(), train_y) | code |
90137233/cell_30 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import SVC
import pandas as pd
df_review = pd.read_csv('/kaggle/input/imdb-dataset-of-50k-movie-reviews/IMDB Dataset.csv')
df_review
df_positive = df_review[df_review['sentiment'] == 'positive'][:9000]
df_negative = df_review[df_review['sentiment'] == 'negative'][:1000]
df_review_imb = pd.concat([df_positive, df_negative])
train_y.value_counts()
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer(stop_words='english')
train_x_vector = tfidf.fit_transform(train_x)
test_x_vector = tfidf.transform(test_x)
pd.DataFrame.sparse.from_spmatrix(train_x_vector, index=train_x.index, columns=tfidf.get_feature_names())
from sklearn.svm import SVC
svc = SVC(kernel='linear')
svc.fit(train_x_vector, train_y)
print(svc.predict(tfidf.transform(['A good movie'])))
print(svc.predict(tfidf.transform(['An excellent movie'])))
print(svc.predict(tfidf.transform(['I did not like this movie at all I gave this movie away']))) | code |
90137233/cell_20 | [
"text_html_output_1.png"
] | train_y.value_counts() | code |
90137233/cell_6 | [
"text_plain_output_1.png"
] | import os
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
90137233/cell_40 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
df_review = pd.read_csv('/kaggle/input/imdb-dataset-of-50k-movie-reviews/IMDB Dataset.csv')
df_review
df_positive = df_review[df_review['sentiment'] == 'positive'][:9000]
df_negative = df_review[df_review['sentiment'] == 'negative'][:1000]
df_review_imb = pd.concat([df_positive, df_negative])
train_y.value_counts()
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer(stop_words='english')
train_x_vector = tfidf.fit_transform(train_x)
test_x_vector = tfidf.transform(test_x)
pd.DataFrame.sparse.from_spmatrix(train_x_vector, index=train_x.index, columns=tfidf.get_feature_names())
from sklearn.svm import SVC
svc = SVC(kernel='linear')
svc.fit(train_x_vector, train_y)
from sklearn.tree import DecisionTreeClassifier
dec_tree = DecisionTreeClassifier()
dec_tree.fit(train_x_vector, train_y)
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
gnb.fit(train_x_vector.toarray(), train_y)
from sklearn.linear_model import LogisticRegression
log_reg = LogisticRegression()
log_reg.fit(train_x_vector, train_y)
print(svc.score(test_x_vector, test_y))
print(dec_tree.score(test_x_vector, test_y))
print(gnb.score(test_x_vector.toarray(), test_y))
print(log_reg.score(test_x_vector, test_y)) | code |
90137233/cell_29 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import SVC
train_y.value_counts()
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer(stop_words='english')
train_x_vector = tfidf.fit_transform(train_x)
test_x_vector = tfidf.transform(test_x)
from sklearn.svm import SVC
svc = SVC(kernel='linear')
svc.fit(train_x_vector, train_y) | code |
90137233/cell_26 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
df_review = pd.read_csv('/kaggle/input/imdb-dataset-of-50k-movie-reviews/IMDB Dataset.csv')
df_review
df_positive = df_review[df_review['sentiment'] == 'positive'][:9000]
df_negative = df_review[df_review['sentiment'] == 'negative'][:1000]
df_review_imb = pd.concat([df_positive, df_negative])
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer(stop_words='english')
train_x_vector = tfidf.fit_transform(train_x)
test_x_vector = tfidf.transform(test_x)
pd.DataFrame.sparse.from_spmatrix(train_x_vector, index=train_x.index, columns=tfidf.get_feature_names()) | code |
90137233/cell_50 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.model_selection import GridSearchCV
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
df_review = pd.read_csv('/kaggle/input/imdb-dataset-of-50k-movie-reviews/IMDB Dataset.csv')
df_review
df_positive = df_review[df_review['sentiment'] == 'positive'][:9000]
df_negative = df_review[df_review['sentiment'] == 'negative'][:1000]
df_review_imb = pd.concat([df_positive, df_negative])
train_y.value_counts()
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer(stop_words='english')
train_x_vector = tfidf.fit_transform(train_x)
test_x_vector = tfidf.transform(test_x)
pd.DataFrame.sparse.from_spmatrix(train_x_vector, index=train_x.index, columns=tfidf.get_feature_names())
from sklearn.svm import SVC
svc = SVC(kernel='linear')
svc.fit(train_x_vector, train_y)
from sklearn.tree import DecisionTreeClassifier
dec_tree = DecisionTreeClassifier()
dec_tree.fit(train_x_vector, train_y)
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
gnb.fit(train_x_vector.toarray(), train_y)
from sklearn.linear_model import LogisticRegression
log_reg = LogisticRegression()
log_reg.fit(train_x_vector, train_y)
from sklearn.metrics import f1_score
f1_score(test_y, svc.predict(test_x_vector), labels=['positive', 'negative'], average=None)
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
conf_mat = confusion_matrix(test_y, svc.predict(test_x_vector), labels=['positive', 'negative'])
conf_mat
from sklearn.model_selection import GridSearchCV
params = {'C': [1, 4, 8, 16, 32], 'kernel': ['linear', 'rbf']}
svc = SVC()
svc_grid = GridSearchCV(svc, params, cv=5)
svc_grid.fit(train_x_vector, train_y) | code |
90137233/cell_45 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.metrics import f1_score
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
df_review = pd.read_csv('/kaggle/input/imdb-dataset-of-50k-movie-reviews/IMDB Dataset.csv')
df_review
df_positive = df_review[df_review['sentiment'] == 'positive'][:9000]
df_negative = df_review[df_review['sentiment'] == 'negative'][:1000]
df_review_imb = pd.concat([df_positive, df_negative])
train_y.value_counts()
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer(stop_words='english')
train_x_vector = tfidf.fit_transform(train_x)
test_x_vector = tfidf.transform(test_x)
pd.DataFrame.sparse.from_spmatrix(train_x_vector, index=train_x.index, columns=tfidf.get_feature_names())
from sklearn.svm import SVC
svc = SVC(kernel='linear')
svc.fit(train_x_vector, train_y)
from sklearn.tree import DecisionTreeClassifier
dec_tree = DecisionTreeClassifier()
dec_tree.fit(train_x_vector, train_y)
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
gnb.fit(train_x_vector.toarray(), train_y)
from sklearn.linear_model import LogisticRegression
log_reg = LogisticRegression()
log_reg.fit(train_x_vector, train_y)
from sklearn.metrics import f1_score
f1_score(test_y, svc.predict(test_x_vector), labels=['positive', 'negative'], average=None)
from sklearn.metrics import classification_report
print(classification_report(test_y, svc.predict(test_x_vector), labels=['positive', 'negative'])) | code |
90137233/cell_32 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.tree import DecisionTreeClassifier
train_y.value_counts()
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer(stop_words='english')
train_x_vector = tfidf.fit_transform(train_x)
test_x_vector = tfidf.transform(test_x)
from sklearn.tree import DecisionTreeClassifier
dec_tree = DecisionTreeClassifier()
dec_tree.fit(train_x_vector, train_y) | code |
90137233/cell_51 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.model_selection import GridSearchCV
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
df_review = pd.read_csv('/kaggle/input/imdb-dataset-of-50k-movie-reviews/IMDB Dataset.csv')
df_review
df_positive = df_review[df_review['sentiment'] == 'positive'][:9000]
df_negative = df_review[df_review['sentiment'] == 'negative'][:1000]
df_review_imb = pd.concat([df_positive, df_negative])
train_y.value_counts()
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer(stop_words='english')
train_x_vector = tfidf.fit_transform(train_x)
test_x_vector = tfidf.transform(test_x)
pd.DataFrame.sparse.from_spmatrix(train_x_vector, index=train_x.index, columns=tfidf.get_feature_names())
from sklearn.svm import SVC
svc = SVC(kernel='linear')
svc.fit(train_x_vector, train_y)
from sklearn.tree import DecisionTreeClassifier
dec_tree = DecisionTreeClassifier()
dec_tree.fit(train_x_vector, train_y)
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
gnb.fit(train_x_vector.toarray(), train_y)
from sklearn.linear_model import LogisticRegression
log_reg = LogisticRegression()
log_reg.fit(train_x_vector, train_y)
from sklearn.metrics import f1_score
f1_score(test_y, svc.predict(test_x_vector), labels=['positive', 'negative'], average=None)
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
conf_mat = confusion_matrix(test_y, svc.predict(test_x_vector), labels=['positive', 'negative'])
conf_mat
from sklearn.model_selection import GridSearchCV
params = {'C': [1, 4, 8, 16, 32], 'kernel': ['linear', 'rbf']}
svc = SVC()
svc_grid = GridSearchCV(svc, params, cv=5)
svc_grid.fit(train_x_vector, train_y)
print(svc_grid.best_params_)
print(svc_grid.best_estimator_) | code |
90137233/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
df_review = pd.read_csv('/kaggle/input/imdb-dataset-of-50k-movie-reviews/IMDB Dataset.csv')
df_review | code |
90137233/cell_16 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('darkgrid')
df_review = pd.read_csv('/kaggle/input/imdb-dataset-of-50k-movie-reviews/IMDB Dataset.csv')
df_review
df_positive = df_review[df_review['sentiment'] == 'positive'][:9000]
df_negative = df_review[df_review['sentiment'] == 'negative'][:1000]
df_review_imb = pd.concat([df_positive, df_negative])
colors = sns.color_palette('deep')
print(df_review_imb.value_counts('sentiment'))
print(df_review_bal.value_counts('sentiment')) | code |
90137233/cell_47 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
df_review = pd.read_csv('/kaggle/input/imdb-dataset-of-50k-movie-reviews/IMDB Dataset.csv')
df_review
df_positive = df_review[df_review['sentiment'] == 'positive'][:9000]
df_negative = df_review[df_review['sentiment'] == 'negative'][:1000]
df_review_imb = pd.concat([df_positive, df_negative])
train_y.value_counts()
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer(stop_words='english')
train_x_vector = tfidf.fit_transform(train_x)
test_x_vector = tfidf.transform(test_x)
pd.DataFrame.sparse.from_spmatrix(train_x_vector, index=train_x.index, columns=tfidf.get_feature_names())
from sklearn.svm import SVC
svc = SVC(kernel='linear')
svc.fit(train_x_vector, train_y)
from sklearn.tree import DecisionTreeClassifier
dec_tree = DecisionTreeClassifier()
dec_tree.fit(train_x_vector, train_y)
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
gnb.fit(train_x_vector.toarray(), train_y)
from sklearn.linear_model import LogisticRegression
log_reg = LogisticRegression()
log_reg.fit(train_x_vector, train_y)
from sklearn.metrics import f1_score
f1_score(test_y, svc.predict(test_x_vector), labels=['positive', 'negative'], average=None)
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
conf_mat = confusion_matrix(test_y, svc.predict(test_x_vector), labels=['positive', 'negative'])
conf_mat | code |
90137233/cell_43 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
df_review = pd.read_csv('/kaggle/input/imdb-dataset-of-50k-movie-reviews/IMDB Dataset.csv')
df_review
df_positive = df_review[df_review['sentiment'] == 'positive'][:9000]
df_negative = df_review[df_review['sentiment'] == 'negative'][:1000]
df_review_imb = pd.concat([df_positive, df_negative])
train_y.value_counts()
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer(stop_words='english')
train_x_vector = tfidf.fit_transform(train_x)
test_x_vector = tfidf.transform(test_x)
pd.DataFrame.sparse.from_spmatrix(train_x_vector, index=train_x.index, columns=tfidf.get_feature_names())
from sklearn.svm import SVC
svc = SVC(kernel='linear')
svc.fit(train_x_vector, train_y)
from sklearn.tree import DecisionTreeClassifier
dec_tree = DecisionTreeClassifier()
dec_tree.fit(train_x_vector, train_y)
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
gnb.fit(train_x_vector.toarray(), train_y)
from sklearn.linear_model import LogisticRegression
log_reg = LogisticRegression()
log_reg.fit(train_x_vector, train_y)
from sklearn.metrics import f1_score
f1_score(test_y, svc.predict(test_x_vector), labels=['positive', 'negative'], average=None) | code |
90137233/cell_14 | [
"text_html_output_1.png"
] | from imblearn.under_sampling import RandomUnderSampler
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('darkgrid')
df_review = pd.read_csv('/kaggle/input/imdb-dataset-of-50k-movie-reviews/IMDB Dataset.csv')
df_review
df_positive = df_review[df_review['sentiment'] == 'positive'][:9000]
df_negative = df_review[df_review['sentiment'] == 'negative'][:1000]
df_review_imb = pd.concat([df_positive, df_negative])
colors = sns.color_palette('deep')
from imblearn.under_sampling import RandomUnderSampler
rus = RandomUnderSampler(random_state=0)
df_review_bal, df_review_bal['sentiment'] = rus.fit_resample(df_review_imb[['review']], df_review_imb['sentiment'])
df_review_bal | code |
90137233/cell_12 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('darkgrid')
df_review = pd.read_csv('/kaggle/input/imdb-dataset-of-50k-movie-reviews/IMDB Dataset.csv')
df_review
df_positive = df_review[df_review['sentiment'] == 'positive'][:9000]
df_negative = df_review[df_review['sentiment'] == 'negative'][:1000]
df_review_imb = pd.concat([df_positive, df_negative])
colors = sns.color_palette('deep')
plt.figure(figsize=(8, 4), tight_layout=True)
plt.bar(x=['Positive', 'Negative'], height=df_review_imb.value_counts(['sentiment']), color=colors[:2])
plt.title('Sentiment')
plt.savefig('sentiment.png')
plt.show() | code |
90137233/cell_36 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
train_y.value_counts()
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer(stop_words='english')
train_x_vector = tfidf.fit_transform(train_x)
test_x_vector = tfidf.transform(test_x)
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
gnb.fit(train_x_vector.toarray(), train_y)
from sklearn.linear_model import LogisticRegression
log_reg = LogisticRegression()
log_reg.fit(train_x_vector, train_y) | code |
33105010/cell_13 | [
"image_output_1.png"
] | from tensorflow.keras import Model
from tensorflow.keras.layers import Input, Dense
import matplotlib.pyplot as plt
import tensorflow as tf
def sample_dataset():
    dataset_shape = (2000, 1)
    return tf.random.normal(mean=8.0, shape=dataset_shape, stddev=0.5, dtype=tf.float32)
axes = plt.gca()
axes.set_xlim([-1, 11])
axes.set_ylim([0, 70])
def generator(input_shape):
    """Defines the generator keras.Model.
    Args:
        input_shape: the desired input shape (e.g.: (latent_space_size))
    Returns:
        G: The generator model
    """
    inputs = Input(input_shape)
    net = Dense(units=64, activation=tf.nn.elu, name='fc1')(inputs)
    net = Dense(units=64, activation=tf.nn.elu, name='fc2')(net)
    net = Dense(units=1, name='G')(net)
    G = Model(inputs=inputs, outputs=net)
    return G
def disciminator(input_shape):
    """Defines the Discriminator keras.Model.
    Args:
        input_shape: the desired input shape (e.g.: (the generator output shape))
    Returns:
        D: the Discriminator model
    """
    inputs = Input(input_shape)
    net = Dense(units=32, activation=tf.nn.elu, name='fc1')(inputs)
    net = Dense(units=1, name='D')(net)
    D = Model(inputs=inputs, outputs=net)
    return D
input_shape = (1,)
D = disciminator(input_shape)
latent_space_shape = (100,)
G = generator(latent_space_shape)
bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
def d_loss(d_real, d_fake):
    """The disciminator loss function."""
    return bce(tf.ones_like(d_real), d_real) + bce(tf.zeros_like(d_fake), d_fake)
def g_loss(generated_output):
    """The Generator loss function."""
    return bce(tf.ones_like(generated_output), generated_output)
optimizer = tf.keras.optimizers.Adam(1e-05)
@tf.function
def train_step():
    with tf.GradientTape(persistent=True) as tape:
        real_data = sample_dataset()
        noise_vector = tf.random.normal(mean=0, stddev=1, shape=(real_data.shape[0], latent_space_shape[0]))
        fake_data = G(noise_vector)
        d_fake_data = D(fake_data)
        d_real_data = D(real_data)
        d_loss_value = d_loss(d_real_data, d_fake_data)
        g_loss_value = g_loss(d_fake_data)
    d_gradients = tape.gradient(d_loss_value, D.trainable_variables)
    g_gradients = tape.gradient(g_loss_value, G.trainable_variables)
    del tape
    optimizer.apply_gradients(zip(d_gradients, D.trainable_variables))
    optimizer.apply_gradients(zip(g_gradients, G.trainable_variables))
    return (real_data, fake_data, g_loss_value, d_loss_value)
fig, ax = plt.subplots()
for step in range(40000):
    real_data, fake_data, g_loss_value, d_loss_value = train_step()
    if step % 2000 == 0:
        print('G loss: ', g_loss_value.numpy(), ' D loss: ', d_loss_value.numpy(), ' step: ', step)
        ax.hist(fake_data.numpy(), 100)
        ax.hist(real_data.numpy(), 100)
        props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
        textstr = f'step={step}'
        ax.text(0.05, 0.95, textstr, transform=ax.transAxes, fontsize=14, verticalalignment='top', bbox=props)
        axes = plt.gca()
        axes.set_xlim([-1, 11])
        axes.set_ylim([0, 60])
        display(plt.gcf())
        plt.gca().clear()
33105010/cell_4 | [
"image_output_11.png",
"text_plain_output_5.png",
"text_plain_output_15.png",
"image_output_17.png",
"text_plain_output_9.png",
"image_output_14.png",
"text_plain_output_20.png",
"text_plain_output_4.png",
"text_plain_output_13.png",
"image_output_13.png",
"image_output_5.png",
"text_plain_output_14.png",
"image_output_18.png",
"image_output_21.png",
"text_plain_output_10.png",
"text_plain_output_6.png",
"image_output_7.png",
"image_output_20.png",
"text_plain_output_18.png",
"text_plain_output_3.png",
"image_output_4.png",
"text_plain_output_7.png",
"image_output_8.png",
"text_plain_output_16.png",
"image_output_16.png",
"text_plain_output_8.png",
"image_output_6.png",
"image_output_12.png",
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_3.png",
"text_plain_output_19.png",
"image_output_2.png",
"image_output_1.png",
"image_output_10.png",
"text_plain_output_17.png",
"text_plain_output_11.png",
"text_plain_output_12.png",
"image_output_15.png",
"image_output_9.png",
"image_output_19.png"
] | import matplotlib.pyplot as plt
import tensorflow as tf
def sample_dataset():
    dataset_shape = (2000, 1)
    return tf.random.normal(mean=8.0, shape=dataset_shape, stddev=0.5, dtype=tf.float32)
plt.hist(sample_dataset().numpy(), 100)
axes = plt.gca()
axes.set_xlim([-1, 11])
axes.set_ylim([0, 70])
plt.show() | code |
130004323/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import warnings
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.linear_model import ElasticNet
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from xgboost import XGBRegressor
from sklearn.preprocessing import PolynomialFeatures | code |
130004323/cell_3 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
plt.figure(figsize=(10, 8))
sns.heatmap(data.corr(), cmap='RdBu')
plt.title('Correlations Between Variables', size=15)
plt.show() | code |
130004323/cell_12 | [
"image_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split, cross_val_score
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
important = list(data.corr()['SalePrice'][(data.corr()['SalePrice'] > 0.5) | (data.corr()['SalePrice'] < -0.5)].index)
cat_columns = ['MSZoning', 'Utilities', 'BldgType', 'Heating', 'KitchenQual', 'SaleCondition', 'LandSlope']
important_columns = important + cat_columns
data = data[important_columns]
X = data.drop('SalePrice', axis=1)
y = data['SalePrice']
X = pd.get_dummies(X, columns=cat_columns)
def rmse_cv(model):
    rmse = np.sqrt(-cross_val_score(model, X, y, scoring='neg_mean_squared_error', cv=5)).mean()
    return rmse
def evaluation(y, predictions):
    mae = mean_absolute_error(y, predictions)
    mse = mean_squared_error(y, predictions)
    rmse = np.sqrt(mean_squared_error(y, predictions))
    r_squared = r2_score(y, predictions)
    return (mae, mse, rmse, r_squared)
models = pd.DataFrame(columns=['Model', 'MAE', 'MSE', 'RMSE', 'R2 Score', 'RMSE (Cross-Validation)'])
lin_reg = LinearRegression()
lin_reg.fit(X_train, y_train)
prediction = lin_reg.predict(X_test)
mae, mse, rmse, r_squared = evaluation(y_test, prediction)
print('MAE:', mae)
print('MSE:', mse)
print('RMSE:', rmse)
print('R2 Score:', r_squared)
print('-' * 30)
rmse_cross_val = rmse_cv(lin_reg)
print('RMSE Cross-Validation:', rmse_cross_val)
new_row = {'Model': 'LinearRegression', 'MAE': mae, 'MSE': mse, 'RMSE': rmse, 'R2 Score': r_squared, 'RMSE (Cross-Validation)': rmse_cross_val}
models = models.append(new_row, ignore_index=True) | code |
130004323/cell_5 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
important = list(data.corr()['SalePrice'][(data.corr()['SalePrice'] > 0.5) | (data.corr()['SalePrice'] < -0.5)].index)
cat_columns = ['MSZoning', 'Utilities', 'BldgType', 'Heating', 'KitchenQual', 'SaleCondition', 'LandSlope']
important_columns = important + cat_columns
data = data[important_columns]
print('Missing Values by Column')
print('-' * 30)
print(data.isna().sum())
print('-' * 30)
print('Numer of missing values:', data.isna().sum().sum()) | code |
18102746/cell_21 | [
"image_output_11.png",
"text_plain_output_5.png",
"text_plain_output_9.png",
"image_output_14.png",
"text_plain_output_4.png",
"text_plain_output_13.png",
"image_output_13.png",
"image_output_5.png",
"text_plain_output_14.png",
"text_plain_output_10.png",
"text_plain_output_6.png",
"image_output_7.png",
"text_plain_output_3.png",
"image_output_4.png",
"text_plain_output_7.png",
"image_output_8.png",
"text_plain_output_8.png",
"image_output_6.png",
"image_output_12.png",
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png",
"image_output_10.png",
"text_plain_output_11.png",
"text_plain_output_12.png",
"image_output_9.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import seaborn as sns
Deliveries_Data = pd.read_csv('../input/deliveries_expanded.csv')
Matches_Data = pd.read_csv('../input/matches_expanded.csv')
Deliveries_Data.head(2)
Deliveries_Powerplay = Deliveries_Data.loc[Deliveries_Data["over"] < 7]
#Delivery_team = Deliveries_Powerplay[Deliveries_Powerplay["batting_team"] == "Sunrisers Hyderabad"]
Deliveries_team_analysed = Deliveries_Powerplay.groupby(['batting_team','over'])['total_runs'].sum().unstack().plot(kind="bar",figsize = (15,5))
Deliveries_team_analysed.get_legend().set_bbox_to_anchor((0.18,1))
#Deliveries_team_analysed
#.sort_values(by ="total_runs", ascending = False)
Contribution_data = Deliveries_Data.groupby(['batting_team', 'over_type'])['total_runs'].sum().unstack()
Contribution_data['%Runs_Powerplay'] = Contribution_data['Powerplay_Over'] / (Contribution_data['Death_Over'] + Contribution_data['Middle_Over'] + Contribution_data['Powerplay_Over']) * 100
Contribution_data['%Runs_MiddleOvers'] = Contribution_data['Middle_Over'] / (Contribution_data['Death_Over'] + Contribution_data['Middle_Over'] + Contribution_data['Powerplay_Over']) * 100
Contribution_data['%Runs_DeathOvers'] = Contribution_data['Death_Over'] / (Contribution_data['Death_Over'] + Contribution_data['Middle_Over'] + Contribution_data['Powerplay_Over']) * 100
Contribution_data
Plot = Contribution_data[['%Runs_Powerplay', '%Runs_MiddleOvers', '%Runs_DeathOvers']].sort_values(by='%Runs_DeathOvers', ascending=False).plot(kind='bar', stacked=True, figsize=(15, 5))
Deliveries_Data.columns
Deliveries_extras_analysed = Deliveries_Data[['batting_team', 'bowling_team', 'over_type', 'bowler', 'batsman', 'wide_runs', 'bye_runs', 'legbye_runs', 'noball_runs', 'penalty_runs', 'batsman_runs', 'extra_runs', 'total_runs']]
Deliveries_extras_analysed.head(2) | code |
18102746/cell_13 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import seaborn as sns
Deliveries_Data = pd.read_csv('../input/deliveries_expanded.csv')
Matches_Data = pd.read_csv('../input/matches_expanded.csv')
Overall_Matches_State = pd.Series.to_frame(Matches_Data['State'].value_counts())
Overall_Matches_State['%Contribution'] = Overall_Matches_State['State'] / Overall_Matches_State['State'].sum() * 100
Overall_Matches_Zone = pd.Series.to_frame(Matches_Data['Zone'].value_counts())
Overall_Matches_Zone['%Contribution'] = Overall_Matches_Zone['Zone'] / Overall_Matches_Zone['Zone'].sum() * 100
import matplotlib.pyplot as plt
Matches_understood_by_year = pd.Series.to_frame(Matches_Data.groupby('season')['Zone'].value_counts())
Plot_Zone = Matches_Data.groupby('season')['Zone'].value_counts().unstack().plot(kind="bar", figsize = (15,5))
print(Plot_Zone.get_legend().set_bbox_to_anchor((0.15,1)))
Matches_Data.groupby('city')['Winner_Zone_Type'].value_counts().unstack().dropna().plot(kind='bar', figsize=(15, 5))
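# matches the given team played and won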
def Team_Win_By_Zone(Team_Name):
Wins_Analysed = Matches_Data[((Matches_Data['team1'] == Team_Name) | (Matches_Data['team2'] == Team_Name)) & (Matches_Data['winner'] == Team_Name)]
Team_Win_By_Zone('Chennai Super Kings') | code |
18102746/cell_9 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import seaborn as sns
Deliveries_Data = pd.read_csv('../input/deliveries_expanded.csv')
Matches_Data = pd.read_csv('../input/matches_expanded.csv')
Overall_Matches_State = pd.Series.to_frame(Matches_Data['State'].value_counts())
Overall_Matches_State['%Contribution'] = Overall_Matches_State['State'] / Overall_Matches_State['State'].sum() * 100
Overall_Matches_Zone = pd.Series.to_frame(Matches_Data['Zone'].value_counts())
Overall_Matches_Zone['%Contribution'] = Overall_Matches_Zone['Zone'] / Overall_Matches_Zone['Zone'].sum() * 100
import matplotlib.pyplot as plt
Matches_understood_by_year = pd.Series.to_frame(Matches_Data.groupby('season')['Zone'].value_counts())
Plot_Zone = Matches_Data.groupby('season')['Zone'].value_counts().unstack().plot(kind="bar", figsize = (15,5))
print(Plot_Zone.get_legend().set_bbox_to_anchor((0.15,1)))
Matches_Data.groupby('city')['Winner_Zone_Type'].value_counts().unstack().dropna().plot(kind='bar', figsize=(15, 5)) | code |
18102746/cell_25 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import seaborn as sns
Deliveries_Data = pd.read_csv('../input/deliveries_expanded.csv')
Matches_Data = pd.read_csv('../input/matches_expanded.csv')
Overall_Matches_State = pd.Series.to_frame(Matches_Data['State'].value_counts())
Overall_Matches_State['%Contribution'] = Overall_Matches_State['State'] / Overall_Matches_State['State'].sum() * 100
Overall_Matches_Zone = pd.Series.to_frame(Matches_Data['Zone'].value_counts())
Overall_Matches_Zone['%Contribution'] = Overall_Matches_Zone['Zone'] / Overall_Matches_Zone['Zone'].sum() * 100
import matplotlib.pyplot as plt
Matches_understood_by_year = pd.Series.to_frame(Matches_Data.groupby('season')['Zone'].value_counts())
Plot_Zone = Matches_Data.groupby('season')['Zone'].value_counts().unstack().plot(kind="bar", figsize = (15,5))
print(Plot_Zone.get_legend().set_bbox_to_anchor((0.15,1)))
Matches_Data.groupby('city')['Winner_Zone_Type'].value_counts().unstack().dropna().plot(kind='bar', figsize=(15, 5))
Distinct_Team = Matches_Data['team1'].unique()
Total_Teams = len(Distinct_Team)
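# for each team: when they won the toss, did they also win the match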
for i in range(Total_Teams):
Team_Name = Distinct_Team[i]
Matches_Data_Toss_Comparision = Matches_Data[['city', 'Zone', 'toss_winner', 'toss_decision', 'Toss_Win_Zone', 'winner', 'Winner_Zone']]
Matches_Data_Toss_Comparision = Matches_Data_Toss_Comparision[Matches_Data_Toss_Comparision['toss_winner'] == Team_Name]
Matches_Data_Toss_Comparision['Win_comparison'] = Matches_Data_Toss_Comparision.apply(lambda x: 'Win' if x['toss_winner'] == x['winner'] else 'Lost', axis=1)
Deliveries_Data.head(2)
Deliveries_Powerplay = Deliveries_Data.loc[Deliveries_Data["over"] < 7]
#Delivery_team = Deliveries_Powerplay[Deliveries_Powerplay["batting_team"] == "Sunrisers Hyderabad"]
Deliveries_team_analysed = Deliveries_Powerplay.groupby(['batting_team','over'])['total_runs'].sum().unstack().plot(kind="bar",figsize = (15,5))
Deliveries_team_analysed.get_legend().set_bbox_to_anchor((0.18,1))
#Deliveries_team_analysed
#.sort_values(by ="total_runs", ascending = False)
Contribution_data = Deliveries_Data.groupby(['batting_team', 'over_type'])['total_runs'].sum().unstack()
Contribution_data['%Runs_Powerplay'] = Contribution_data['Powerplay_Over'] / (Contribution_data['Death_Over'] + Contribution_data['Middle_Over'] + Contribution_data['Powerplay_Over']) * 100
Contribution_data['%Runs_MiddleOvers'] = Contribution_data['Middle_Over'] / (Contribution_data['Death_Over'] + Contribution_data['Middle_Over'] + Contribution_data['Powerplay_Over']) * 100
Contribution_data['%Runs_DeathOvers'] = Contribution_data['Death_Over'] / (Contribution_data['Death_Over'] + Contribution_data['Middle_Over'] + Contribution_data['Powerplay_Over']) * 100
Contribution_data
Plot = Contribution_data[['%Runs_Powerplay', '%Runs_MiddleOvers', '%Runs_DeathOvers']].sort_values(by='%Runs_DeathOvers', ascending=False).plot(kind='bar', stacked=True, figsize=(15, 5))
for i in range(Total_Teams):
Team_Name = Distinct_Team[i]
Team_Data = Deliveries_Data[Deliveries_Data['batting_team'] == Team_Name]
Deliveries_Type_defined = Team_Data.groupby(['batting_team', 'over_type'])['total_runs'].sum().unstack()
Deliveries_Type_defined['Death_Over'].sum() / (Deliveries_Type_defined['Death_Over'].sum() + Deliveries_Type_defined['Middle_Over'].sum() + Deliveries_Type_defined['Powerplay_Over'].sum())
Deliveries_Type_defined = pd.Series.to_frame(Deliveries_Type_defined.sum()).reset_index()
Column_Names = ['Over_Type', 'Total_Score']
Overtype_defined_names = pd.DataFrame(data=Deliveries_Type_defined.values, columns=Column_Names)
Overtype_defined_names['%Contribution'] = Overtype_defined_names['Total_Score'] / Overtype_defined_names['Total_Score'].sum() * 100
Deliveries_Data.columns
Deliveries_extras_analysed = Deliveries_Data[['batting_team', 'bowling_team', 'over_type', 'bowler', 'batsman', 'wide_runs', 'bye_runs', 'legbye_runs', 'noball_runs', 'penalty_runs', 'batsman_runs', 'extra_runs', 'total_runs']]
Extra_Columns = ['batting_team', 'bowling_team', 'over_type', 'bowler', 'batsman', 'wide_runs', 'bye_runs', 'legbye_runs', 'noball_runs', 'penalty_runs', 'batsman_runs', 'extra_runs', 'total_runs']
Extra_Columns
Deliveries_extras_analysed['Total_Extras'] = Deliveries_extras_analysed[Extra_Columns].sum(axis=1)
Extras_Over_type = Deliveries_extras_analysed.groupby(['bowling_team', 'over_type'])['extra_runs'].sum().unstack()
Extras_Runs = Deliveries_extras_analysed.groupby(['bowling_team', 'batting_team'])['extra_runs'].sum().unstack().fillna(0)
Runs_Values_Analysed = ["wide_runs","bye_runs","legbye_runs","noball_runs","penalty_runs","batsman_runs","extra_runs","total_runs"]
Total_Extras = len(Runs_Values_Analysed)
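# for each run type, extra runs conceded per bowling team, plotted as a bar chart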
for i in range(Total_Extras):
Extra_Name = Runs_Values_Analysed[i]
print(Extra_Name)
#Deliveries_extras_analysed = Deliveries_extras_analysed[Deliveries_extras_analysed[Extra_Name]]
Extras_By_team = Deliveries_extras_analysed.groupby(['bowling_team',Extra_Name])['extra_runs'].sum().unstack().fillna(0)
Extras_By_team_Columns = Extras_By_team.columns
String = "Total"+Extra_Name
Extras_By_team[String] = Extras_By_team[Extras_By_team_Columns].sum(axis=1)
Extras_By_team = Extras_By_team.sort_values(by=String, ascending = False)
String_Plot = "Plot"+Extra_Name
String_Plot = Extras_By_team[String].plot(kind = "bar",figsize=((15,5)))
plt.xlabel('Toss_decision, Zone',fontsize = 10)
#plt.ylabel('Ticket Count',fontsize = 10)
plt.title(Team_Name,fontsize = 15)
print(plt.show())
All_2_Detailed = Deliveries_extras_analysed[Deliveries_extras_analysed['batsman_runs'] == 2]
All_2_Detailed.head(2)
All_3_Detailed = Deliveries_extras_analysed[Deliveries_extras_analysed['batsman_runs'] == 2]
All_3_Detailed.head(2)
All_4_Detailed = Deliveries_extras_analysed[Deliveries_extras_analysed['batsman_runs'] == 4]
All_4_Detailed.head(2)
All_5_Detailed = Deliveries_extras_analysed[Deliveries_extras_analysed['batsman_runs'] == 5]
All_5_Detailed.head(2)
All_6_Detailed = Deliveries_extras_analysed[Deliveries_extras_analysed['batsman_runs'] == 6]
All_6_Detailed.head(2)
All_7_Detailed = Deliveries_extras_analysed[Deliveries_extras_analysed['batsman_runs'] == 7]
All_7_Detailed.head(2)
All_8_Detailed = Deliveries_extras_analysed[Deliveries_extras_analysed['batsman_runs'] == 8]
All_8_Detailed.head(2) | code |
18102746/cell_23 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import seaborn as sns
Deliveries_Data = pd.read_csv('../input/deliveries_expanded.csv')
Matches_Data = pd.read_csv('../input/matches_expanded.csv')
Overall_Matches_State = pd.Series.to_frame(Matches_Data['State'].value_counts())
Overall_Matches_State['%Contribution'] = Overall_Matches_State['State'] / Overall_Matches_State['State'].sum() * 100
Overall_Matches_Zone = pd.Series.to_frame(Matches_Data['Zone'].value_counts())
Overall_Matches_Zone['%Contribution'] = Overall_Matches_Zone['Zone'] / Overall_Matches_Zone['Zone'].sum() * 100
import matplotlib.pyplot as plt
Matches_understood_by_year = pd.Series.to_frame(Matches_Data.groupby('season')['Zone'].value_counts())
Plot_Zone = Matches_Data.groupby('season')['Zone'].value_counts().unstack().plot(kind="bar", figsize = (15,5))
print(Plot_Zone.get_legend().set_bbox_to_anchor((0.15,1)))
Matches_Data.groupby('city')['Winner_Zone_Type'].value_counts().unstack().dropna().plot(kind='bar', figsize=(15, 5))
Distinct_Team = Matches_Data['team1'].unique()
Total_Teams = len(Distinct_Team)
for i in range(Total_Teams):
Team_Name = Distinct_Team[i]
Matches_Data_Toss_Comparision = Matches_Data[['city', 'Zone', 'toss_winner', 'toss_decision', 'Toss_Win_Zone', 'winner', 'Winner_Zone']]
Matches_Data_Toss_Comparision = Matches_Data_Toss_Comparision[Matches_Data_Toss_Comparision['toss_winner'] == Team_Name]
Matches_Data_Toss_Comparision['Win_comparison'] = Matches_Data_Toss_Comparision.apply(lambda x: 'Win' if x['toss_winner'] == x['winner'] else 'Lost', axis=1)
Deliveries_Data.head(2)
Deliveries_Powerplay = Deliveries_Data.loc[Deliveries_Data["over"] < 7]
#Delivery_team = Deliveries_Powerplay[Deliveries_Powerplay["batting_team"] == "Sunrisers Hyderabad"]
Deliveries_team_analysed = Deliveries_Powerplay.groupby(['batting_team','over'])['total_runs'].sum().unstack().plot(kind="bar",figsize = (15,5))
Deliveries_team_analysed.get_legend().set_bbox_to_anchor((0.18,1))
#Deliveries_team_analysed
#.sort_values(by ="total_runs", ascending = False)
Contribution_data = Deliveries_Data.groupby(['batting_team', 'over_type'])['total_runs'].sum().unstack()
Contribution_data['%Runs_Powerplay'] = Contribution_data['Powerplay_Over'] / (Contribution_data['Death_Over'] + Contribution_data['Middle_Over'] + Contribution_data['Powerplay_Over']) * 100
Contribution_data['%Runs_MiddleOvers'] = Contribution_data['Middle_Over'] / (Contribution_data['Death_Over'] + Contribution_data['Middle_Over'] + Contribution_data['Powerplay_Over']) * 100
Contribution_data['%Runs_DeathOvers'] = Contribution_data['Death_Over'] / (Contribution_data['Death_Over'] + Contribution_data['Middle_Over'] + Contribution_data['Powerplay_Over']) * 100
Contribution_data
Plot = Contribution_data[['%Runs_Powerplay', '%Runs_MiddleOvers', '%Runs_DeathOvers']].sort_values(by='%Runs_DeathOvers', ascending=False).plot(kind='bar', stacked=True, figsize=(15, 5))
for i in range(Total_Teams):
Team_Name = Distinct_Team[i]
Team_Data = Deliveries_Data[Deliveries_Data['batting_team'] == Team_Name]
Deliveries_Type_defined = Team_Data.groupby(['batting_team', 'over_type'])['total_runs'].sum().unstack()
Deliveries_Type_defined['Death_Over'].sum() / (Deliveries_Type_defined['Death_Over'].sum() + Deliveries_Type_defined['Middle_Over'].sum() + Deliveries_Type_defined['Powerplay_Over'].sum())
Deliveries_Type_defined = pd.Series.to_frame(Deliveries_Type_defined.sum()).reset_index()
Column_Names = ['Over_Type', 'Total_Score']
Overtype_defined_names = pd.DataFrame(data=Deliveries_Type_defined.values, columns=Column_Names)
Overtype_defined_names['%Contribution'] = Overtype_defined_names['Total_Score'] / Overtype_defined_names['Total_Score'].sum() * 100
Deliveries_Data.columns
Deliveries_extras_analysed = Deliveries_Data[['batting_team', 'bowling_team', 'over_type', 'bowler', 'batsman', 'wide_runs', 'bye_runs', 'legbye_runs', 'noball_runs', 'penalty_runs', 'batsman_runs', 'extra_runs', 'total_runs']]
Extra_Columns = ['batting_team', 'bowling_team', 'over_type', 'bowler', 'batsman', 'wide_runs', 'bye_runs', 'legbye_runs', 'noball_runs', 'penalty_runs', 'batsman_runs', 'extra_runs', 'total_runs']
Extra_Columns
Deliveries_extras_analysed['Total_Extras'] = Deliveries_extras_analysed[Extra_Columns].sum(axis=1)
Extras_Over_type = Deliveries_extras_analysed.groupby(['bowling_team', 'over_type'])['extra_runs'].sum().unstack()
Extras_Runs = Deliveries_extras_analysed.groupby(['bowling_team', 'batting_team'])['extra_runs'].sum().unstack().fillna(0)
Runs_Values_Analysed = ['wide_runs', 'bye_runs', 'legbye_runs', 'noball_runs', 'penalty_runs', 'batsman_runs', 'extra_runs', 'total_runs']
Total_Extras = len(Runs_Values_Analysed)
for i in range(Total_Extras):
Extra_Name = Runs_Values_Analysed[i]
print(Extra_Name)
Extras_By_team = Deliveries_extras_analysed.groupby(['bowling_team', Extra_Name])['extra_runs'].sum().unstack().fillna(0)
Extras_By_team_Columns = Extras_By_team.columns
String = 'Total' + Extra_Name
Extras_By_team[String] = Extras_By_team[Extras_By_team_Columns].sum(axis=1)
Extras_By_team = Extras_By_team.sort_values(by=String, ascending=False)
String_Plot = 'Plot' + Extra_Name
String_Plot = Extras_By_team[String].plot(kind='bar', figsize=(15, 5))
plt.xlabel('Toss_decision, Zone', fontsize=10)
plt.title(Team_Name, fontsize=15)
print(plt.show()) | code |
18102746/cell_30 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import seaborn as sns
Deliveries_Data = pd.read_csv('../input/deliveries_expanded.csv')
Matches_Data = pd.read_csv('../input/matches_expanded.csv')
Deliveries_Data.head(2)
Deliveries_Powerplay = Deliveries_Data.loc[Deliveries_Data["over"] < 7]
#Delivery_team = Deliveries_Powerplay[Deliveries_Powerplay["batting_team"] == "Sunrisers Hyderabad"]
Deliveries_team_analysed = Deliveries_Powerplay.groupby(['batting_team','over'])['total_runs'].sum().unstack().plot(kind="bar",figsize = (15,5))
Deliveries_team_analysed.get_legend().set_bbox_to_anchor((0.18,1))
#Deliveries_team_analysed
#.sort_values(by ="total_runs", ascending = False)
Contribution_data = Deliveries_Data.groupby(['batting_team', 'over_type'])['total_runs'].sum().unstack()
Contribution_data['%Runs_Powerplay'] = Contribution_data['Powerplay_Over'] / (Contribution_data['Death_Over'] + Contribution_data['Middle_Over'] + Contribution_data['Powerplay_Over']) * 100
Contribution_data['%Runs_MiddleOvers'] = Contribution_data['Middle_Over'] / (Contribution_data['Death_Over'] + Contribution_data['Middle_Over'] + Contribution_data['Powerplay_Over']) * 100
Contribution_data['%Runs_DeathOvers'] = Contribution_data['Death_Over'] / (Contribution_data['Death_Over'] + Contribution_data['Middle_Over'] + Contribution_data['Powerplay_Over']) * 100
Contribution_data
Plot = Contribution_data[['%Runs_Powerplay', '%Runs_MiddleOvers', '%Runs_DeathOvers']].sort_values(by='%Runs_DeathOvers', ascending=False).plot(kind='bar', stacked=True, figsize=(15, 5))
Deliveries_Data.columns
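# runs per batsman by over phase, then dismissal-kind counts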
Delivery_matrix = Deliveries_Data.groupby(['batsman', 'over_type'])['total_runs'].sum().unstack()
Type_Of_Wicket = Deliveries_Data
Type_Of_Wicket['dismissal_kind'].value_counts() | code |
18102746/cell_20 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import seaborn as sns
Deliveries_Data = pd.read_csv('../input/deliveries_expanded.csv')
Matches_Data = pd.read_csv('../input/matches_expanded.csv')
Deliveries_Data.head(2)
Deliveries_Powerplay = Deliveries_Data.loc[Deliveries_Data["over"] < 7]
#Delivery_team = Deliveries_Powerplay[Deliveries_Powerplay["batting_team"] == "Sunrisers Hyderabad"]
Deliveries_team_analysed = Deliveries_Powerplay.groupby(['batting_team','over'])['total_runs'].sum().unstack().plot(kind="bar",figsize = (15,5))
Deliveries_team_analysed.get_legend().set_bbox_to_anchor((0.18,1))
#Deliveries_team_analysed
#.sort_values(by ="total_runs", ascending = False)
Contribution_data = Deliveries_Data.groupby(['batting_team', 'over_type'])['total_runs'].sum().unstack()
Contribution_data['%Runs_Powerplay'] = Contribution_data['Powerplay_Over'] / (Contribution_data['Death_Over'] + Contribution_data['Middle_Over'] + Contribution_data['Powerplay_Over']) * 100
Contribution_data['%Runs_MiddleOvers'] = Contribution_data['Middle_Over'] / (Contribution_data['Death_Over'] + Contribution_data['Middle_Over'] + Contribution_data['Powerplay_Over']) * 100
Contribution_data['%Runs_DeathOvers'] = Contribution_data['Death_Over'] / (Contribution_data['Death_Over'] + Contribution_data['Middle_Over'] + Contribution_data['Powerplay_Over']) * 100
Contribution_data
Plot = Contribution_data[['%Runs_Powerplay', '%Runs_MiddleOvers', '%Runs_DeathOvers']].sort_values(by='%Runs_DeathOvers', ascending=False).plot(kind='bar', stacked=True, figsize=(15, 5))
Deliveries_Data.columns | code |
18102746/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import seaborn as sns
Deliveries_Data = pd.read_csv('../input/deliveries_expanded.csv')
Matches_Data = pd.read_csv('../input/matches_expanded.csv')
Overall_Matches_State = pd.Series.to_frame(Matches_Data['State'].value_counts())
Overall_Matches_State['%Contribution'] = Overall_Matches_State['State'] / Overall_Matches_State['State'].sum() * 100
Overall_Matches_Zone = pd.Series.to_frame(Matches_Data['Zone'].value_counts())
Overall_Matches_Zone['%Contribution'] = Overall_Matches_Zone['Zone'] / Overall_Matches_Zone['Zone'].sum() * 100
Overall_Matches_Zone['%Contribution'].plot(kind='bar', figsize=(15, 5), color='darkorange', edgecolor='black', hatch='X') | code |
18102746/cell_29 | [
"text_plain_output_5.png",
"text_plain_output_9.png",
"text_plain_output_4.png",
"image_output_5.png",
"text_plain_output_6.png",
"image_output_7.png",
"text_plain_output_3.png",
"image_output_4.png",
"text_plain_output_7.png",
"image_output_8.png",
"text_plain_output_8.png",
"image_output_6.png",
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
import pandas as pd
import seaborn as sns
Deliveries_Data = pd.read_csv('../input/deliveries_expanded.csv')
Matches_Data = pd.read_csv('../input/matches_expanded.csv')
Deliveries_Data.head(2)
Deliveries_Powerplay = Deliveries_Data.loc[Deliveries_Data["over"] < 7]
#Delivery_team = Deliveries_Powerplay[Deliveries_Powerplay["batting_team"] == "Sunrisers Hyderabad"]
Deliveries_team_analysed = Deliveries_Powerplay.groupby(['batting_team','over'])['total_runs'].sum().unstack().plot(kind="bar",figsize = (15,5))
Deliveries_team_analysed.get_legend().set_bbox_to_anchor((0.18,1))
#Deliveries_team_analysed
#.sort_values(by ="total_runs", ascending = False)
Contribution_data = Deliveries_Data.groupby(['batting_team', 'over_type'])['total_runs'].sum().unstack()
Contribution_data['%Runs_Powerplay'] = Contribution_data['Powerplay_Over'] / (Contribution_data['Death_Over'] + Contribution_data['Middle_Over'] + Contribution_data['Powerplay_Over']) * 100
Contribution_data['%Runs_MiddleOvers'] = Contribution_data['Middle_Over'] / (Contribution_data['Death_Over'] + Contribution_data['Middle_Over'] + Contribution_data['Powerplay_Over']) * 100
Contribution_data['%Runs_DeathOvers'] = Contribution_data['Death_Over'] / (Contribution_data['Death_Over'] + Contribution_data['Middle_Over'] + Contribution_data['Powerplay_Over']) * 100
Contribution_data
Plot = Contribution_data[['%Runs_Powerplay', '%Runs_MiddleOvers', '%Runs_DeathOvers']].sort_values(by='%Runs_DeathOvers', ascending=False).plot(kind='bar', stacked=True, figsize=(15, 5))
Deliveries_Data.columns
Delivery_matrix = Deliveries_Data.groupby(['batsman', 'over_type'])['total_runs'].sum().unstack()
print(sns.violinplot(x='ball', y='total_runs', data=Deliveries_Data).set_title('Total Runs by Ball')) | code |
18102746/cell_11 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import seaborn as sns
Deliveries_Data = pd.read_csv('../input/deliveries_expanded.csv')
Matches_Data = pd.read_csv('../input/matches_expanded.csv')
Overall_Matches_State = pd.Series.to_frame(Matches_Data['State'].value_counts())
Overall_Matches_State['%Contribution'] = Overall_Matches_State['State'] / Overall_Matches_State['State'].sum() * 100
Overall_Matches_Zone = pd.Series.to_frame(Matches_Data['Zone'].value_counts())
Overall_Matches_Zone['%Contribution'] = Overall_Matches_Zone['Zone'] / Overall_Matches_Zone['Zone'].sum() * 100
import matplotlib.pyplot as plt
Matches_understood_by_year = pd.Series.to_frame(Matches_Data.groupby('season')['Zone'].value_counts())
Plot_Zone = Matches_Data.groupby('season')['Zone'].value_counts().unstack().plot(kind="bar", figsize = (15,5))
print(Plot_Zone.get_legend().set_bbox_to_anchor((0.15,1)))
Matches_Data.groupby('city')['Winner_Zone_Type'].value_counts().unstack().dropna().plot(kind='bar', figsize=(15, 5))
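# matches where the given city is the home city of either team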
def Performance_Team_Venue(Team_Name):
Performance_ByTeam_Analysed = Matches_Data[(Matches_Data['Home_City_Team1'] == Team_Name) | (Matches_Data['Home_City_team2'] == Team_Name)]
Performance_Team_Venue('Mumbai') | code |
18102746/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import seaborn as sns
Deliveries_Data = pd.read_csv('../input/deliveries_expanded.csv')
Matches_Data = pd.read_csv('../input/matches_expanded.csv')
Overall_Matches_State = pd.Series.to_frame(Matches_Data['State'].value_counts())
Overall_Matches_State['%Contribution'] = Overall_Matches_State['State'] / Overall_Matches_State['State'].sum() * 100
Overall_Matches_Zone = pd.Series.to_frame(Matches_Data['Zone'].value_counts())
Overall_Matches_Zone['%Contribution'] = Overall_Matches_Zone['Zone'] / Overall_Matches_Zone['Zone'].sum() * 100
import matplotlib.pyplot as plt
Matches_understood_by_year = pd.Series.to_frame(Matches_Data.groupby('season')['Zone'].value_counts())
Plot_Zone = Matches_Data.groupby('season')['Zone'].value_counts().unstack().plot(kind='bar', figsize=(15, 5))
print(Plot_Zone.get_legend().set_bbox_to_anchor((0.15, 1))) | code |
18102746/cell_18 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import seaborn as sns
Deliveries_Data = pd.read_csv('../input/deliveries_expanded.csv')
Matches_Data = pd.read_csv('../input/matches_expanded.csv')
Deliveries_Data.head(2)
Deliveries_Powerplay = Deliveries_Data.loc[Deliveries_Data["over"] < 7]
#Delivery_team = Deliveries_Powerplay[Deliveries_Powerplay["batting_team"] == "Sunrisers Hyderabad"]
Deliveries_team_analysed = Deliveries_Powerplay.groupby(['batting_team','over'])['total_runs'].sum().unstack().plot(kind="bar",figsize = (15,5))
Deliveries_team_analysed.get_legend().set_bbox_to_anchor((0.18,1))
#Deliveries_team_analysed
#.sort_values(by ="total_runs", ascending = False)
Contribution_data = Deliveries_Data.groupby(['batting_team', 'over_type'])['total_runs'].sum().unstack()
Contribution_data['%Runs_Powerplay'] = Contribution_data['Powerplay_Over'] / (Contribution_data['Death_Over'] + Contribution_data['Middle_Over'] + Contribution_data['Powerplay_Over']) * 100
Contribution_data['%Runs_MiddleOvers'] = Contribution_data['Middle_Over'] / (Contribution_data['Death_Over'] + Contribution_data['Middle_Over'] + Contribution_data['Powerplay_Over']) * 100
Contribution_data['%Runs_DeathOvers'] = Contribution_data['Death_Over'] / (Contribution_data['Death_Over'] + Contribution_data['Middle_Over'] + Contribution_data['Powerplay_Over']) * 100
Contribution_data
Plot = Contribution_data[['%Runs_Powerplay', '%Runs_MiddleOvers', '%Runs_DeathOvers']].sort_values(by='%Runs_DeathOvers', ascending=False).plot(kind='bar', stacked=True, figsize=(15, 5))
print(Plot.get_legend().set_bbox_to_anchor((1, 1))) | code |
18102746/cell_28 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import seaborn as sns
Deliveries_Data = pd.read_csv('../input/deliveries_expanded.csv')
Matches_Data = pd.read_csv('../input/matches_expanded.csv')
Deliveries_Data.head(2)
Deliveries_Powerplay = Deliveries_Data.loc[Deliveries_Data["over"] < 7]
#Delivery_team = Deliveries_Powerplay[Deliveries_Powerplay["batting_team"] == "Sunrisers Hyderabad"]
Deliveries_team_analysed = Deliveries_Powerplay.groupby(['batting_team','over'])['total_runs'].sum().unstack().plot(kind="bar",figsize = (15,5))
Deliveries_team_analysed.get_legend().set_bbox_to_anchor((0.18,1))
#Deliveries_team_analysed
#.sort_values(by ="total_runs", ascending = False)
Contribution_data = Deliveries_Data.groupby(['batting_team', 'over_type'])['total_runs'].sum().unstack()
Contribution_data['%Runs_Powerplay'] = Contribution_data['Powerplay_Over'] / (Contribution_data['Death_Over'] + Contribution_data['Middle_Over'] + Contribution_data['Powerplay_Over']) * 100
Contribution_data['%Runs_MiddleOvers'] = Contribution_data['Middle_Over'] / (Contribution_data['Death_Over'] + Contribution_data['Middle_Over'] + Contribution_data['Powerplay_Over']) * 100
Contribution_data['%Runs_DeathOvers'] = Contribution_data['Death_Over'] / (Contribution_data['Death_Over'] + Contribution_data['Middle_Over'] + Contribution_data['Powerplay_Over']) * 100
Contribution_data
Plot = Contribution_data[['%Runs_Powerplay', '%Runs_MiddleOvers', '%Runs_DeathOvers']].sort_values(by='%Runs_DeathOvers', ascending=False).plot(kind='bar', stacked=True, figsize=(15, 5))
Deliveries_Data.columns
Delivery_matrix = Deliveries_Data.groupby(['batsman', 'over_type'])['total_runs'].sum().unstack()
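# total runs across all batsmen in middle vs powerplay overs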
Delivery_matrix['Middle_Over'].sum()
Delivery_matrix['Powerplay_Over'].sum() | code |
18102746/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import seaborn as sns
Deliveries_Data = pd.read_csv('../input/deliveries_expanded.csv')
Matches_Data = pd.read_csv('../input/matches_expanded.csv')
Overall_Matches_State = pd.Series.to_frame(Matches_Data['State'].value_counts())
Overall_Matches_State['%Contribution'] = Overall_Matches_State['State'] / Overall_Matches_State['State'].sum() * 100
Overall_Matches_Zone = pd.Series.to_frame(Matches_Data['Zone'].value_counts())
Overall_Matches_Zone['%Contribution'] = Overall_Matches_Zone['Zone'] / Overall_Matches_Zone['Zone'].sum() * 100
import matplotlib.pyplot as plt
Matches_understood_by_year = pd.Series.to_frame(Matches_Data.groupby('season')['Zone'].value_counts())
Plot_Zone = Matches_Data.groupby('season')['Zone'].value_counts().unstack().plot(kind="bar", figsize = (15,5))
print(Plot_Zone.get_legend().set_bbox_to_anchor((0.15,1)))
Matches_Data.groupby('season')['Winner_Zone_Type'].value_counts().unstack().plot(kind='bar', stacked=True, figsize=(15, 5)) | code |
18102746/cell_15 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import seaborn as sns
Deliveries_Data = pd.read_csv('../input/deliveries_expanded.csv')
Matches_Data = pd.read_csv('../input/matches_expanded.csv')
Overall_Matches_State = pd.Series.to_frame(Matches_Data['State'].value_counts())
Overall_Matches_State['%Contribution'] = Overall_Matches_State['State'] / Overall_Matches_State['State'].sum() * 100
Overall_Matches_Zone = pd.Series.to_frame(Matches_Data['Zone'].value_counts())
Overall_Matches_Zone['%Contribution'] = Overall_Matches_Zone['Zone'] / Overall_Matches_Zone['Zone'].sum() * 100
import matplotlib.pyplot as plt
Matches_understood_by_year = pd.Series.to_frame(Matches_Data.groupby('season')['Zone'].value_counts())
Plot_Zone = Matches_Data.groupby('season')['Zone'].value_counts().unstack().plot(kind="bar", figsize = (15,5))
print(Plot_Zone.get_legend().set_bbox_to_anchor((0.15,1)))
Matches_Data.groupby('city')['Winner_Zone_Type'].value_counts().unstack().dropna().plot(kind='bar', figsize=(15, 5))
Distinct_Team = Matches_Data['team1'].unique()
Total_Teams = len(Distinct_Team)
for i in range(Total_Teams):
Team_Name = Distinct_Team[i]
Matches_Data_Toss_Comparision = Matches_Data[['city', 'Zone', 'toss_winner', 'toss_decision', 'Toss_Win_Zone', 'winner', 'Winner_Zone']]
Matches_Data_Toss_Comparision = Matches_Data_Toss_Comparision[Matches_Data_Toss_Comparision['toss_winner'] == Team_Name]
Matches_Data_Toss_Comparision['Win_comparison'] = Matches_Data_Toss_Comparision.apply(lambda x: 'Win' if x['toss_winner'] == x['winner'] else 'Lost', axis=1)
Matches_Data_Toss_Comparision.groupby(['toss_decision', 'Zone'])['Win_comparison'].value_counts().unstack().plot(kind='bar', figsize=(15, 5))
plt.xlabel('Toss_decision, Zone', fontsize=10)
plt.title(Team_Name, fontsize=15)
print(plt.show()) | code |
18102746/cell_16 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import seaborn as sns
Deliveries_Data = pd.read_csv('../input/deliveries_expanded.csv')
Matches_Data = pd.read_csv('../input/matches_expanded.csv')
Overall_Matches_State = pd.Series.to_frame(Matches_Data['State'].value_counts())
Overall_Matches_State['%Contribution'] = Overall_Matches_State['State'] / Overall_Matches_State['State'].sum() * 100
Overall_Matches_Zone = pd.Series.to_frame(Matches_Data['Zone'].value_counts())
Overall_Matches_Zone['%Contribution'] = Overall_Matches_Zone['Zone'] / Overall_Matches_Zone['Zone'].sum() * 100
import matplotlib.pyplot as plt
Matches_understood_by_year = pd.Series.to_frame(Matches_Data.groupby('season')['Zone'].value_counts())
Plot_Zone = Matches_Data.groupby('season')['Zone'].value_counts().unstack().plot(kind="bar", figsize = (15,5))
print(Plot_Zone.get_legend().set_bbox_to_anchor((0.15,1)))
Matches_Data.groupby('city')['Winner_Zone_Type'].value_counts().unstack().dropna().plot(kind='bar', figsize=(15, 5))
Distinct_Team = Matches_Data['team1'].unique()
Total_Teams = len(Distinct_Team)
for i in range(Total_Teams):
Team_Name = Distinct_Team[i]
Matches_Data_Toss_Comparision = Matches_Data[['city', 'Zone', 'toss_winner', 'toss_decision', 'Toss_Win_Zone', 'winner', 'Winner_Zone']]
Matches_Data_Toss_Comparision = Matches_Data_Toss_Comparision[Matches_Data_Toss_Comparision['toss_winner'] == Team_Name]
Matches_Data_Toss_Comparision['Win_comparison'] = Matches_Data_Toss_Comparision.apply(lambda x: 'Win' if x['toss_winner'] == x['winner'] else 'Lost', axis=1)
Matches_Data_Toss_Comparision = Matches_Data[['city', 'Zone', 'toss_winner', 'toss_decision', 'Toss_Win_Zone', 'winner', 'Winner_Zone']]
Matches_Data_Toss_Comparision = Matches_Data_Toss_Comparision[Matches_Data_Toss_Comparision['toss_winner'] == 'Mumbai Indians']
Matches_Data_Toss_Comparision['Win_comparison'] = Matches_Data_Toss_Comparision.apply(lambda x: 'Win' if x['toss_winner'] == x['winner'] else 'Lost', axis=1)
print(Matches_Data_Toss_Comparision.groupby(['toss_decision', 'Zone'])['Win_comparison'].value_counts().unstack().plot(kind='bar', stacked=True, figsize=(15, 5))) | code |
18102746/cell_17 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import seaborn as sns
Deliveries_Data = pd.read_csv('../input/deliveries_expanded.csv')
Matches_Data = pd.read_csv('../input/matches_expanded.csv')
Deliveries_Data.head(2)
Deliveries_Powerplay = Deliveries_Data.loc[Deliveries_Data['over'] < 7]
Deliveries_team_analysed = Deliveries_Powerplay.groupby(['batting_team', 'over'])['total_runs'].sum().unstack().plot(kind='bar', figsize=(15, 5))
Deliveries_team_analysed.get_legend().set_bbox_to_anchor((0.18, 1)) | code |
18102746/cell_31 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import seaborn as sns
Deliveries_Data = pd.read_csv('../input/deliveries_expanded.csv')
Matches_Data = pd.read_csv('../input/matches_expanded.csv')
Deliveries_Data.head(2)
Deliveries_Powerplay = Deliveries_Data.loc[Deliveries_Data["over"] < 7]
#Delivery_team = Deliveries_Powerplay[Deliveries_Powerplay["batting_team"] == "Sunrisers Hyderabad"]
Deliveries_team_analysed = Deliveries_Powerplay.groupby(['batting_team','over'])['total_runs'].sum().unstack().plot(kind="bar",figsize = (15,5))
Deliveries_team_analysed.get_legend().set_bbox_to_anchor((0.18,1))
#Deliveries_team_analysed
#.sort_values(by ="total_runs", ascending = False)
Contribution_data = Deliveries_Data.groupby(['batting_team', 'over_type'])['total_runs'].sum().unstack()
Contribution_data['%Runs_Powerplay'] = Contribution_data['Powerplay_Over'] / (Contribution_data['Death_Over'] + Contribution_data['Middle_Over'] + Contribution_data['Powerplay_Over']) * 100
Contribution_data['%Runs_MiddleOvers'] = Contribution_data['Middle_Over'] / (Contribution_data['Death_Over'] + Contribution_data['Middle_Over'] + Contribution_data['Powerplay_Over']) * 100
Contribution_data['%Runs_DeathOvers'] = Contribution_data['Death_Over'] / (Contribution_data['Death_Over'] + Contribution_data['Middle_Over'] + Contribution_data['Powerplay_Over']) * 100
Contribution_data
Plot = Contribution_data[['%Runs_Powerplay', '%Runs_MiddleOvers', '%Runs_DeathOvers']].sort_values(by='%Runs_DeathOvers', ascending=False).plot(kind='bar', stacked=True, figsize=(15, 5))
Deliveries_Data.columns
Delivery_matrix = Deliveries_Data.groupby(['batsman', 'over_type'])['total_runs'].sum().unstack()
Type_Of_Wicket = Deliveries_Data
Type_Of_Wicket['dismissal_kind'].value_counts()
Type_Of_Wicket_Clean = Type_Of_Wicket.dropna()
Type_Of_Wicket_Clean.head(20)
Type_Of_Wicket_Clean['bowler'].value_counts().sort_values(ascending=False)
Type_Of_Wicket_Clean.groupby(['bowler', 'over_type'])['player_dismissed'].count().unstack().sort_values(by='Death_Over', ascending=False) | code |
18102746/cell_24 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import seaborn as sns
Deliveries_Data = pd.read_csv('../input/deliveries_expanded.csv')
Matches_Data = pd.read_csv('../input/matches_expanded.csv')
Overall_Matches_State = pd.Series.to_frame(Matches_Data['State'].value_counts())
Overall_Matches_State['%Contribution'] = Overall_Matches_State['State'] / Overall_Matches_State['State'].sum() * 100
Overall_Matches_Zone = pd.Series.to_frame(Matches_Data['Zone'].value_counts())
Overall_Matches_Zone['%Contribution'] = Overall_Matches_Zone['Zone'] / Overall_Matches_Zone['Zone'].sum() * 100
import matplotlib.pyplot as plt
Matches_understood_by_year = pd.Series.to_frame(Matches_Data.groupby('season')['Zone'].value_counts())
Plot_Zone = Matches_Data.groupby('season')['Zone'].value_counts().unstack().plot(kind="bar", figsize = (15,5))
print(Plot_Zone.get_legend().set_bbox_to_anchor((0.15,1)))
Matches_Data.groupby('city')['Winner_Zone_Type'].value_counts().unstack().dropna().plot(kind='bar', figsize=(15, 5))
Distinct_Team = Matches_Data['team1'].unique()
Total_Teams = len(Distinct_Team)
for i in range(Total_Teams):
Team_Name = Distinct_Team[i]
Matches_Data_Toss_Comparision = Matches_Data[['city', 'Zone', 'toss_winner', 'toss_decision', 'Toss_Win_Zone', 'winner', 'Winner_Zone']]
Matches_Data_Toss_Comparision = Matches_Data_Toss_Comparision[Matches_Data_Toss_Comparision['toss_winner'] == Team_Name]
Matches_Data_Toss_Comparision['Win_comparison'] = Matches_Data_Toss_Comparision.apply(lambda x: 'Win' if x['toss_winner'] == x['winner'] else 'Lost', axis=1)
Deliveries_Data.head(2)
Deliveries_Powerplay = Deliveries_Data.loc[Deliveries_Data["over"] < 7]
#Delivery_team = Deliveries_Powerplay[Deliveries_Powerplay["batting_team"] == "Sunrisers Hyderabad"]
Deliveries_team_analysed = Deliveries_Powerplay.groupby(['batting_team','over'])['total_runs'].sum().unstack().plot(kind="bar",figsize = (15,5))
Deliveries_team_analysed.get_legend().set_bbox_to_anchor((0.18,1))
#Deliveries_team_analysed
#.sort_values(by ="total_runs", ascending = False)
Contribution_data = Deliveries_Data.groupby(['batting_team', 'over_type'])['total_runs'].sum().unstack()
Contribution_data['%Runs_Powerplay'] = Contribution_data['Powerplay_Over'] / (Contribution_data['Death_Over'] + Contribution_data['Middle_Over'] + Contribution_data['Powerplay_Over']) * 100
Contribution_data['%Runs_MiddleOvers'] = Contribution_data['Middle_Over'] / (Contribution_data['Death_Over'] + Contribution_data['Middle_Over'] + Contribution_data['Powerplay_Over']) * 100
Contribution_data['%Runs_DeathOvers'] = Contribution_data['Death_Over'] / (Contribution_data['Death_Over'] + Contribution_data['Middle_Over'] + Contribution_data['Powerplay_Over']) * 100
Contribution_data
Plot = Contribution_data[['%Runs_Powerplay', '%Runs_MiddleOvers', '%Runs_DeathOvers']].sort_values(by='%Runs_DeathOvers', ascending=False).plot(kind='bar', stacked=True, figsize=(15, 5))
for i in range(Total_Teams):
Team_Name = Distinct_Team[i]
Team_Data = Deliveries_Data[Deliveries_Data['batting_team'] == Team_Name]
Deliveries_Type_defined = Team_Data.groupby(['batting_team', 'over_type'])['total_runs'].sum().unstack()
Deliveries_Type_defined['Death_Over'].sum() / (Deliveries_Type_defined['Death_Over'].sum() + Deliveries_Type_defined['Middle_Over'].sum() + Deliveries_Type_defined['Powerplay_Over'].sum())
Deliveries_Type_defined = pd.Series.to_frame(Deliveries_Type_defined.sum()).reset_index()
Column_Names = ['Over_Type', 'Total_Score']
Overtype_defined_names = pd.DataFrame(data=Deliveries_Type_defined.values, columns=Column_Names)
Overtype_defined_names['%Contribution'] = Overtype_defined_names['Total_Score'] / Overtype_defined_names['Total_Score'].sum() * 100
Deliveries_Data.columns
Deliveries_extras_analysed = Deliveries_Data[['batting_team', 'bowling_team', 'over_type', 'bowler', 'batsman', 'wide_runs', 'bye_runs', 'legbye_runs', 'noball_runs', 'penalty_runs', 'batsman_runs', 'extra_runs', 'total_runs']]
Extra_Columns = ['batting_team', 'bowling_team', 'over_type', 'bowler', 'batsman', 'wide_runs', 'bye_runs', 'legbye_runs', 'noball_runs', 'penalty_runs', 'batsman_runs', 'extra_runs', 'total_runs']
Extra_Columns
Deliveries_extras_analysed['Total_Extras'] = Deliveries_extras_analysed[Extra_Columns].sum(axis=1)
Extras_Over_type = Deliveries_extras_analysed.groupby(['bowling_team', 'over_type'])['extra_runs'].sum().unstack()
Extras_Runs = Deliveries_extras_analysed.groupby(['bowling_team', 'batting_team'])['extra_runs'].sum().unstack().fillna(0)
Runs_Values_Analysed = ["wide_runs","bye_runs","legbye_runs","noball_runs","penalty_runs","batsman_runs","extra_runs","total_runs"]
Total_Extras = len(Runs_Values_Analysed)
for i in range(Total_Extras):
Extra_Name = Runs_Values_Analysed[i]
print(Extra_Name)
#Deliveries_extras_analysed = Deliveries_extras_analysed[Deliveries_extras_analysed[Extra_Name]]
Extras_By_team = Deliveries_extras_analysed.groupby(['bowling_team',Extra_Name])['extra_runs'].sum().unstack().fillna(0)
Extras_By_team_Columns = Extras_By_team.columns
String = "Total"+Extra_Name
Extras_By_team[String] = Extras_By_team[Extras_By_team_Columns].sum(axis=1)
Extras_By_team = Extras_By_team.sort_values(by=String, ascending = False)
String_Plot = "Plot"+Extra_Name
String_Plot = Extras_By_team[String].plot(kind = "bar",figsize=((15,5)))
plt.xlabel('Toss_decision, Zone',fontsize = 10)
#plt.ylabel('Ticket Count',fontsize = 10)
plt.title(Team_Name,fontsize = 15)
print(plt.show())
Deliveries_extras_analysed.head(2) | code |
18102746/cell_22 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import seaborn as sns
Deliveries_Data = pd.read_csv('../input/deliveries_expanded.csv')
Matches_Data = pd.read_csv('../input/matches_expanded.csv')
Deliveries_Data.head(2)
Deliveries_Powerplay = Deliveries_Data.loc[Deliveries_Data["over"] < 7]
#Delivery_team = Deliveries_Powerplay[Deliveries_Powerplay["batting_team"] == "Sunrisers Hyderabad"]
Deliveries_team_analysed = Deliveries_Powerplay.groupby(['batting_team','over'])['total_runs'].sum().unstack().plot(kind="bar",figsize = (15,5))
Deliveries_team_analysed.get_legend().set_bbox_to_anchor((0.18,1))
#Deliveries_team_analysed
#.sort_values(by ="total_runs", ascending = False)
Contribution_data = Deliveries_Data.groupby(['batting_team', 'over_type'])['total_runs'].sum().unstack()
Contribution_data['%Runs_Powerplay'] = Contribution_data['Powerplay_Over'] / (Contribution_data['Death_Over'] + Contribution_data['Middle_Over'] + Contribution_data['Powerplay_Over']) * 100
Contribution_data['%Runs_MiddleOvers'] = Contribution_data['Middle_Over'] / (Contribution_data['Death_Over'] + Contribution_data['Middle_Over'] + Contribution_data['Powerplay_Over']) * 100
Contribution_data['%Runs_DeathOvers'] = Contribution_data['Death_Over'] / (Contribution_data['Death_Over'] + Contribution_data['Middle_Over'] + Contribution_data['Powerplay_Over']) * 100
Contribution_data
Plot = Contribution_data[['%Runs_Powerplay', '%Runs_MiddleOvers', '%Runs_DeathOvers']].sort_values(by='%Runs_DeathOvers', ascending=False).plot(kind='bar', stacked=True, figsize=(15, 5))
Deliveries_Data.columns
Deliveries_extras_analysed = Deliveries_Data[['batting_team', 'bowling_team', 'over_type', 'bowler', 'batsman', 'wide_runs', 'bye_runs', 'legbye_runs', 'noball_runs', 'penalty_runs', 'batsman_runs', 'extra_runs', 'total_runs']]
Extra_Columns = ['batting_team', 'bowling_team', 'over_type', 'bowler', 'batsman', 'wide_runs', 'bye_runs', 'legbye_runs', 'noball_runs', 'penalty_runs', 'batsman_runs', 'extra_runs', 'total_runs']
Extra_Columns
Deliveries_extras_analysed['Total_Extras'] = Deliveries_extras_analysed[Extra_Columns].sum(axis=1)
Deliveries_extras_analysed.head(2)
Extras_Over_type = Deliveries_extras_analysed.groupby(['bowling_team', 'over_type'])['extra_runs'].sum().unstack()
Extras_Runs = Deliveries_extras_analysed.groupby(['bowling_team', 'batting_team'])['extra_runs'].sum().unstack().fillna(0)
Extras_Over_type.plot(kind='bar', figsize=(15, 5)).get_legend().set_bbox_to_anchor((1, 1)) | code |
18102746/cell_27 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import seaborn as sns
Deliveries_Data = pd.read_csv('../input/deliveries_expanded.csv')
Matches_Data = pd.read_csv('../input/matches_expanded.csv')
Deliveries_Data.head(2)
Deliveries_Powerplay = Deliveries_Data.loc[Deliveries_Data["over"] < 7]
#Delivery_team = Deliveries_Powerplay[Deliveries_Powerplay["batting_team"] == "Sunrisers Hyderabad"]
Deliveries_team_analysed = Deliveries_Powerplay.groupby(['batting_team','over'])['total_runs'].sum().unstack().plot(kind="bar",figsize = (15,5))
Deliveries_team_analysed.get_legend().set_bbox_to_anchor((0.18,1))
#Deliveries_team_analysed
#.sort_values(by ="total_runs", ascending = False)
Contribution_data = Deliveries_Data.groupby(['batting_team', 'over_type'])['total_runs'].sum().unstack()
Contribution_data['%Runs_Powerplay'] = Contribution_data['Powerplay_Over'] / (Contribution_data['Death_Over'] + Contribution_data['Middle_Over'] + Contribution_data['Powerplay_Over']) * 100
Contribution_data['%Runs_MiddleOvers'] = Contribution_data['Middle_Over'] / (Contribution_data['Death_Over'] + Contribution_data['Middle_Over'] + Contribution_data['Powerplay_Over']) * 100
Contribution_data['%Runs_DeathOvers'] = Contribution_data['Death_Over'] / (Contribution_data['Death_Over'] + Contribution_data['Middle_Over'] + Contribution_data['Powerplay_Over']) * 100
Contribution_data
Plot = Contribution_data[['%Runs_Powerplay', '%Runs_MiddleOvers', '%Runs_DeathOvers']].sort_values(by='%Runs_DeathOvers', ascending=False).plot(kind='bar', stacked=True, figsize=(15, 5))
Deliveries_Data.columns
Delivery_matrix = Deliveries_Data.groupby(['batsman', 'over_type'])['total_runs'].sum().unstack()
print('Best Death Over Batsman')
print(Delivery_matrix['Death_Over'].sort_values(ascending=False).head(1))
print('Best Powerplay Over Batsman')
print(Delivery_matrix['Powerplay_Over'].sort_values(ascending=False).head(1))
print('Best Middle Over Batsman')
print(Delivery_matrix['Middle_Over'].sort_values(ascending=False).head(1)) | code |
18102746/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import seaborn as sns
Deliveries_Data = pd.read_csv('../input/deliveries_expanded.csv')
Matches_Data = pd.read_csv('../input/matches_expanded.csv')
Overall_Matches_State = pd.Series.to_frame(Matches_Data['State'].value_counts())
Overall_Matches_State['%Contribution'] = Overall_Matches_State['State'] / Overall_Matches_State['State'].sum() * 100
Overall_Matches_State['%Contribution'].plot(kind='bar', figsize=(15, 5), color='darkorange', edgecolor='black', hatch='X') | code |
121153872/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('train.csv')
train.shape
train.describe().T
train.dtypes.value_counts()
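# count and share of missing values per column, sorted descending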
total = train.isnull().sum().sort_values(ascending=False)[train.isnull().sum().sort_values(ascending=False) != 0]
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)[(train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) != 0]
missing = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
print(missing) | code |
121153872/cell_9 | [
"image_output_1.png"
] | import pandas as pd
train = pd.read_csv('train.csv')
train.shape
train.describe().T
train.info() | code |
121153872/cell_30 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import matplotlib.style as style
import pandas as pd
import seaborn as sns
train = pd.read_csv('train.csv')
train.shape
train.describe().T
train.dtypes.value_counts()
total = train.isnull().sum().sort_values(ascending=False)[train.isnull().sum().sort_values(ascending=False) != 0]
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)[(train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) != 0]
missing = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
missing = train.isnull().sum()
missing = missing[missing > 0]
style.use('seaborn-darkgrid')
missing.sort_values(inplace=True)
duplicated = train.duplicated()
correlation = train.corr()
def customized_scatterplot(y, x):
    style.use('seaborn-darkgrid')
    # assumed reconstruction: the source truncates the plotting body; scatter y against x
    sns.scatterplot(y=y, x=x)
customized_scatterplot(train.SalePrice, train.GrLivArea) | code |
121153872/cell_20 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import matplotlib.style as style
import pandas as pd
import seaborn as sns
train = pd.read_csv('train.csv')
train.shape
train.describe().T
train.dtypes.value_counts()
total = train.isnull().sum().sort_values(ascending=False)[train.isnull().sum().sort_values(ascending=False) != 0]
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)[(train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) != 0]
missing = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
missing = train.isnull().sum()
missing = missing[missing > 0]
style.use('seaborn-darkgrid')
missing.sort_values(inplace=True)
duplicated = train.duplicated()
correlation = train.corr()
plt.figure(figsize=(15, 6))
sns.heatmap(correlation, fmt='.5f', linewidth=0.5, cmap='BuPu')
plt.show() | code |
121153872/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
train = pd.read_csv('train.csv')
train.shape | code |
121153872/cell_26 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import matplotlib.style as style
import pandas as pd
import seaborn as sns
train = pd.read_csv('train.csv')
train.shape
train.describe().T
train.dtypes.value_counts()
total = train.isnull().sum().sort_values(ascending=False)[train.isnull().sum().sort_values(ascending=False) != 0]
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)[(train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) != 0]
missing = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
missing = train.isnull().sum()
missing = missing[missing > 0]
style.use('seaborn-darkgrid')
missing.sort_values(inplace=True)
duplicated = train.duplicated()
correlation = train.corr()
def customized_scatterplot(y, x):
    style.use('seaborn-darkgrid')
    # assumed reconstruction: the source truncates the plotting body; scatter y against x
    sns.scatterplot(y=y, x=x)
customized_scatterplot(train.SalePrice, train.OverallQual) | code |
121153872/cell_19 | [
"image_output_1.png"
] | import matplotlib.style as style
import pandas as pd
train = pd.read_csv('train.csv')
train.shape
train.describe().T
train.dtypes.value_counts()
total = train.isnull().sum().sort_values(ascending=False)[train.isnull().sum().sort_values(ascending=False) != 0]
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)[(train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) != 0]
missing = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
missing = train.isnull().sum()
missing = missing[missing > 0]
style.use('seaborn-darkgrid')
missing.sort_values(inplace=True)
duplicated = train.duplicated()
correlation = train.corr()
print(correlation) | code |
121153872/cell_7 | [
"image_output_1.png"
] | import pandas as pd
train = pd.read_csv('train.csv')
train.shape
train.head() | code |
121153872/cell_32 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import matplotlib.style as style
import pandas as pd
import seaborn as sns
train = pd.read_csv('train.csv')
train.shape
train.describe().T
train.dtypes.value_counts()
total = train.isnull().sum().sort_values(ascending=False)[train.isnull().sum().sort_values(ascending=False) != 0]
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)[(train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) != 0]
missing = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
missing = train.isnull().sum()
missing = missing[missing > 0]
style.use('seaborn-darkgrid')
missing.sort_values(inplace=True)
duplicated = train.duplicated()
correlation = train.corr()
def customized_scatterplot(y, x):
    style.use('seaborn-darkgrid')
    # assumed reconstruction: the source truncates the plotting body; scatter y against x
    sns.scatterplot(y=y, x=x)
customized_scatterplot(train.SalePrice, train.GarageArea) | code |
121153872/cell_28 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import matplotlib.style as style
import pandas as pd
import seaborn as sns
train = pd.read_csv('train.csv')
train.shape
train.describe().T
train.dtypes.value_counts()
total = train.isnull().sum().sort_values(ascending=False)[train.isnull().sum().sort_values(ascending=False) != 0]
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)[(train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) != 0]
missing = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
missing = train.isnull().sum()
missing = missing[missing > 0]
style.use('seaborn-darkgrid')
missing.sort_values(inplace=True)
duplicated = train.duplicated()
correlation = train.corr()
def customized_scatterplot(y, x):
style.use('seaborn-darkgrid')
sns.barplot(train.OverallCond, train.SalePrice) | code |
121153872/cell_8 | [
"image_output_1.png"
] | import pandas as pd
train = pd.read_csv('train.csv')
train.shape
train.describe().T | code |
121153872/cell_17 | [
"text_plain_output_1.png"
] | import matplotlib.style as style
import pandas as pd
train = pd.read_csv('train.csv')
train.shape
train.describe().T
train.dtypes.value_counts()
total = train.isnull().sum().sort_values(ascending=False)[train.isnull().sum().sort_values(ascending=False) != 0]
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)[(train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) != 0]
missing = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
missing = train.isnull().sum()
missing = missing[missing > 0]
style.use('seaborn-darkgrid')
missing.sort_values(inplace=True)
duplicated = train.duplicated()
print('Number of duplicated instances:', duplicated.sum())
print(train[duplicated]) | code |
121153872/cell_24 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import matplotlib.style as style
import pandas as pd
import seaborn as sns
train = pd.read_csv('train.csv')
train.shape
train.describe().T
train.dtypes.value_counts()
total = train.isnull().sum().sort_values(ascending=False)[train.isnull().sum().sort_values(ascending=False) != 0]
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)[(train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) != 0]
missing = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
missing = train.isnull().sum()
missing = missing[missing > 0]
style.use('seaborn-darkgrid')
missing.sort_values(inplace=True)
duplicated = train.duplicated()
correlation = train.corr()
def customized_scatterplot(y, x):
style.use('seaborn-darkgrid')
sns.distplot(train['SalePrice']) | code |
121153872/cell_14 | [
"text_plain_output_1.png"
] | import matplotlib.style as style
import pandas as pd
train = pd.read_csv('train.csv')
train.shape
train.describe().T
train.dtypes.value_counts()
total = train.isnull().sum().sort_values(ascending=False)[train.isnull().sum().sort_values(ascending=False) != 0]
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)[(train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) != 0]
missing = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
missing = train.isnull().sum()
missing = missing[missing > 0]
style.use('seaborn-darkgrid')
missing.sort_values(inplace=True)
missing.plot.bar() | code |
121153872/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('train.csv')
train.shape
train.describe().T
train.dtypes.value_counts() | code |
121153872/cell_12 | [
"text_html_output_1.png"
] | import missingno as msno
import pandas as pd
train = pd.read_csv('train.csv')
train.shape
train.describe().T
train.dtypes.value_counts()
msno.matrix(train) | code |
17108148/cell_13 | [
"image_output_1.png"
from matplotlib import pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/train.csv')
structures = pd.read_csv('../input/structures.csv')
molecule = 'dsgdb9nsd_000001'
a = df_train.loc[df_train['molecule_name'] == f'{molecule}']
b = structures[structures.molecule_name == f'{molecule}']
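# merge atom type and x/y/z coordinates onto each coupling pair for the given atom index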
def convert_index_to_atom(a, b, atom_index):
c = a.merge(b, how='left', left_on=['molecule_name', f'atom_index_{atom_index}'], right_on=['molecule_name', 'atom_index'])
c.drop('atom_index', axis=1, inplace=True)
c.rename(columns={'atom': f'atom_{atom_index}', 'x': f'x_{atom_index}', 'y': f'y_{atom_index}', 'z': f'z_{atom_index}'}, inplace=True)
c.drop(f'atom_index_{atom_index}', axis=1, inplace=True)
return c
c = convert_index_to_atom(df_train, structures, 0)
c = convert_index_to_atom(c, structures, 1)
types = list(c.groupby('type').groups)
types
fig, ax = plt.subplots(figsize=(10, 5))
x = c[c['type'] == '1JHC']['x_0']
y = c[c['type'] == '1JHC']['y_0']
ax.scatter(x, y)
ax.grid(True)
ax.set_title('1JHC')
fig.tight_layout()
plt.show()
fig, ax = plt.subplots(figsize=(10, 5))
x = c[c['type'] == '1JHN']['x_0']
y = c[c['type'] == '1JHN']['y_0']
ax.scatter(x, y)
ax.grid(True)
ax.set_title('1JHN')
fig.tight_layout()
plt.show()
fig, ax = plt.subplots(figsize=(10, 5))
x = c[c['type'] == '2JHC']['x_0']
y = c[c['type'] == '2JHC']['y_0']
ax.scatter(x, y)
ax.grid(True)
ax.set_title('2JHC')
fig.tight_layout()
plt.show() | code |
17108148/cell_6 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/train.csv')
structures = pd.read_csv('../input/structures.csv')
molecule = 'dsgdb9nsd_000001'
a = df_train.loc[df_train['molecule_name'] == f'{molecule}']
b = structures[structures.molecule_name == f'{molecule}']
list(df_train['type'].unique()) | code |
17108148/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from matplotlib import pyplot as plt
df_train = pd.read_csv('../input/train.csv')
structures = pd.read_csv('../input/structures.csv')
molecule = 'dsgdb9nsd_000001'
a = df_train.loc[df_train['molecule_name'] == f'{molecule}']
b = structures[structures.molecule_name == f'{molecule}']
def convert_index_to_atom(a, b, atom_index):
c = a.merge(b, how='left', left_on=['molecule_name', f'atom_index_{atom_index}'], right_on=['molecule_name', 'atom_index'])
c.drop('atom_index', axis=1, inplace=True)
c.rename(columns={'atom': f'atom_{atom_index}', 'x': f'x_{atom_index}', 'y': f'y_{atom_index}', 'z': f'z_{atom_index}'}, inplace=True)
c.drop(f'atom_index_{atom_index}', axis=1, inplace=True)
return c
c = convert_index_to_atom(df_train, structures, 0)
c = convert_index_to_atom(c, structures, 1)
types = list(c.groupby('type').groups)
types
fig, ax = plt.subplots(figsize=(10, 5))
x = c[c['type'] == '1JHC']['x_0']
y = c[c['type'] == '1JHC']['y_0']
ax.scatter(x, y)
ax.grid(True)
ax.set_title('1JHC')
fig.tight_layout()
plt.show() | code |
17108148/cell_1 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import os
import matplotlib
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot as plt
print(os.listdir('../input')) | code |
17108148/cell_14 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from matplotlib import pyplot as plt
df_train = pd.read_csv('../input/train.csv')
structures = pd.read_csv('../input/structures.csv')
molecule = 'dsgdb9nsd_000001'
a = df_train.loc[df_train['molecule_name'] == f'{molecule}']
b = structures[structures.molecule_name == f'{molecule}']
def convert_index_to_atom(a, b, atom_index):
c = a.merge(b, how='left', left_on=['molecule_name', f'atom_index_{atom_index}'], right_on=['molecule_name', 'atom_index'])
c.drop('atom_index', axis=1, inplace=True)
c.rename(columns={'atom': f'atom_{atom_index}', 'x': f'x_{atom_index}', 'y': f'y_{atom_index}', 'z': f'z_{atom_index}'}, inplace=True)
c.drop(f'atom_index_{atom_index}', axis=1, inplace=True)
return c
c = convert_index_to_atom(df_train, structures, 0)
c = convert_index_to_atom(c, structures, 1)
types = list(c.groupby('type').groups)
types
fig, ax = plt.subplots(figsize=(10, 5))
x = c[c['type'] == '1JHC']['x_0']
y = c[c['type'] == '1JHC']['y_0']
ax.scatter(x, y)
ax.grid(True)
ax.set_title('1JHC')
fig.tight_layout()
plt.show()
fig, ax = plt.subplots(figsize=(10, 5))
x = c[c['type'] == '1JHN']['x_0']
y = c[c['type'] == '1JHN']['y_0']
ax.scatter(x, y)
ax.grid(True)
ax.set_title('1JHN')
fig.tight_layout()
plt.show()
fig, ax = plt.subplots(figsize=(10, 5))
x = c[c['type'] == '2JHC']['x_0']
y = c[c['type'] == '2JHC']['y_0']
ax.scatter(x, y)
ax.grid(True)
ax.set_title('2JHC')
fig.tight_layout()
plt.show()
fig, ax = plt.subplots(figsize=(10, 5))
x = c[c['type'] == '2JHH']['x_0']
y = c[c['type'] == '2JHH']['y_0']
ax.scatter(x, y)
ax.grid(True)
ax.set_title('2JHH')
fig.tight_layout()
plt.show() | code |
17108148/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/train.csv')
structures = pd.read_csv('../input/structures.csv')
molecule = 'dsgdb9nsd_000001'
a = df_train.loc[df_train['molecule_name'] == f'{molecule}']
b = structures[structures.molecule_name == f'{molecule}']
def convert_index_to_atom(a, b, atom_index):
c = a.merge(b, how='left', left_on=['molecule_name', f'atom_index_{atom_index}'], right_on=['molecule_name', 'atom_index'])
c.drop('atom_index', axis=1, inplace=True)
c.rename(columns={'atom': f'atom_{atom_index}', 'x': f'x_{atom_index}', 'y': f'y_{atom_index}', 'z': f'z_{atom_index}'}, inplace=True)
c.drop(f'atom_index_{atom_index}', axis=1, inplace=True)
return c
c = convert_index_to_atom(df_train, structures, 0)
c = convert_index_to_atom(c, structures, 1)
types = list(c.groupby('type').groups)
types | code |
17108148/cell_12 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from matplotlib import pyplot as plt
df_train = pd.read_csv('../input/train.csv')
structures = pd.read_csv('../input/structures.csv')
molecule = 'dsgdb9nsd_000001'
a = df_train.loc[df_train['molecule_name'] == f'{molecule}']
b = structures[structures.molecule_name == f'{molecule}']
def convert_index_to_atom(a, b, atom_index):
c = a.merge(b, how='left', left_on=['molecule_name', f'atom_index_{atom_index}'], right_on=['molecule_name', 'atom_index'])
c.drop('atom_index', axis=1, inplace=True)
c.rename(columns={'atom': f'atom_{atom_index}', 'x': f'x_{atom_index}', 'y': f'y_{atom_index}', 'z': f'z_{atom_index}'}, inplace=True)
c.drop(f'atom_index_{atom_index}', axis=1, inplace=True)
return c
c = convert_index_to_atom(df_train, structures, 0)
c = convert_index_to_atom(c, structures, 1)
types = list(c.groupby('type').groups)
types
fig, ax = plt.subplots(figsize=(10, 5))
x = c[c['type'] == '1JHC']['x_0']
y = c[c['type'] == '1JHC']['y_0']
ax.scatter(x, y)
ax.grid(True)
ax.set_title('1JHC')
fig.tight_layout()
plt.show()
fig, ax = plt.subplots(figsize=(10, 5))
x = c[c['type'] == '1JHN']['x_0']
y = c[c['type'] == '1JHN']['y_0']
ax.scatter(x, y)
ax.grid(True)
ax.set_title('1JHN')
fig.tight_layout()
plt.show() | code |
88092182/cell_21 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | from sklearn.metrics import accuracy_score
from sklearn.preprocessing import OneHotEncoder
from tensorflow import keras
from tensorflow.keras import layers
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/tabular-playground-series-feb-2022/train.csv', index_col=0)
test = pd.read_csv('/kaggle/input/tabular-playground-series-feb-2022/test.csv', index_col=0)
submission = pd.read_csv('/kaggle/input/tabular-playground-series-feb-2022/sample_submission.csv', index_col=0)
cols_with_missing_train = [col for col in train.columns if train[col].isnull().any()]
cols_with_missing_test = [col for col in test.columns if test[col].isnull().any()]
from sklearn.preprocessing import OneHotEncoder
a = train.target.values
a = a.reshape(-1, 1)
OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False)
OH_cols_train = pd.DataFrame(OH_encoder.fit_transform(a))
OH_cols_train.index = train.index
num_X_train = train.drop('target', axis=1)
OH_X_train = pd.concat([num_X_train, OH_cols_train], axis=1)
OH_X_train
target = ['Bacteroides_fragilis', 'Campylobacter_jejuni', 'Enterococcus_hirae', 'Escherichia_coli', 'Escherichia_fergusonii', 'Klebsiella_pneumoniae', 'Salmonella_enterica', 'Staphylococcus_aureus', 'Streptococcus_pneumoniae', 'Streptococcus_pyogenes']
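# Assumed reconstruction: the lines below use X_train/X_valid/y_train/y_valid, but the
# original split step is not included in this cell dump; a simple hold-out split of the
# features against the one-hot encoded target is sketched here (test_size and
# random_state are assumptions, not the notebook's actual values).
from sklearn.model_selection import train_test_split
X_train, X_valid, y_train, y_valid = train_test_split(num_X_train, OH_cols_train, test_size=0.2, random_state=0)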
y_train.shape
model = None
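# Feed-forward classifier: BatchNormalization + Dense (+ Dropout) blocks ending in
# 10 sigmoid units, one score per bacteria species listed in `target`.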
model = keras.Sequential([layers.BatchNormalization(), layers.Dense(100, activation='relu', input_shape=[286]), layers.Dropout(0.25), layers.BatchNormalization(), layers.Dense(80, activation='relu'), layers.Dropout(0.25), layers.BatchNormalization(), layers.Dense(40, activation='relu'), layers.Dropout(0.25), layers.BatchNormalization(), layers.Dense(10, activation='relu'), layers.BatchNormalization(), layers.Dense(10, activation='sigmoid')])
model.compile(optimizer='adam', loss='categorical_crossentropy')
history = model.fit(X_train, y_train, validation_data=(X_valid, y_valid), batch_size=3000, epochs=100)
sns.set(rc={'figure.figsize': (10, 5)})
history_df = pd.DataFrame(history.history)
a = model.predict(X_valid)
a = pd.DataFrame(data=a, columns=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
a = a.idxmax(axis=1)
a.index = y_valid.index
y_valid = y_valid.idxmax(axis=1)
prediction = model.predict(test)
prediction_df = pd.DataFrame(data=prediction, columns=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
prediction_df['target'] = prediction_df.idxmax(axis=1)
prediction_df.index = test.index
submission['row_id'] = test.index
submission['target'] = prediction_df['target']
submission.loc[submission['target'] == 0, 'target'] = target[0]
submission.loc[submission['target'] == 1, 'target'] = target[1]
submission.loc[submission['target'] == 2, 'target'] = target[2]
submission.loc[submission['target'] == 3, 'target'] = target[3]
submission.loc[submission['target'] == 4, 'target'] = target[4]
submission.loc[submission['target'] == 5, 'target'] = target[5]
submission.loc[submission['target'] == 6, 'target'] = target[6]
submission.loc[submission['target'] == 7, 'target'] = target[7]
submission.loc[submission['target'] == 8, 'target'] = target[8]
submission.loc[submission['target'] == 9, 'target'] = target[9]
submission.head() | code |
88092182/cell_13 | [
"text_plain_output_1.png"
] | y_train.shape | code |
88092182/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/tabular-playground-series-feb-2022/train.csv', index_col=0)
test = pd.read_csv('/kaggle/input/tabular-playground-series-feb-2022/test.csv', index_col=0)
submission = pd.read_csv('/kaggle/input/tabular-playground-series-feb-2022/sample_submission.csv', index_col=0)
cols_with_missing_train = [col for col in train.columns if train[col].isnull().any()]
cols_with_missing_test = [col for col in test.columns if test[col].isnull().any()]
train.head() | code |
88092182/cell_19 | [
"text_plain_output_1.png"
] | from sklearn.metrics import accuracy_score
from sklearn.preprocessing import OneHotEncoder
from tensorflow import keras
from tensorflow.keras import layers
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/tabular-playground-series-feb-2022/train.csv', index_col=0)
test = pd.read_csv('/kaggle/input/tabular-playground-series-feb-2022/test.csv', index_col=0)
submission = pd.read_csv('/kaggle/input/tabular-playground-series-feb-2022/sample_submission.csv', index_col=0)
cols_with_missing_train = [col for col in train.columns if train[col].isnull().any()]
cols_with_missing_test = [col for col in test.columns if test[col].isnull().any()]
from sklearn.preprocessing import OneHotEncoder
a = train.target.values
a = a.reshape(-1, 1)
OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False)
OH_cols_train = pd.DataFrame(OH_encoder.fit_transform(a))
OH_cols_train.index = train.index
num_X_train = train.drop('target', axis=1)
OH_X_train = pd.concat([num_X_train, OH_cols_train], axis=1)
OH_X_train
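# Assumed reconstruction (as in cell_21 above): the original train/validation split is
# not included in this cell dump, so a hold-out split is sketched with assumed parameters.
from sklearn.model_selection import train_test_split
X_train, X_valid, y_train, y_valid = train_test_split(num_X_train, OH_cols_train, test_size=0.2, random_state=0)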
y_train.shape
model = None
model = keras.Sequential([layers.BatchNormalization(), layers.Dense(100, activation='relu', input_shape=[286]), layers.Dropout(0.25), layers.BatchNormalization(), layers.Dense(80, activation='relu'), layers.Dropout(0.25), layers.BatchNormalization(), layers.Dense(40, activation='relu'), layers.Dropout(0.25), layers.BatchNormalization(), layers.Dense(10, activation='relu'), layers.BatchNormalization(), layers.Dense(10, activation='sigmoid')])
model.compile(optimizer='adam', loss='categorical_crossentropy')
history = model.fit(X_train, y_train, validation_data=(X_valid, y_valid), batch_size=3000, epochs=100)
sns.set(rc={'figure.figsize': (10, 5)})
history_df = pd.DataFrame(history.history)
a = model.predict(X_valid)
a = pd.DataFrame(data=a, columns=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
a = a.idxmax(axis=1)
a.index = y_valid.index
y_valid = y_valid.idxmax(axis=1)
prediction = model.predict(test)
prediction_df = pd.DataFrame(data=prediction, columns=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
prediction_df['target'] = prediction_df.idxmax(axis=1)
prediction_df.index = test.index
submission['row_id'] = test.index
submission['target'] = prediction_df['target']
submission.head() | code |
88092182/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
import matplotlib.pyplot as plt
import seaborn as sns
from tensorflow import keras
from tensorflow.keras import layers | code |
88092182/cell_8 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import OneHotEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/tabular-playground-series-feb-2022/train.csv', index_col=0)
test = pd.read_csv('/kaggle/input/tabular-playground-series-feb-2022/test.csv', index_col=0)
submission = pd.read_csv('/kaggle/input/tabular-playground-series-feb-2022/sample_submission.csv', index_col=0)
cols_with_missing_train = [col for col in train.columns if train[col].isnull().any()]
cols_with_missing_test = [col for col in test.columns if test[col].isnull().any()]
from sklearn.preprocessing import OneHotEncoder
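# One-hot encode the multi-class `target` column so each bacteria species gets its own 0/1 column.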
a = train.target.values
a = a.reshape(-1, 1)
OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False)
OH_cols_train = pd.DataFrame(OH_encoder.fit_transform(a))
OH_cols_train.index = train.index
num_X_train = train.drop('target', axis=1)
OH_X_train = pd.concat([num_X_train, OH_cols_train], axis=1)
OH_X_train | code |
88092182/cell_15 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import OneHotEncoder
from tensorflow import keras
from tensorflow.keras import layers
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/tabular-playground-series-feb-2022/train.csv', index_col=0)
test = pd.read_csv('/kaggle/input/tabular-playground-series-feb-2022/test.csv', index_col=0)
submission = pd.read_csv('/kaggle/input/tabular-playground-series-feb-2022/sample_submission.csv', index_col=0)
cols_with_missing_train = [col for col in train.columns if train[col].isnull().any()]
cols_with_missing_test = [col for col in test.columns if test[col].isnull().any()]
from sklearn.preprocessing import OneHotEncoder
a = train.target.values
a = a.reshape(-1, 1)
OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False)
OH_cols_train = pd.DataFrame(OH_encoder.fit_transform(a))
OH_cols_train.index = train.index
num_X_train = train.drop('target', axis=1)
OH_X_train = pd.concat([num_X_train, OH_cols_train], axis=1)
OH_X_train
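# Assumed reconstruction (as in cell_21 above): the original train/validation split is
# not included in this cell dump, so a hold-out split is sketched with assumed parameters.
from sklearn.model_selection import train_test_split
X_train, X_valid, y_train, y_valid = train_test_split(num_X_train, OH_cols_train, test_size=0.2, random_state=0)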
y_train.shape
model = None
model = keras.Sequential([layers.BatchNormalization(), layers.Dense(100, activation='relu', input_shape=[286]), layers.Dropout(0.25), layers.BatchNormalization(), layers.Dense(80, activation='relu'), layers.Dropout(0.25), layers.BatchNormalization(), layers.Dense(40, activation='relu'), layers.Dropout(0.25), layers.BatchNormalization(), layers.Dense(10, activation='relu'), layers.BatchNormalization(), layers.Dense(10, activation='sigmoid')])
model.compile(optimizer='adam', loss='categorical_crossentropy')
history = model.fit(X_train, y_train, validation_data=(X_valid, y_valid), batch_size=3000, epochs=100)
sns.set(rc={'figure.figsize': (10, 5)})
history_df = pd.DataFrame(history.history)
history_df.loc[:, ['loss', 'val_loss']].plot() | code |
88092182/cell_16 | [
"text_html_output_1.png"
] | from sklearn.metrics import accuracy_score
from sklearn.preprocessing import OneHotEncoder
from tensorflow import keras
from tensorflow.keras import layers
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/tabular-playground-series-feb-2022/train.csv', index_col=0)
test = pd.read_csv('/kaggle/input/tabular-playground-series-feb-2022/test.csv', index_col=0)
submission = pd.read_csv('/kaggle/input/tabular-playground-series-feb-2022/sample_submission.csv', index_col=0)
cols_with_missing_train = [col for col in train.columns if train[col].isnull().any()]
cols_with_missing_test = [col for col in test.columns if test[col].isnull().any()]
from sklearn.preprocessing import OneHotEncoder
a = train.target.values
a = a.reshape(-1, 1)
OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False)
OH_cols_train = pd.DataFrame(OH_encoder.fit_transform(a))
OH_cols_train.index = train.index
num_X_train = train.drop('target', axis=1)
OH_X_train = pd.concat([num_X_train, OH_cols_train], axis=1)
OH_X_train
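# Assumed reconstruction (as in cell_21 above): the original train/validation split is
# not included in this cell dump, so a hold-out split is sketched with assumed parameters.
from sklearn.model_selection import train_test_split
X_train, X_valid, y_train, y_valid = train_test_split(num_X_train, OH_cols_train, test_size=0.2, random_state=0)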
y_train.shape
model = None
model = keras.Sequential([layers.BatchNormalization(), layers.Dense(100, activation='relu', input_shape=[286]), layers.Dropout(0.25), layers.BatchNormalization(), layers.Dense(80, activation='relu'), layers.Dropout(0.25), layers.BatchNormalization(), layers.Dense(40, activation='relu'), layers.Dropout(0.25), layers.BatchNormalization(), layers.Dense(10, activation='relu'), layers.BatchNormalization(), layers.Dense(10, activation='sigmoid')])
model.compile(optimizer='adam', loss='categorical_crossentropy')
history = model.fit(X_train, y_train, validation_data=(X_valid, y_valid), batch_size=3000, epochs=100)
sns.set(rc={'figure.figsize': (10, 5)})
history_df = pd.DataFrame(history.history)
a = model.predict(X_valid)
a = pd.DataFrame(data=a, columns=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
a = a.idxmax(axis=1)
a.index = y_valid.index
y_valid = y_valid.idxmax(axis=1)
print(accuracy_score(a, y_valid) * 100) | code |