kernel_id (int64, 24.2k to 23.3M) | prompt (string, lengths 8 to 1.85M) | completion (string, lengths 1 to 182k) | comp_name (string, lengths 5 to 57)
---|---|---|---|
6,942,227 | train['Faregroup'] = np.where(train['Fare']<15,0,1)
test['Faregroup'] = np.where(test['Fare']<15,0,1 )<feature_engineering> | df2 = pd.DataFrame([round2] ).transpose()
df2.columns = ['Round2_Days']
df2 | Santa's Workshop Tour 2019 |
6,942,227 | train['Famgroup'] = np.where(train['Family']<2,0,(np.where(( train['Family']>1)&(train['Family']<5),1,2)))
test['Famgroup'] = np.where(test['Family']<2,0,(np.where(( test['Family']>1)&(test['Family']<5),1,2)) )<drop_column> | df3 = pd.DataFrame([round3] ).transpose()
df3.columns = ['Round3_Days']
df3 | Santa's Workshop Tour 2019 |
6,942,227 | traina = train.drop(columns=['Name','Ticket','Cabin','Fare','Age','Family','SibSp','Parch'])
testa = test.drop(columns=['Name','Ticket','Cabin','Fare','Age','Family','SibSp','Parch'] )<categorify> | df4 = pd.DataFrame([round4] ).transpose()
df4.columns = ['Round4_Days']
df4 | Santa's Workshop Tour 2019 |
6,942,227 | TestId=testa['PassengerId']
total_features=pd.concat(( traina.drop(['PassengerId','Survived'], axis=1), testa.drop(['PassengerId'], axis=1)))
total_features=pd.get_dummies(total_features, drop_first=True)
train_features=total_features[0:traina.shape[0]]
test_features=total_features[traina.shape[0]:]<split> | df5 = pd.DataFrame([round5] ).transpose()
df5.columns = ['Round5_Days']
df5 | Santa's Workshop Tour 2019 |
6,942,227 | X = train_features
y = traina['Survived']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=101 )<train_on_grid> | df6 = pd.DataFrame([final] ).transpose()
df6.columns = ['Final_Days']
df6 | Santa's Workshop Tour 2019 |
6,942,227 | ran = RandomForestClassifier(random_state=1)
knn = KNeighborsClassifier()
log = LogisticRegression()
xgb = XGBClassifier()
gbc = GradientBoostingClassifier()
svc = SVC(probability=True)
ext = ExtraTreesClassifier()
ada = AdaBoostClassifier()
gnb = GaussianNB()
gpc = GaussianProcessClassifier()
bag = BaggingClassifier()
models = [ran, knn, log, xgb, gbc, svc, ext, ada, gnb, gpc, bag]
scores = []
for mod in models:
mod.fit(X_train, y_train)
acc = cross_val_score(mod, X_train, y_train, scoring = "accuracy", cv = 10)
scores.append(acc.mean() )<create_dataframe> | df = pd.concat([df0,df1,df2,df3,df4,df5,df6], axis=1)
df | Santa's Workshop Tour 2019 |
6,942,227 | <create_dataframe><EOS> | pivot0 = df.groupby(['choice_0'])['n_people'].agg(['sum'])
pivot0 = pd.DataFrame(pivot0)
pivot0.columns = ['First Choice']
pivot1 = df.groupby(['Prediction'])['n_people'].agg(['sum'])
pivot1 = pd.DataFrame(pivot1)
pivot1.columns = ['Prediction']
pivot2 = df.groupby(['Round2_Days'])['n_people'].agg(['sum'])
pivot2 = pd.DataFrame(pivot2)
pivot2.columns = ['Round 2']
pivot3 = df.groupby(['Round3_Days'])['n_people'].agg(['sum'])
pivot3 = pd.DataFrame(pivot3)
pivot3.columns = ['Round 3']
pivot4 = df.groupby(['Round4_Days'])['n_people'].agg(['sum'])
pivot4 = pd.DataFrame(pivot4)
pivot4.columns = ['Round 4']
pivot5 = df.groupby(['Round5_Days'])['n_people'].agg(['sum'])
pivot5 = pd.DataFrame(pivot5)
pivot5.columns = ['Round 5']
pivot6 = df.groupby(['Final_Days'])['n_people'].agg(['sum'])
pivot6 = pd.DataFrame(pivot6)
pivot6.columns = ['Final'] | Santa's Workshop Tour 2019 |
6,915,535 | <SOS> metric: SantaWorkshopSchedule2019 Kaggle data source: santas-workshop-tour-2019<drop_column> | import numpy as np
import pandas as pd
from numba import njit
from itertools import product
from ortools.linear_solver import pywraplp | Santa's Workshop Tour 2019 |
6,915,535 | columns = importances[importances['Average']<.03]['Feature'].values
train_features.drop(columns = columns, inplace = True)
test_features.drop(columns = columns, inplace = True )<split> | def get_penalty(n, choice):
penalty = None
if choice == 0:
penalty = 0
elif choice == 1:
penalty = 50
elif choice == 2:
penalty = 50 + 9 * n
elif choice == 3:
penalty = 100 + 9 * n
elif choice == 4:
penalty = 200 + 9 * n
elif choice == 5:
penalty = 200 + 18 * n
elif choice == 6:
penalty = 300 + 18 * n
elif choice == 7:
penalty = 300 + 36 * n
elif choice == 8:
penalty = 400 + 36 * n
elif choice == 9:
penalty = 500 + 36 * n + 199 * n
else:
penalty = 500 + 36 * n + 398 * n
return penalty
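# Illustrative check of the preference-cost table above (values follow directly from the branches):
# a 4-person family granted its choice_3 day costs 100 + 9*4 = 136, its choice_0 day costs 0,
# and a day outside its listed choices (choice >= 10) costs 500 + 36*4 + 398*4 = 2236.
assert get_penalty(4, 3) == 136 and get_penalty(4, 0) == 0 and get_penalty(4, 10) == 2236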
def GetPreferenceCostMatrix(data):
cost_matrix = np.zeros(( N_FAMILIES, N_DAYS), dtype=np.int64)
for i in range(N_FAMILIES):
desired = data.values[i, :-1]
cost_matrix[i, :] = get_penalty(FAMILY_SIZE[i], 10)
for j, day in enumerate(desired):
cost_matrix[i, day-1] = get_penalty(FAMILY_SIZE[i], j)
return cost_matrix
def GetAccountingCostMatrix() :
ac = np.zeros(( 1000, 1000), dtype=np.float64)
for n in range(ac.shape[0]):
for n_p1 in range(ac.shape[1]):
diff = abs(n - n_p1)
ac[n, n_p1] = max(0,(n - 125)/ 400 * n**(0.5 + diff / 50.0))
return ac
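# Note: this table caches the per-day accounting penalty
# max(0, (N_d - 125)/400 * N_d**(0.5 + |N_d - N_{d+1}|/50)) for every occupancy pair,
# so acost() below can use a lookup instead of recomputing the power term on each call.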
@njit(fastmath=True)
def pcost(prediction):
daily_occupancy = np.zeros(N_DAYS+1, dtype=np.int64)
penalty = 0
for(i, p)in enumerate(prediction):
n = FAMILY_SIZE[i]
penalty += PCOSTM[i, p]
daily_occupancy[p] += n
return penalty, daily_occupancy
@njit(fastmath=True)
def acost(daily_occupancy):
accounting_cost = 0
n_out_of_range = 0
daily_occupancy[-1] = daily_occupancy[-2]
for day in range(N_DAYS):
n_p1 = daily_occupancy[day + 1]
n = daily_occupancy[day]
n_out_of_range +=(n > MAX_OCCUPANCY)or(n < MIN_OCCUPANCY)
accounting_cost += ACOSTM[n, n_p1]
return accounting_cost, n_out_of_range
@njit(fastmath=True)
def cost_function(prediction):
penalty, daily_occupancy = pcost(prediction)
accounting_cost, n_out_of_range = acost(daily_occupancy)
return penalty + accounting_cost + n_out_of_range*100000000 | Santa's Workshop Tour 2019 |
6,915,535 | X = train_features
y = traina['Survived']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=101 )<train_on_grid> | def findBetterDay4Family(pred):
fobs = np.argsort(FAMILY_SIZE)
score = cost_function(pred)
original_score = np.inf
while original_score>score:
original_score = score
for family_id in fobs:
for pick in range(10):
day = DESIRED[family_id, pick]
oldvalue = pred[family_id]
pred[family_id] = day
new_score = cost_function(pred)
if new_score<score:
score = new_score
else:
pred[family_id] = oldvalue
print(score, end='\r')
print(score)
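# Note: findBetterDay4Family is a greedy pass: families (smallest first, via argsort of FAMILY_SIZE)
# are moved to whichever of their ten listed days lowers the total cost, repeating until no move helps.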
def stochastic_product_search(top_k, fam_size, original,
verbose=1000, verbose2=50000,
n_iter=500, random_state=2019):
best = original.copy()
best_score = cost_function(best)
np.random.seed(random_state)
for i in range(n_iter):
fam_indices = np.random.choice(range(DESIRED.shape[0]), size=fam_size)
changes = np.array(list(product(*DESIRED[fam_indices, :top_k].tolist())))
for change in changes:
new = best.copy()
new[fam_indices] = change
new_score = cost_function(new)
if new_score < best_score:
best_score = new_score
best = new
if verbose and i % verbose == 0:
print(f"Iteration
if verbose2 and i % verbose2 == 0:
print(f"Iteration
print(f"Final best score is {best_score:.2f}")
return best | Santa's Workshop Tour 2019 |
6,915,535 | ran = RandomForestClassifier(random_state=1)
knn = KNeighborsClassifier()
log = LogisticRegression()
xgb = XGBClassifier(random_state=1)
gbc = GradientBoostingClassifier(random_state=1)
svc = SVC(probability=True)
ext = ExtraTreesClassifier(random_state=1)
ada = AdaBoostClassifier(random_state=1)
gnb = GaussianNB()
gpc = GaussianProcessClassifier()
bag = BaggingClassifier(random_state=1)
models = [ran, knn, log, xgb, gbc, svc, ext, ada, gnb, gpc, bag]
scores_v2 = []
for mod in models:
mod.fit(X_train, y_train)
acc = cross_val_score(mod, X_train, y_train, scoring = "accuracy", cv = 10)
scores_v2.append(acc.mean() )<create_dataframe> | def seed_finding(seed, prediction_input):
prediction = prediction_input.copy()
np.random.seed(seed)
best_score = cost_function(prediction)
original_score = best_score
best_pred = prediction.copy()
print("SEED: {} ORIGINAL SCORE: {}".format(seed, original_score))
for t in range(100):
for i in range(5000):
for j in range(10):
di = prediction[i]
prediction[i] = DESIRED[i, j]
cur_score = cost_function(prediction)
KT = 1
if t < 5:
KT = 1.5
elif t < 10:
KT = 4.5
else:
if cur_score > best_score + 100:
KT = 3
elif cur_score > best_score + 50 :
KT = 2.75
elif cur_score > best_score + 20:
KT = 2.5
elif cur_score > best_score + 10:
KT = 2
elif cur_score > best_score:
KT = 1.5
else:
KT = 1
prob = np.exp(-(cur_score - best_score)/ KT)
if np.random.rand() < prob:
best_score = cur_score
else:
prediction[i] = di
if best_score < original_score:
print("NEW BEST SCORE on seed {}: {}".format(seed, best_score))
original_score = best_score
best_pred = prediction.copy()
return prediction | Santa's Workshop Tour 2019 |
6,915,535 | results = pd.DataFrame({
'Model': ['Random Forest', 'K Nearest Neighbour', 'Logistic Regression', 'XGBoost', 'Gradient Boosting', 'SVC', 'Extra Trees', 'AdaBoost', 'Gaussian Naive Bayes', 'Gaussian Process', 'Bagging Classifier'],
'Score': scores,
'Score w/Feature Selection': scores_v2})
result_df = results.sort_values(by='Score w/Feature Selection', ascending=False ).reset_index(drop=True)
result_df<train_on_grid> | def solveSantaLP() :
S = pywraplp.Solver('SolveAssignmentProblem', pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)
x = {}
candidates = [[] for _ in range(N_DAYS)]
for i in range(N_FAMILIES):
for j in DESIRED[i, :]:
candidates[j].append(i)
x[i, j] = S.BoolVar('x[%i,%i]' %(i, j))
daily_occupancy = [S.Sum([x[i, j] * FAMILY_SIZE[i] for i in candidates[j]])
for j in range(N_DAYS)]
family_presence = [S.Sum([x[i, j] for j in DESIRED[i, :]])
for i in range(N_FAMILIES)]
preference_cost = S.Sum([PCOSTM[i, j] * x[i,j] for i in range(N_FAMILIES)
for j in DESIRED[i, :] ])
S.Minimize(preference_cost)
for j in range(N_DAYS-1):
S.Add(daily_occupancy[j] - daily_occupancy[j+1] <= 23)
S.Add(daily_occupancy[j+1] - daily_occupancy[j] <= 23)
for i in range(N_FAMILIES):
S.Add(family_presence[i] == 1)
for j in range(N_DAYS):
S.Add(daily_occupancy[j] >= MIN_OCCUPANCY)
S.Add(daily_occupancy[j] <= MAX_OCCUPANCY)
res = S.Solve()
resdict = {0:'OPTIMAL', 1:'FEASIBLE', 2:'INFEASIBLE', 3:'UNBOUNDED',
4:'ABNORMAL', 5:'MODEL_INVALID', 6:'NOT_SOLVED'}
print('LP solver result:', resdict[res])
l = [(i, j, x[i, j].solution_value())for i in range(N_FAMILIES)
for j in DESIRED[i, :]
if x[i, j].solution_value() >0]
df = pd.DataFrame(l, columns=['family_id', 'day', 'n'])
return df | Santa's Workshop Tour 2019 |
6,915,535 | penalty = ['l1', 'l2']
C = np.logspace(0, 4, 10)
hyperparams = {'penalty': penalty, 'C': C}
lrgd=GridSearchCV(estimator = LogisticRegression() , param_grid = hyperparams,
verbose=True, cv=5, scoring = "accuracy", n_jobs =-1)
lrgd.fit(X_train, y_train)
print(lrgd.best_score_)
print(lrgd.best_estimator_ )<train_on_grid> | def solveSantaIP(families, min_occupancy, max_occupancy):
S = pywraplp.Solver('SolveAssignmentProblem', pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)
n_families = len(families)
x = {}
candidates = [[] for _ in range(N_DAYS)]
for i in families:
for j in DESIRED[i, :]:
candidates[j].append(i)
x[i, j] = S.BoolVar('x[%i,%i]' %(i, j))
daily_occupancy = [S.Sum([x[i, j] * FAMILY_SIZE[i] for i in candidates[j]])
for j in range(N_DAYS)]
family_presence = [S.Sum([x[i, j] for j in DESIRED[i, :]])
for i in families]
preference_cost = S.Sum([PCOSTM[i, j] * x[i,j] for i in families
for j in DESIRED[i, :] ])
S.Minimize(preference_cost)
for i in range(n_families):
S.Add(family_presence[i] == 1)
for j in range(N_DAYS):
S.Add(daily_occupancy[j] >= min_occupancy[j])
S.Add(daily_occupancy[j] <= max_occupancy[j])
res = S.Solve()
resdict = {0:'OPTIMAL', 1:'FEASIBLE', 2:'INFEASIBLE', 3:'UNBOUNDED',
4:'ABNORMAL', 5:'MODEL_INVALID', 6:'NOT_SOLVED'}
print('MIP solver result:', resdict[res])
l = [(i, j)for i in families
for j in DESIRED[i, :]
if x[i, j].solution_value() >0]
df = pd.DataFrame(l, columns=['family_id', 'day'])
return df | Santa's Workshop Tour 2019 |
6,915,535 | n_neighbors = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20]
algorithm = ['auto']
weights = ['uniform', 'distance']
leaf_size = [1, 2, 3, 4, 5, 10, 15, 20, 25, 30]
hyperparams = {'algorithm': algorithm, 'weights': weights, 'leaf_size': leaf_size,
'n_neighbors': n_neighbors}
kngd=GridSearchCV(estimator = KNeighborsClassifier() , param_grid = hyperparams,
verbose=True, cv=5, scoring = "accuracy", n_jobs =-1)
kngd.fit(X_train, y_train)
print(kngd.best_score_)
print(kngd.best_estimator_ )<train_on_grid> | def solveSanta() :
df = solveSantaLP()
THRS = 0.999
assigned_df = df[df.n>THRS].copy()
unassigned_df = df[(df.n<=THRS)&(df.n>1-THRS)]
unassigned = unassigned_df.family_id.unique()
print('{} unassigned families'.format(len(unassigned)))
assigned_df['family_size'] = FAMILY_SIZE[assigned_df.family_id]
occupancy = assigned_df.groupby('day' ).family_size.sum().values
min_occupancy = np.array([max(0, MIN_OCCUPANCY-o)for o in occupancy])
max_occupancy = np.array([MAX_OCCUPANCY - o for o in occupancy])
rdf = solveSantaIP(unassigned, min_occupancy, max_occupancy)
df = pd.concat(( assigned_df[['family_id', 'day']], rdf)).sort_values('family_id')
return df.day.values | Santa's Workshop Tour 2019 |
6,915,535 | n_estimators = [10, 25, 50, 75, 100]
max_depth = [3, None]
max_features = [1, 3, 5, 7]
min_samples_split = [2, 4, 6, 8, 10]
min_samples_leaf = [2, 4, 6, 8, 10]
hyperparams = {'n_estimators': n_estimators, 'max_depth': max_depth, 'max_features': max_features,
'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf}
rfgd=GridSearchCV(estimator = RandomForestClassifier() , param_grid = hyperparams,
verbose=True, cv=5, scoring = "accuracy", n_jobs =-1)
rfgd.fit(X_train, y_train)
print(rfgd.best_score_)
print(rfgd.best_estimator_ )<train_on_grid> | N_DAYS = 100
N_FAMILIES = 5000
MAX_OCCUPANCY = 300
MIN_OCCUPANCY = 125
data = pd.read_csv('/kaggle/input/santa-workshop-tour-2019/family_data.csv', index_col='family_id')
FAMILY_SIZE = data.n_people.values
DESIRED = data.values[:, :-1] - 1
PCOSTM = GetPreferenceCostMatrix(data)
ACOSTM = GetAccountingCostMatrix() | Santa's Workshop Tour 2019 |
6,915,535 | Cs = [0.001, 0.01, 0.1, 1, 5, 10, 15, 20, 50, 100]
gammas = [0.001, 0.01, 0.1, 1]
hyperparams = {'C': Cs, 'gamma' : gammas}
svgd=GridSearchCV(estimator = SVC(probability=True), param_grid = hyperparams,
verbose=True, cv=5, scoring = "accuracy", n_jobs =-1)
svgd.fit(X_train, y_train)
print(svgd.best_score_)
print(svgd.best_estimator_ )<train_on_grid> | %%time
prediction = solveSanta()
pc, occ = pcost(prediction)
ac, _ = acost(occ)
print('{}, {:.2f},({}, {})'.format(pc, ac, occ.min() , occ.max())) | Santa's Workshop Tour 2019 |
6,915,535 | n_restarts_optimizer = [0, 1, 2, 3]
max_iter_predict = [1, 2, 5, 10, 20, 35, 50, 100]
warm_start = [True, False]
hyperparams = {'n_restarts_optimizer': n_restarts_optimizer, 'max_iter_predict': max_iter_predict, 'warm_start': warm_start}
gpgd=GridSearchCV(estimator = GaussianProcessClassifier() , param_grid = hyperparams,
verbose=True, cv=5, scoring = "accuracy", n_jobs =-1)
gpgd.fit(X_train, y_train)
print(gpgd.best_score_)
print(gpgd.best_estimator_ )<train_on_grid> | new = prediction.copy()
findBetterDay4Family(new ) | Santa's Workshop Tour 2019 |
6,915,535 | n_estimators = [10, 25, 50, 75, 100, 125, 150, 200]
learning_rate = [0.001, 0.01, 0.1, 0.5, 1, 1.5, 2]
hyperparams = {'n_estimators': n_estimators, 'learning_rate': learning_rate}
adgd=GridSearchCV(estimator = AdaBoostClassifier() , param_grid = hyperparams,
verbose=True, cv=5, scoring = "accuracy", n_jobs =-1)
adgd.fit(X_train, y_train)
print(adgd.best_score_)
print(adgd.best_estimator_ )<train_on_grid> | final = stochastic_product_search(
top_k=2,
fam_size=8,
original=new,
n_iter=500000,
verbose=1000,
verbose2=50000,
random_state=2019
) | Santa's Workshop Tour 2019 |
6,915,535 | learning_rate = [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.2]
n_estimators = [100, 250, 500, 750, 1000, 1250, 1500]
hyperparams = {'learning_rate': learning_rate, 'n_estimators': n_estimators}
gbgd=GridSearchCV(estimator = GradientBoostingClassifier() , param_grid = hyperparams,
verbose=True, cv=5, scoring = "accuracy", n_jobs =-1)
gbgd.fit(X_train, y_train)
print(gbgd.best_score_)
print(gbgd.best_estimator_ )<choose_model_class> | final = seed_finding(2019, final ) | Santa's Workshop Tour 2019 |
6,915,535 | n_estimators = [10, 25, 50, 75, 100]
max_depth = [3, None]
max_features = [1, 3, 5, 7]
min_samples_split = [2, 4, 6, 8, 10]
min_samples_leaf = [2, 4, 6, 8, 10]
hyperparams = {'n_estimators': n_estimators, 'max_depth': max_depth, 'max_features': max_features,
'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf}
etgd=GridSearchCV(estimator = ExtraTreesClassifier() , param_grid = hyperparams,
verbose=True, cv=5, scoring = "accuracy", n_jobs =-1)
etgd.fit(X_train, y_train)
print(etgd.best_score_)
print(etgd.best_estimator_ )<train_on_grid> | sub = pd.DataFrame(range(N_FAMILIES), columns=['family_id'])
sub['assigned_day'] = final+1
sub.to_csv('submission.csv', index=False ) | Santa's Workshop Tour 2019 |
7,163,909 | n_estimators = [10, 15, 20, 25, 50, 75, 100, 150]
max_samples = [1, 2, 3, 5, 7, 10, 15, 20, 25, 30, 50]
max_features = [1, 3, 5, 7]
hyperparams = {'n_estimators': n_estimators, 'max_samples': max_samples, 'max_features': max_features}
bcgd=GridSearchCV(estimator = BaggingClassifier() , param_grid = hyperparams,
verbose=True, cv=5, scoring = "accuracy", n_jobs =-1)
bcgd.fit(X_train, y_train)
print(bcgd.best_score_)
print(bcgd.best_estimator_ )<train_on_grid> | %%writefile main.cpp
#include <algorithm>
#include <array>
#include <atomic>
#include <cassert>
#include <chrono>
#include <cmath>
#include <cstdint>
#include <fstream>
#include <iostream>
#include <iterator>
#include <numeric>
#include <random>
#include <string>
#include <thread>
#include <vector>
using namespace std;
using namespace std::chrono;
int N_JOBS = 4;
int END_TIME = 10;//in minutes
auto START_TIME = high_resolution_clock::now() ;
constexpr array<uint8_t, 15> DISTRIBUTION{2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 5}; // You can set up how many families to use for swaps and which best choices to try for each family.
// {2, 5} means the first random family will brute-force choices 1-2 and the second random family will brute-force choices 1-5.
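// Illustrative reading of the array above: the 15-entry DISTRIBUTION means each sampled group of 15
// random families is scored against every combination of their allowed choices, 2^13 * 3 * 5 = 122880
// candidates, and a swap is kept only if it beats the current score (see stochastic_product_search below).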
constexpr int MAX_OCCUPANCY = 300;
constexpr int MIN_OCCUPANCY = 125;
constexpr int BEST_N = 10;
array<uint8_t, 5000> n_people;
array<array<uint8_t, 10>, 5000> choices;
array<array<uint16_t, 10>, 5000> PCOSTM;
array<array<double, 176>, 176> ACOSTM;
struct Index {
Index(array<uint8_t, 5000> assigned_days_): assigned_days(assigned_days_){
setup() ;
}
array<uint8_t, 5000> assigned_days;
array<uint16_t, 100> daily_occupancy_{};
int preference_cost_ = 0;
void setup() {
preference_cost_ = 0;
daily_occupancy_.fill(0);
for(int j = 0; j < assigned_days.size() ; ++j){
daily_occupancy_[choices[j][assigned_days[j]]] += n_people[j];
preference_cost_ += PCOSTM[j][assigned_days[j]];
}
}
double calc(const array<uint16_t, 5000>& indices, const array<uint8_t, DISTRIBUTION.size() >& change){
double accounting_penalty = 0.0;
auto daily_occupancy = daily_occupancy_;
int preference_cost = preference_cost_;
for(int i = 0; i < DISTRIBUTION.size() ; ++i){
int j = indices[i];
daily_occupancy[choices[j][assigned_days[j]]] -= n_people[j];
daily_occupancy[choices[j][ change[i]]] += n_people[j];
preference_cost += PCOSTM[j][change[i]] - PCOSTM[j][assigned_days[j]];
}
for(auto occupancy : daily_occupancy)
if(occupancy < MIN_OCCUPANCY)
return 1e12*(MIN_OCCUPANCY-occupancy);
else if(occupancy > MAX_OCCUPANCY)
return 1e12*(occupancy - MAX_OCCUPANCY);
for(int day = 0; day < 99; ++day)
accounting_penalty += ACOSTM[daily_occupancy[day]-125][daily_occupancy[day+1]-125];
accounting_penalty += ACOSTM[daily_occupancy[99]-125][daily_occupancy[99]-125];
return preference_cost + accounting_penalty;
}
void reindex(const array<uint16_t, DISTRIBUTION.size() >& indices, const array<uint8_t, DISTRIBUTION.size() >& change){
for(int i = 0; i < DISTRIBUTION.size() ; ++i){
assigned_days[indices[i]] = change[i];
}
setup() ;
}
};
static std::atomic<bool> flag(false);
static Index global_index({});
bool time_exit_fn() {
return duration_cast<minutes>(high_resolution_clock::now() -START_TIME ).count() < END_TIME;
}
void init_data() {
ifstream in(".. /input/santa-workshop-tour-2019/family_data.csv");
assert(in && "family_data.csv");
string header;
int n,x;
char comma;
getline(in, header);
for(int j = 0; j < choices.size() ; ++j){
in >> x >> comma;
for(int i = 0; i < 10; ++i){
in >> x >> comma;
choices[j][i] = x-1;
}
in >> n;
n_people[j] = n;
}
array<int, 10> pc{0, 50, 50, 100, 200, 200, 300, 300, 400, 500};
array<int, 10> pn{0, 0, 9, 9, 9, 18, 18, 36, 36, 235};
for(int j = 0; j < PCOSTM.size() ; ++j)
for(int i = 0; i < 10; ++i)
PCOSTM[j][i] = pc[i] + pn[i] * n_people[j];
for(int i = 0; i < 176; ++i)
for(int j = 0; j < 176; ++j)
ACOSTM[i][j] = i * pow(i+125, 0.5 + abs(i-j)/ 50.0)/ 400.0;
}
array<uint8_t, 5000> read_submission(string filename){
ifstream in(filename);
assert(in && "submission.csv");
array<uint8_t, 5000> assigned_day{};
string header;
int id, x;
char comma;
getline(in, header);
for(int j = 0; j < choices.size() ; ++j){
in >> id >> comma >> x;
assigned_day[j] = x-1;
auto it = find(begin(choices[j]), end(choices[j]), assigned_day[j]);
if(it != end(choices[j]))
assigned_day[j] = distance(begin(choices[j]), it);
}
return assigned_day;
}
double calc(const array<uint8_t, 5000>& assigned_days, bool print=false){
int preference_cost = 0;
double accounting_penalty = 0.0;
array<uint16_t, 100> daily_occupancy{};
for(int j = 0; j < assigned_days.size() ; ++j){
preference_cost += PCOSTM[j][assigned_days[j]];
daily_occupancy[choices[j][assigned_days[j]]] += n_people[j];
}
for(auto occupancy : daily_occupancy)
if(occupancy < MIN_OCCUPANCY)
return 1e12*(MIN_OCCUPANCY-occupancy);
else if(occupancy > MAX_OCCUPANCY)
return 1e12*(occupancy - MAX_OCCUPANCY);
for(int day = 0; day < 99; ++day)
accounting_penalty += ACOSTM[daily_occupancy[day]-125][daily_occupancy[day+1]-125];
accounting_penalty += ACOSTM[daily_occupancy[99]-125][daily_occupancy[99]-125];
if(print){
cout << preference_cost << " " << accounting_penalty << " " << preference_cost+accounting_penalty << endl;
}
return preference_cost + accounting_penalty;
}
void save_sub(const array<uint8_t, 5000>& assigned_day){
ofstream out("submission.csv");
out << "family_id,assigned_day" << endl;
for(int i = 0; i < assigned_day.size() ; ++i)
out << i << "," << choices[i][assigned_day[i]]+1 << endl;
}
const vector<array<uint8_t, DISTRIBUTION.size() >> changes = []() {
vector<array<uint8_t, DISTRIBUTION.size() >> arr;
array<uint8_t, DISTRIBUTION.size() > tmp{};
for(int i = 0; true; ++i){
arr.push_back(tmp);
tmp[0] += 1;
for(int j = 0; j < DISTRIBUTION.size() ; ++j)
if(tmp[j] >= DISTRIBUTION[j]){
if(j >= DISTRIBUTION.size() -1)
return arr;
tmp[j] = 0;
++tmp[j+1];
}
}
return arr;
}() ;
//template<class ExitFunction>
void stochastic_product_search(Index index){ // 15'360'000it/s 65ns/it 0.065µs/it
double best_local_score = calc(index.assigned_days);
thread_local std::mt19937 gen(std::random_device{}());
uniform_int_distribution<> dis(0, 4999);
array<uint16_t, 5000> indices;
iota(begin(indices), end(indices), 0);
array<uint16_t, DISTRIBUTION.size() > best_indices{};
array<uint8_t, DISTRIBUTION.size() > best_change{};
for(;time_exit_fn() ;){
bool found_better = false;
for(int k = 0; k < BEST_N; ++k){
for(int i = 0; i < DISTRIBUTION.size() ; ++i)//random swap
swap(indices[i], indices[dis(gen)]);
for(const auto& change : changes){
auto score = index.calc(indices, change);
if(score < best_local_score){
found_better = true;
best_local_score = score;
best_change = change;
copy_n(begin(indices), DISTRIBUTION.size() , begin(best_indices)) ;
}
}
}
if(flag.load() == true){
return;
}
if(found_better && flag.load() == false){ // reindex from N best if found better
flag = true;
index.reindex(best_indices, best_change);
global_index = index;
return;
}
}
}
int main() {
init_data() ;
auto assigned_day = read_submission("../input/submission-710934/submission.csv");
Index index(assigned_day);
global_index = index;
calc(index.assigned_days, true);
for(;time_exit_fn() ;){
std::thread threads[N_JOBS];
for(int i = 0; i < N_JOBS; i++){
threads[i] = std::thread(stochastic_product_search, index);
}
for(int i = 0; i < N_JOBS; i++){
threads[i].join() ;
}
auto best_score = calc(global_index.assigned_days, true);
save_sub(global_index.assigned_days);
flag = false;
index = global_index;
}
return 0;
}
| Santa's Workshop Tour 2019 |
7,163,909 | <train_on_grid><EOS> | !g++ -pthread -lpthread -O3 -std=c++17 -o main main.cpp | Santa's Workshop Tour 2019 |
6,942,765 | <train_on_grid><EOS> | import csv
import random
import time
import numpy as np
import pandas as pd
import statsmodels.api as sm
from ortools.linear_solver import pywraplp
NMB_DAYS = 100
NMB_FAMILIES = 5000
MAX_PEOPLE_PERDAY = 300
MIN_PEOPLE_PERDAY = 125
MAX_PREF_ALLOWED = 4
cost_ = 0
assignment_ = [-1 for i in range(NMB_FAMILIES)]
nmb_people_assigned_to_day_ = [0 for i in range(NMB_DAYS)]
families_assigned_to_day_ = []
for i in range(NMB_DAYS):
families_assigned_to_day_.append([])
def get_penalty(n, choice):
penalty = None
if choice == 0:
penalty = 0
elif choice == 1:
penalty = 50
elif choice == 2:
penalty = 50 + 9 * n
elif choice == 3:
penalty = 100 + 9 * n
elif choice == 4:
penalty = 200 + 9 * n
elif choice == 5:
penalty = 200 + 18 * n
elif choice == 6:
penalty = 300 + 18 * n
elif choice == 7:
penalty = 300 + 36 * n
elif choice == 8:
penalty = 400 + 36 * n
elif choice == 9:
penalty = 500 + 36 * n + 199 * n
else:
penalty = 500 + 36 * n + 398 * n
return penalty
def GetAssignmentCostMatrix(data):
cost_matrix = np.zeros(( NMB_FAMILIES, NMB_DAYS), dtype=np.int64)
for i in range(NMB_FAMILIES):
desired = data.values[i, :-1]
cost_matrix[i, :] = get_penalty(FAMILY_SIZE[i], 10)
for j, day in enumerate(desired):
cost_matrix[i, day-1] = get_penalty(FAMILY_SIZE[i], j)
return cost_matrix
def GetAccountingCostMatrix() :
ac = np.zeros(( 1000, 1000), dtype=np.float64)
for n in range(ac.shape[0]):
for n_p1 in range(ac.shape[1]):
diff = abs(n - n_p1)
ac[n, n_p1] = max(0,(n - 125)/ 400.0 * n**(0.5 + diff / 50.0))
return ac
def GetPreferenceMatrix(data):
pref_matrix = np.zeros(( NMB_FAMILIES, NMB_DAYS), dtype=np.int64)
for i in range(NMB_FAMILIES):
desired = data.values[i, :-1]
pref_matrix[i, :] = 10
for j, day in enumerate(desired):
pref_matrix[i, day-1] = j
return pref_matrix
def GetPreferenceForFamiliesMatrix(data):
pref_matrix = np.zeros(( NMB_FAMILIES, NMB_DAYS), dtype=np.int64)
pref_matrix2 = np.zeros(( NMB_FAMILIES, 10), dtype=np.int64)
for i in range(NMB_FAMILIES):
desired = data.values[i, :-1]
pref_matrix[i, :] = 10
for j, day in enumerate(desired):
pref_matrix[i, day-1] = j
for i in range(NMB_FAMILIES):
for j in range(NMB_DAYS):
if(pref_matrix[i][j] < 10):
pref_matrix2[i][pref_matrix[i][j]] = j
return pref_matrix2
swap_candidates_ = []
for i in range(NMB_DAYS):
swap_candidates_.append([])
for j in range(NMB_DAYS):
swap_candidates_[i].append([])
def preprocessing() :
nbTopForBinN = {}
for i in range(10):
for j in range(NMB_DAYS):
nbTopForBinN[i, j] = 0
for F in range(NMB_FAMILIES):
for i in range(MAX_PREF_ALLOWED + 1):
D = PREFERENCES_FOR_FAMILY[F][i]
for p in range(i, MAX_PREF_ALLOWED + 1):
nbTopForBinN[p, D] += FAMILY_SIZE[F]
nmbVarsDROPPED = 0
for i in range(NMB_DAYS):
if nbTopForBinN[0, i] < 100:
for F in range(NMB_FAMILIES):
if PREFERENCES_FOR_FAMILY[F][0] == i:
for P in range(1, MAX_PREF_ALLOWED + 1):
if PREFERENCE_MATRIX[F, PREFERENCES_FOR_FAMILY[F][P]] < 10:
nmbVarsDROPPED += 1
PREFERENCE_MATRIX[F, PREFERENCES_FOR_FAMILY[F][P]] = 10
for D in range(NMB_DAYS):
if nbTopForBinN[0, D] >= 300:
for F in range(NMB_FAMILIES):
for P in range(2, MAX_PREF_ALLOWED + 1):
if PREFERENCES_FOR_FAMILY[F][P] == D:
if PREFERENCE_MATRIX[F, PREFERENCES_FOR_FAMILY[F][P]] < 10:
nmbVarsDROPPED += 1
PREFERENCE_MATRIX[F, PREFERENCES_FOR_FAMILY[F][P]] = 10
for D in range(NMB_DAYS):
if nbTopForBinN[1, D] >= 300 and nbTopForBinN[0, D] < 300:
for F in range(NMB_FAMILIES):
for P in range(3, MAX_PREF_ALLOWED + 1):
if PREFERENCES_FOR_FAMILY[F][P] == D:
if PREFERENCE_MATRIX[F, PREFERENCES_FOR_FAMILY[F][P]] < 10:
nmbVarsDROPPED += 1
PREFERENCE_MATRIX[F, PREFERENCES_FOR_FAMILY[F][P]] = 10
for D in range(NMB_DAYS):
if nbTopForBinN[2, D] >= 300 and nbTopForBinN[0, D] < 300 and nbTopForBinN[1, D] < 300:
for F in range(NMB_FAMILIES):
for P in range(4, MAX_PREF_ALLOWED + 1):
if PREFERENCES_FOR_FAMILY[F][P] == D:
if PREFERENCE_MATRIX[F, PREFERENCES_FOR_FAMILY[F][P]] < 10:
nmbVarsDROPPED += 1
PREFERENCE_MATRIX[F, PREFERENCES_FOR_FAMILY[F][P]] = 10
def calculate_solution_cost() :
global cost_
cost_ = 0
assign_cost = 0
account_cost = 0
for i in range(NMB_FAMILIES):
cost_ += ASSIGNMENT_COST_MATRIX[i][assignment_[i]]
assign_cost = cost_
for d in range(NMB_DAYS):
today_count = nmb_people_assigned_to_day_[d]
yesterday_count = today_count
if(d < NMB_DAYS - 1):
yesterday_count = nmb_people_assigned_to_day_[d + 1]
cost_ += ACCOUNTING_COST_MATRIX[today_count][yesterday_count]
account_cost += ACCOUNTING_COST_MATRIX[today_count][yesterday_count]
return cost_, assign_cost, account_cost
def update_solution(assign):
global cost_
global nmb_people_assigned_to_day_
global assignment_
for i in range(NMB_DAYS):
nmb_people_assigned_to_day_[i] = 0
families_assigned_to_day_[i] = []
for j in range(NMB_DAYS):
swap_candidates_[i][j] = []
for F in range(NMB_FAMILIES):
day = assign[F]
assignment_[F] = day
nmb_people_assigned_to_day_[day] += FAMILY_SIZE[F]
families_assigned_to_day_[day].append(F)
for k in range(MAX_PREF_ALLOWED + 1):
DD = PREFERENCES_FOR_FAMILY[F][k]
if(PREFERENCE_MATRIX[F][DD] <= MAX_PREF_ALLOWED):
swap_candidates_[day][DD].append(F);
calculate_solution_cost()
def calculate_accounting_cost(nmb_people_assigned_to_day_, days_to_calc):
accounting_cost = 0
for d in days_to_calc:
today_count = nmb_people_assigned_to_day_[d]
yesterday_count = nmb_people_assigned_to_day_[d]
if(d < NMB_DAYS - 1):
yesterday_count = nmb_people_assigned_to_day_[d + 1]
accounting_cost += ACCOUNTING_COST_MATRIX[today_count][yesterday_count]
return accounting_cost
def check_move(F1, F2, D2):
global cost_
global nmb_people_assigned_to_day_
global assignment_
D1 = assignment_[F1]
N1 = FAMILY_SIZE[F1]
N2 = 0
if(F2 >= 0):
N2 = FAMILY_SIZE[F2]
if(nmb_people_assigned_to_day_[D1] - N1 + N2 < MIN_PEOPLE_PERDAY):
return False
if(nmb_people_assigned_to_day_[D1] - N1 + N2 > MAX_PEOPLE_PERDAY):
return False
if(nmb_people_assigned_to_day_[D2] - N2 + N1 < MIN_PEOPLE_PERDAY):
return False
if(nmb_people_assigned_to_day_[D2] - N2 + N1 > MAX_PEOPLE_PERDAY):
return False
return True
def calculate_assignment_cost_diff_with_move(F1, F2, D2):
D1 = assignment_[F1]
cost_diff = 0
cost_diff +=(ASSIGNMENT_COST_MATRIX[F1][D2] - ASSIGNMENT_COST_MATRIX[F1][D1])
if(F2 >= 0):
cost_diff +=(ASSIGNMENT_COST_MATRIX[F2][D1] - ASSIGNMENT_COST_MATRIX[F2][D2])
return cost_diff
def calculate_accounting_cost_diff_with_move(F1, F2, D2):
D1 = assignment_[F1]
N1 = FAMILY_SIZE[F1]
N2 = 0
if(F2 >= 0):
N2 = FAMILY_SIZE[F2]
accounting_cost_diff = 0
accounting_cost_old = 0
accounting_cost_new = 0
days_to_calc = {D1, D2}
if(D1 > 0):
days_to_calc.add(D1 - 1)
if(D2 > 0):
days_to_calc.add(D2 - 1)
accounting_cost_old = calculate_accounting_cost(nmb_people_assigned_to_day_, days_to_calc)
nmb_people_assigned_to_day_new_ = list(nmb_people_assigned_to_day_)
nmb_people_assigned_to_day_new_[D1] +=(N2 - N1)
nmb_people_assigned_to_day_new_[D2] +=(N1 - N2)
accounting_cost_new = calculate_accounting_cost(nmb_people_assigned_to_day_new_, days_to_calc)
accounting_cost_diff = accounting_cost_new - accounting_cost_old
return accounting_cost_diff
def perform_move(F1, F2, D2):
global cost_
global nmb_people_assigned_to_day_
global assignment_
D1 = assignment_[F1]
N1 = FAMILY_SIZE[F1]
N2 = 0
if(F2 >= 0):
N2 = FAMILY_SIZE[F2]
assignment_[F1] = D2
families_assigned_to_day_[D2].append(F1)
families_assigned_to_day_[D1].remove(F1)
for k in range(MAX_PREF_ALLOWED + 1):
DD = PREFERENCES_FOR_FAMILY[F1][k]
if(PREFERENCE_MATRIX[F1][DD] <= MAX_PREF_ALLOWED):
swap_candidates_[D2][DD].append(F1);
swap_candidates_[D1][DD].remove(F1);
if(F2 >= 0):
assignment_[F2] = D1
families_assigned_to_day_[D1].append(F2)
families_assigned_to_day_[D2].remove(F2)
for k in range(MAX_PREF_ALLOWED + 1):
DD = PREFERENCES_FOR_FAMILY[F2][k]
if(PREFERENCE_MATRIX[F2][DD] <= MAX_PREF_ALLOWED):
swap_candidates_[D1][DD].append(F2);
swap_candidates_[D2][DD].remove(F2);
nmb_people_assigned_to_day_[D1] +=(N2 - N1)
nmb_people_assigned_to_day_[D2] +=(N1 - N2)
calculate_solution_cost()
TOLRAND = 1000
TOL = 50
assCostDiffTOL = 10000000
def local_search_shift_and_swap(nmbIters):
iter = 0
while iter < nmbIters:
iter+=1
F1 = random.randint(0, NMB_FAMILIES - 1)
D1 = assignment_[F1]
r = random.randint(0, MAX_PREF_ALLOWED)
D2 = PREFERENCES_FOR_FAMILY[F1][r]
if(D1 == D2):
continue
if(PREFERENCE_MATRIX[F1][D2] > MAX_PREF_ALLOWED):
continue
F2 = -1
if(random.randint(0, 100)< 10):
if len(swap_candidates_[D2][D1])== 0:
continue
r = random.randint(0, len(swap_candidates_[D2][D1])- 1)
F2 = swap_candidates_[D2][D1][r]
if(PREFERENCE_MATRIX[F2][D1] > MAX_PREF_ALLOWED):
continue
if(check_move(F1, F2, D2)== False):
continue
ass_cost_diff = calculate_assignment_cost_diff_with_move(F1, F2, D2)
if(ass_cost_diff > assCostDiffTOL):
continue
acc_cost_diff = calculate_accounting_cost_diff_with_move(F1, F2, D2)
cost_diff = ass_cost_diff + acc_cost_diff
tol = 0
if(random.randint(0, TOLRAND)== 1):
tol = TOL
if(cost_diff <= tol and ass_cost_diff <= assCostDiffTOL):
perform_move(F1, F2, D2)
def LocalSearch(timeLimit, maxIters):
startTime = time.time()
iter = 0
global assCostDiffTOL
assCostDiffTOL = 0
best_assignment = {}
for i in range(NMB_FAMILIES):
best_assignment[i] = assignment_[i]
bestCost = cost_
while(iter < maxIters):
if(time.time() - startTime >= timeLimit):
break
iter+=1
if(iter % 5 == 0):
update_solution(best_assignment)
local_search_shift_and_swap(20000)
if(cost_ < bestCost):
bestCost = cost_
print(iter, bestCost, int(time.time() - startTime))
for i in range(NMB_FAMILIES):
best_assignment[i] = assignment_[i]
assCostDiffTOL += 1
update_solution(best_assignment)
def reg_m(y, x):
ones = np.ones(len(x[0]))
X = sm.add_constant(np.column_stack(( x[0], ones)))
for ele in x[1:]:
X = sm.add_constant(np.column_stack(( ele, X)))
results = sm.OLS(y, X ).fit()
return results
DELTA = 1
LINEARIZE = False
def linearize() :
resultABC = []
for d in range(NMB_DAYS):
resultABC.append([])
for D in range(NMB_DAYS):
x = []
x.append([])
x.append([])
y = []
LB = max(MIN_PEOPLE_PERDAY, nmb_people_assigned_to_day_[D] - DELTA)
UB = min(MAX_PEOPLE_PERDAY, nmb_people_assigned_to_day_[D] + DELTA)
dom1 = {LB}
for i in range(LB + 1, UB + 1):
dom1.add(i)
if D < NMB_DAYS - 1:
LB = max(MIN_PEOPLE_PERDAY, nmb_people_assigned_to_day_[D + 1] - DELTA)
UB = min(MAX_PEOPLE_PERDAY, nmb_people_assigned_to_day_[D + 1] + DELTA)
dom2 = {LB}
for i in range(LB + 1, UB + 1):
dom2.add(i)
for i in dom1:
for j in dom2:
x[0].append(i)
x[1].append(j)
y.append(ACCOUNTING_COST_MATRIX[i, j])
else:
for i in dom1:
x[0].append(i)
x[1].append(i)
y.append(ACCOUNTING_COST_MATRIX[i, i])
result = reg_m(y,x)
if(nmb_people_assigned_to_day_[D] == 125):
result.params[0] = 0
result.params[1] = 10000
result.params[2] = -125 * 10000
resultABC[D] = result.params
p = resultABC[D][0]
resultABC[D][0] = resultABC[D][1]
resultABC[D][1] = p
return resultABC
def MIP(timeLimit):
solver = pywraplp.Solver('simple_mip_program', pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)
x = {}
C = {}
key = {}
K = 0
for i in range(NMB_FAMILIES):
for j in range(NMB_DAYS):
if PREFERENCE_MATRIX[i, j] <= MAX_PREF_ALLOWED:
key[i, j] = K
x[K] = solver.BoolVar('x[%i,%i]' %(i, j))
C[K] = ASSIGNMENT_COST_MATRIX[i, j]
K = K + 1
else:
key[i, j] = -1
nmbvar = solver.NumVariables()
print('Number of variables =', solver.NumVariables())
for i in range(NMB_FAMILIES):
expr = 0
for j in range(NMB_DAYS):
if key[i, j] >= 0:
expr = expr + x[key[i, j]]
solver.Add(expr == 1)
occ = {}
for i in range(NMB_DAYS):
occ[i] = solver.IntVar(0.0, 1000.0, 'occ[%i,%i]' %(i, 0))
for i in range(NMB_DAYS):
expr = 0
for j in range(NMB_FAMILIES):
if key[j, i] >= 0:
expr +=(x[key[j, i]] * FAMILY_SIZE[j])
LB = MIN_PEOPLE_PERDAY
UB = MAX_PEOPLE_PERDAY
if LINEARIZE:
LB = max(nmb_people_assigned_to_day_[i] - DELTA, MIN_PEOPLE_PERDAY)
UB = min(nmb_people_assigned_to_day_[i] + DELTA, MAX_PEOPLE_PERDAY)
solver.Add(expr >= LB)
solver.Add(expr <= UB)
solver.Add(occ[i] == expr)
print('Number of constraints =', solver.NumConstraints())
obj = solver.Sum([C[i] * x[i] for i in range(nmbvar)])
if LINEARIZE == True:
obj2 = 0
ABC = linearize()
for D in range(NMB_DAYS - 1):
a = ABC[D][0]
b = ABC[D][1]
c = ABC[D][2]
if D < NMB_DAYS - 1:
obj2 = obj2 +(a * occ[D] + b * occ[D + 1] + c)
else:
obj2 = obj2 +(a * occ[D] + b * occ[D] + c)
obj = obj + 1 * obj2
solver.Minimize(obj)
solver.SetTimeLimit(1000 * timeLimit)
status = solver.Solve()
print('MIP Objective value =', solver.Objective().Value())
print('Problem solved in %f milliseconds' % solver.wall_time())
assign = {}
for i in range(NMB_FAMILIES):
for j in range(NMB_DAYS):
if key[i, j] >= 0:
if x[key[i, j]].solution_value() > 0.99:
assign[i] = j
update_solution(assign)
random.seed(52)
data = pd.read_csv('/kaggle/input/santa-workshop-tour-2019/family_data.csv', index_col='family_id')
FAMILY_SIZE = data.n_people.values
ASSIGNMENT_COST_MATRIX = GetAssignmentCostMatrix(data)
ACCOUNTING_COST_MATRIX = GetAccountingCostMatrix()
PREFERENCE_MATRIX = GetPreferenceMatrix(data)
PREFERENCES_FOR_FAMILY = GetPreferenceForFamiliesMatrix(data)
preprocessing()
startTime = time.time()
MIP(60)
print(calculate_solution_cost())
LocalSearch(3600, 2000)
print(calculate_solution_cost())
DELTA = 1
LINEARIZE = True
MIP(5 * 60)
print(calculate_solution_cost())
LocalSearch(10 * 3600, 500)
best_assignment = {}
for i in range(NMB_FAMILIES):
best_assignment[i] = assignment_[i]
bestCost = cost_
for i in range(5):
MIP(5 * 60)
print(calculate_solution_cost())
LocalSearch(3600, 100)
print(calculate_solution_cost())
if(cost_ < bestCost):
bestCost = cost_
print("bestCost: ", bestCost, " time: ", time.time() - startTime)
for i in range(NMB_FAMILIES):
best_assignment[i] = assignment_[i]
update_solution(best_assignment)
print("Best Solution: ", bestCost)
print("LocalSearch...")
LocalSearch(3600, 1000)
print("Final Solution: ", cost_)
with open('submission.csv', mode='w')as csv_file:
fieldnames = ['family_id', 'assigned_day']
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
writer.writeheader()
for i in range(NMB_FAMILIES):
writer.writerow({'family_id': i, 'assigned_day': assignment_[i] + 1} ) | Santa's Workshop Tour 2019 |
7,238,572 | <SOS> metric: SantaWorkshopSchedule2019 Kaggle data source: santas-workshop-tour-2019<train_on_grid> | %matplotlib inline
import numpy as np
import pandas as pd
from ortools.linear_solver import pywraplp
NUMBER_DAYS = 100
NUMBER_FAMILIES = 5000
data = pd.read_csv('/kaggle/input/santa-workshop-tour-2019/family_data.csv')
submission = pd.read_csv('/kaggle/input/c-stochastic-product-search-65ns/submission.csv')
assigned_days = submission['assigned_day'].values
columns = data.columns[1:11]
DESIRED = data[columns].values
COST_PER_FAMILY = [0,50,50,100,200,200,300,300,400,500]
COST_PER_FAMILY_MEMBER = [0, 0, 9, 9, 9, 18, 18, 36, 36,235]
N_PEOPLE = data['n_people'].values
def get_daily_occupancy(assigned_days):
daily_occupancy = np.zeros(100, int)
for fid, assigned_day in enumerate(assigned_days):
daily_occupancy[assigned_day-1] += N_PEOPLE[fid]
return daily_occupancy
def cost_function(prediction):
N_DAYS = 100
MAX_OCCUPANCY = 300
MIN_OCCUPANCY = 125
penalty = 0
days = list(range(N_DAYS,0,-1))
tmp = pd.read_csv('/kaggle/input/santa-workshop-tour-2019/family_data.csv', index_col='family_id')
family_size_dict = tmp[['n_people']].to_dict() ['n_people']
cols = [f'choice_{i}' for i in range(10)]
choice_dict = tmp[cols].to_dict()
daily_occupancy = {k:0 for k in days}
for f, d in enumerate(prediction):
n = family_size_dict[f]
choice_0 = choice_dict['choice_0'][f]
choice_1 = choice_dict['choice_1'][f]
choice_2 = choice_dict['choice_2'][f]
choice_3 = choice_dict['choice_3'][f]
choice_4 = choice_dict['choice_4'][f]
choice_5 = choice_dict['choice_5'][f]
choice_6 = choice_dict['choice_6'][f]
choice_7 = choice_dict['choice_7'][f]
choice_8 = choice_dict['choice_8'][f]
choice_9 = choice_dict['choice_9'][f]
daily_occupancy[d] += n
if d == choice_0:
penalty += 0
elif d == choice_1:
penalty += 50
elif d == choice_2:
penalty += 50 + 9 * n
elif d == choice_3:
penalty += 100 + 9 * n
elif d == choice_4:
penalty += 200 + 9 * n
elif d == choice_5:
penalty += 200 + 18 * n
elif d == choice_6:
penalty += 300 + 18 * n
elif d == choice_7:
penalty += 300 + 36 * n
elif d == choice_8:
penalty += 400 + 36 * n
elif d == choice_9:
penalty += 500 + 36 * n + 199 * n
else:
penalty += 500 + 36 * n + 398 * n
for _, v in daily_occupancy.items() :
if(v < MIN_OCCUPANCY):
penalty += 100000000
accounting_cost =(daily_occupancy[days[0]]-125.0)/ 400.0 * daily_occupancy[days[0]]**(0.5)
accounting_costs = [max(0, accounting_cost)]
diffs = [0]
yesterday_count = daily_occupancy[days[0]]
for day in days[1:]:
today_count = daily_occupancy[day]
diff = abs(today_count - yesterday_count)
accounting_costs.append(max(0,(today_count-125.0)/ 400.0 * today_count**(0.5 + diff / 50.0)))
yesterday_count = today_count
return penalty, sum(accounting_costs), penalty + sum(accounting_costs ) | Santa's Workshop Tour 2019 |
7,238,572 | <train_on_grid><EOS> | MAX_BEST_CHOICE = 5
NUM_SWAP = 2500
NUM_SECONDS = 1800
NUM_THREADS = 4
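# Each pass below freezes all but NUM_SWAP randomly chosen families, then asks CBC to reassign just
# those families among their MAX_BEST_CHOICE preferred days while keeping every day's occupancy equal
# to its current value; the new schedule is kept only if the full cost_function improves.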
for _ in range(40):
solver = pywraplp.Solver('Optimization preference cost', pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)
daily_occupancy = get_daily_occupancy(assigned_days ).astype(float)
fids = np.random.choice(range(NUMBER_FAMILIES), NUM_SWAP, replace=False)
PCOSTM, B = {}, {}
for fid in range(NUMBER_FAMILIES):
if fid in fids:
for i in range(MAX_BEST_CHOICE):
PCOSTM[fid, DESIRED[fid][i]-1] = COST_PER_FAMILY[i] + N_PEOPLE[fid] * COST_PER_FAMILY_MEMBER[i]
B[ fid, DESIRED[fid][i]-1] = solver.BoolVar('')
else:
daily_occupancy[assigned_days[fid]-1] -= N_PEOPLE[fid]
solver.set_time_limit(NUM_SECONDS*NUM_THREADS*1000)
solver.SetNumThreads(NUM_THREADS)
for day in range(NUMBER_DAYS):
if daily_occupancy[day]:
solver.Add(solver.Sum([N_PEOPLE[fid] * B[fid, day] for fid in range(NUMBER_FAMILIES)if(fid,day)in B])== daily_occupancy[day])
for fid in fids:
solver.Add(solver.Sum(B[fid, day] for day in range(NUMBER_DAYS)if(fid, day)in B)== 1)
solver.Minimize(solver.Sum(PCOSTM[fid, day] * B[fid, day] for fid, day in B))
sol = solver.Solve()
status = ['OPTIMAL', 'FEASIBLE', 'INFEASIBLE', 'UNBOUNDED', 'ABNORMAL', 'MODEL_INVALID', 'NOT_SOLVED']
if status[sol] in ['OPTIMAL', 'FEASIBLE']:
tmp = assigned_days.copy()
for fid, day in B:
if B[fid, day].solution_value() > 0.5:
tmp[fid] = day+1
if cost_function(tmp)[2] < cost_function(assigned_days)[2]:
assigned_days = tmp
submission['assigned_day'] = assigned_days
submission.to_csv('submission.csv', index=False)
print('Result:', status[sol], cost_function(tmp))
else:
print('Result:', status[sol] ) | Santa's Workshop Tour 2019 |
7,078,845 | <SOS> metric: SantaWorkshopSchedule2019 Kaggle data source: santas-workshop-tour-2019<train_model> | import os
import ctypes
from numpy.ctypeslib import ndpointer
import pandas as pd
import numpy as np
from tqdm.notebook import tqdm
from numba import njit, prange | Santa's Workshop Tour 2019 |
7,078,845 | ran = rfgd.best_estimator_
knn = kngd.best_estimator_
log = lrgd.best_estimator_
xgb = xggd.best_estimator_
gbc = gbgd.best_estimator_
svc = svgd.best_estimator_
ext = etgd.best_estimator_
ada = adgd.best_estimator_
gpc = gpgd.best_estimator_
bag = bcgd.best_estimator_
models = [ran, knn, log, xgb, gbc, svc, ext, ada, gnb, gpc, bag]
scores_v3 = []
for mod in models:
mod.fit(X_train, y_train)
acc = cross_val_score(mod, X_train, y_train, scoring = "accuracy", cv = 10)
scores_v3.append(acc.mean() )<create_dataframe> | %%writefile score.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#define NF 5000 /* number of families */
int cost[NF][101];
int fs[NF];
int cf[NF][10];
int loaded=0;
float acc[301][301];
void precompute_acc() {
for(int i=125;i<=300;i++)
for(int j=125;j<=300;j++)
acc[i][j] =(i-125.0)/400.0 * pow(i , 0.5 + fabs(i-j)/ 50);
}
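/* acc[i][j] caches (i - 125)/400 * i^(0.5 + |i - j|/50), the accounting penalty for occupancy i
   today and j tomorrow; only values in the 125..300 range are meaningful for a feasible schedule. */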
void read_fam() {
FILE *f;
char s[1000];
int d[101],fid,n;
int *c;
f=fopen(".. /input/santa-workshop-tour-2019/family_data.csv","r");
if(fgets(s,1000,f)==NULL)
exit(-1);
for(int i=0;i<5000;i++){
c = &cf[i][0];
if(fscanf(f,"%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d",
&fid,&c[0],&c[1],&c[2],&c[3],&c[4],&c[5],&c[6],&c[7],&c[8],&c[9],&fs[i])!=12)
exit(-1);
// printf("%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d
",
//fid,c[0],c[1],c[2],c[3],c[4],c[5],c[6],c[7],c[8],c[9],fs[i]);
n = fs[i];
for(int j=1;j<=100;j++){
if(j==c[0])cost[i][j]=0;
else if(j==c[1])cost[i][j]=50;
else if(j==c[2])cost[i][j]=50 + 9 * n;
else if(j==c[3])cost[i][j]=100 + 9 * n;
else if(j==c[4])cost[i][j]=200 + 9 * n;
else if(j==c[5])cost[i][j]=200 + 18 * n;
else if(j==c[6])cost[i][j]=300 + 18 * n;
else if(j==c[7])cost[i][j]=300 + 36 * n;
else if(j==c[8])cost[i][j]=400 + 36 * n;
else if(j==c[9])cost[i][j]=500 + 36 * n + 199 * n;
else cost[i][j]=500 + 36 * n + 398 * n;
}
}
}
float max_cost=1000000000;
int day_occ[102];
static inline int day_occ_ok(int d){
return !(d <125 || d>300);
}
float score(int *pred){
float r=0;
if(!loaded){
read_fam() ;
precompute_acc() ;
loaded = 1;
}
// validate day occupancy
memset(day_occ,0,101*sizeof(int)) ;
for(int i=0;i<NF;i++){
day_occ[pred[i]]+=fs[i];
r+=cost[i][pred[i]];
}
day_occ[101]=day_occ[100];
for(int d=1;d<=100;d++){
if(day_occ[d]<125)
r += 100000 *(125 - day_occ[d]);
else if(day_occ[d] > 300)
r += 100000 *(day_occ[d] - 300);
r += acc[day_occ[d]][day_occ[d+1]];
}
return r;
} | Santa's Workshop Tour 2019 |
7,078,845 | results = pd.DataFrame({
'Model': ['Random Forest', 'K Nearest Neighbour', 'Logistic Regression', 'XGBoost', 'Gradient Boosting', 'SVC', 'Extra Trees', 'AdaBoost', 'Gaussian Naive Bayes', 'Gaussian Process', 'Bagging Classifier'],
'Original Score': scores,
'Score with feature selection': scores_v2,
'Score with tuned parameters': scores_v3})
result_df = results.sort_values(by='Score with tuned parameters', ascending=False ).reset_index(drop=True)
result_df<train_model> | lib = ctypes.CDLL('./score.so')
cost_function = lib.score
cost_function.restype = ctypes.c_float
cost_function.argtypes = [ndpointer(ctypes.c_int)] | Santa's Workshop Tour 2019 |
7,078,845 | ran = rfgd.best_estimator_
knn = kngd.best_estimator_
log = lrgd.best_estimator_
xgb = xggd.best_estimator_
gbc = gbgd.best_estimator_
svc = svgd.best_estimator_
ext = etgd.best_estimator_
ada = adgd.best_estimator_
gpc = gpgd.best_estimator_
bag = bcgd.best_estimator_
models = [ran, knn, log, xgb, gbc, svc, ext, ada, gnb, gpc, bag]
scores_v4 = []
for mod in models:
mod.fit(X_train, y_train)
predict = mod.predict(X_test)
acc = accuracy_score(y_test, predict)
scores_v4.append(acc )<create_dataframe> | score = []
sub = []
name = os.listdir('/kaggle/input/santa-public')
for item in name:
score.append(int(item.split('_')[1].split('.')[0]))
sub.append(pd.read_csv('../input/santa-public/'+item, index_col='family_id'))
print(np.min(score))
print(len(sub)) | Santa's Workshop Tour 2019 |
7,078,845 | results = pd.DataFrame({
'Model': ['Random Forest', 'K Nearest Neighbour', 'Logistic Regression', 'XGBoost', 'Gradient Boosting', 'SVC', 'Extra Trees', 'AdaBoost', 'Gaussian Naive Bayes', 'Gaussian Process', 'Bagging Classifier'],
'Original Score': scores,
'Score with feature selection': scores_v2,
'Score with tuned parameters': scores_v3,
'Score with full accuracy': scores_v4})
result_df = results.sort_values(by='Score with full accuracy', ascending=False ).reset_index(drop=True)
result_df<predict_on_test> | top_k = 3
base_path = '/kaggle/input/santa-workshop-tour-2019/'
sub_path = '/kaggle/input/greedy-dual-and-tripple-shuffle-with-fast-scoring/'
data = pd.read_csv(base_path + 'family_data.csv', index_col='family_id')
submission = pd.read_csv(f'../input/santa-public/submission_{np.min(score)}.csv',
index_col='family_id')
original = submission['assigned_day'].values
original_score = cost_function(np.int32(original))
choice_matrix = data.loc[:, 'choice_0': 'choice_9'].values
print(cost_function(np.int32(original)) ) | Santa's Workshop Tour 2019 |
7,078,845 | predictions = ext.predict(test_features )<save_to_csv> | fam_weight = []
for i, s in enumerate(submission.iterrows()):
for c in range(choice_matrix.shape[1]):
if s[1].values==choice_matrix[i, c]:
fam_weight.append(c+1)
fam_weight = np.array(fam_weight)
fam_weight = fam_weight / sum(fam_weight)
print(fam_weight ) | Santa's Workshop Tour 2019 |
7,078,845 | submission=pd.DataFrame()
submission['PassengerId']=TestId
submission['Survived']=predictions
submission.to_csv('submission.csv', index=False )<load_from_csv> | redundancy = 5
choice_weight = np.zeros(( 5000, top_k))
for i in tqdm(range(5000)) :
for j in range(top_k):
for s in sub:
if choice_matrix[i, j] == s.loc[i, 'assigned_day']:
choice_weight[i, j] += 1
choice_weight += redundancy
for j in range(choice_weight.shape[0]):
choice_weight[j] /= sum(choice_weight[j])
print(choice_weight ) | Santa's Workshop Tour 2019 |
7,078,845 | print('reading input files.. ')
data = pd.read_csv('../input/train.csv')
sampl = pd.read_csv('../input/gender_submission.csv' )<load_from_csv> | def random_choice_prob_index(a, axis=1):
r = np.expand_dims(np.random.rand(a.shape[1-axis]), axis=axis)
return(a.cumsum(axis=axis)> r ).argmax(axis=axis ) | Santa's Workshop Tour 2019 |
7,078,845 | test = pd.read_csv('../input/test.csv' )<concatenate> | def lucky_choice_search(top_k, fam_size, original, choice_matrix,
disable_tqdm=False, n_iter=100000000,
verbose=10000, random_state=2019):
best = original.copy()
best_score = cost_function(np.int32(best))
if random_state is not None:
np.random.seed(random_state)
fam_indices = np.random.choice(range(choice_matrix.shape[0]), size=fam_size, p=fam_weight)
for i in tqdm(range(n_iter), disable=disable_tqdm):
new = best.copy()
new[fam_indices] = choice_matrix[fam_indices, random_choice_prob_index(choice_weight[fam_indices])]
new_score = cost_function(np.int32(new))
if new_score < best_score:
best_score = new_score
best = new
print(f'{i} NEW BEST SCORE: ', best_score)
submission['assigned_day'] = best
submission.to_csv(f'submission_{best_score}.csv')
if verbose and i % verbose == 0:
print(f"Iteration
return best, best_score | Santa's Workshop Tour 2019 |
7,078,845 | df = data.append(test, sort = False )<merge> | best, best_score = lucky_choice_search(
choice_matrix=choice_matrix,
top_k=top_k,
fam_size=20,
original=original,
n_iter=250000000,
disable_tqdm=False,
random_state=20191217,
verbose=None
) | Santa's Workshop Tour 2019 |
7,078,845 | <feature_engineering><EOS> | submission['assigned_day'] = best
submission.to_csv(f'submission_{best_score}.csv' ) | Santa's Workshop Tour 2019 |
6,920,137 | <SOS> metric: SantaWorkshopSchedule2019 Kaggle data source: santas-workshop-tour-2019<feature_engineering> | import numpy as np
import pandas as pd
from numba import njit, prange | Santa's Workshop Tour 2019 |
6,920,137 | df['FamilySurv'] = 0.5
for _, grup in df.groupby(['FamilyName','Fare']):
if len(grup)!= 1:
for index, row in grup.iterrows() :
smax = grup.drop(index ).Survived.max()
smin = grup.drop(index ).Survived.min()
pid = row.PassengerId
if smax == 1:
df.loc[df.PassengerId == pid, 'FamilySurv'] = 1.0
elif smin == 0:
df.loc[df.PassengerId == pid, 'FamilySurv'] = 0.0
for _, grup in df.groupby(['Ticket']):
if len(grup)!= 1:
for index, row in grup.iterrows() :
if(row.FamilySurv == 0.0 or row.FamilySurv == 0.5):
smax = grup.drop(index ).Survived.max()
smin = grup.drop(index ).Survived.min()
pid = row.PassengerId
if smax == 1:
df.loc[df.PassengerId == pid, 'FamilySurv'] = 1.0
elif smin == 0:
df.loc[df.PassengerId == pid, 'FamilySurv'] = 0.0
df.FamilySurv.value_counts()<feature_engineering> | data = pd.read_csv('/kaggle/input/santa-workshop-tour-2019/family_data.csv', index_col='family_id')
family_size = data.n_people.values.astype(np.int8)
penalties = np.asarray([
[
0,
50,
50 + 9 * n,
100 + 9 * n,
200 + 9 * n,
200 + 18 * n,
300 + 18 * n,
300 + 36 * n,
400 + 36 * n,
500 + 36 * n + 199 * n,
500 + 36 * n + 398 * n
] for n in range(family_size.max() + 1)
])
family_cost_matrix = np.concatenate(data.n_people.apply(lambda n: np.repeat(penalties[n, 10], 100 ).reshape(1, 100)))
for fam in data.index:
for choice_order, day in enumerate(data.loc[fam].drop("n_people")) :
family_cost_matrix[fam, day - 1] = penalties[data.loc[fam, "n_people"], choice_order]
accounting_cost_matrix = np.zeros(( 500, 500))
for n in range(accounting_cost_matrix.shape[0]):
for diff in range(accounting_cost_matrix.shape[1]):
accounting_cost_matrix[n, diff] = max(0,(n - 125.0)/ 400.0 * n**(0.5 + diff / 50.0))
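# Note: accounting_cost_matrix is indexed by (occupancy, |occupancy difference|) rather than by the
# occupancy pair itself; cost_function below therefore passes diff = abs(n - n_next) as the second index.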
@njit(fastmath=True)
def cost_function(prediction, family_size, family_cost_matrix, accounting_cost_matrix):
N_DAYS = family_cost_matrix.shape[1]
MAX_OCCUPANCY = 300
MIN_OCCUPANCY = 125
penalty = 0
daily_occupancy = np.zeros(N_DAYS + 1, dtype=np.int16)
for i,(pred, n)in enumerate(zip(prediction, family_size)) :
daily_occupancy[pred - 1] += n
penalty += family_cost_matrix[i, pred - 1]
accounting_cost = 0
n_low = 0
n_high = 0
daily_occupancy[-1] = daily_occupancy[-2]
for day in range(N_DAYS):
n_next = daily_occupancy[day + 1]
n = daily_occupancy[day]
n_high +=(n > MAX_OCCUPANCY)
n_low +=(n < MIN_OCCUPANCY)
diff = abs(n - n_next)
accounting_cost += accounting_cost_matrix[n, diff]
return np.asarray([penalty, accounting_cost, n_low, n_high])
def score(prediction):
fc, ac, l, h = cost_function(prediction, family_size, family_cost_matrix, accounting_cost_matrix)
return(fc + ac)+(l + h)* 1000000
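# Note: score() returns preference cost plus accounting cost, plus a 1,000,000 penalty for every day
# whose occupancy falls outside the 125-300 window, so infeasible schedules are heavily discouraged.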
fam = pd.read_csv("/kaggle/input/santa-workshop-tour-2019/family_data.csv")
pref = fam.values[:,1:-1] | Santa's Workshop Tour 2019 |
6,920,137 | def CabinNum(data):
data.Cabin = data.Cabin.fillna('0')
regex = re.compile('\s*(\w+)\s*')
data['CabinNum'] = data.Cabin.apply(lambda x : len(regex.findall(x)))
CabinNum(df )<count_values> | pred = pd.read_csv('/kaggle/input/santa-ip/submission.csv', index_col='family_id' ).assigned_day.values
init_score = score(pred)
print(init_score ) | Santa's Workshop Tour 2019 |
6,920,137 | df.CabinNum.value_counts()<filter> | sub = pd.read_csv('/kaggle/input/santa-ip/submission.csv')
!cp /kaggle/input/santa-ip/submission.csv ./submission_72398.91780918743.csv | Santa's Workshop Tour 2019 |
6,920,137 | df.loc[df['Fare'].isnull() ]<filter> | def seed_finding(seed, prediction_input):
prediction = prediction_input.copy()
np.random.seed(seed)
best_score = score(prediction)
original_score = best_score
print("SEED: {} ORIGINAL SCORE: {}".format(seed, original_score))
for t in range(100):
for i in range(5000):
for j in range(10):
di = prediction[i]
prediction[i] = pref[i, j]
cur_score = score(prediction)
KT = 1
if t < 5:
KT = 1.5
elif t < 10:
KT = 4.5
else:
if cur_score > best_score + 100:
KT = 3
elif cur_score > best_score + 50 :
KT = 2.75
elif cur_score > best_score + 20:
KT = 2.5
elif cur_score > best_score + 10:
KT = 2
elif cur_score > best_score:
KT = 1.5
else:
KT = 1
prob = np.exp(-(cur_score - best_score)/ KT)
if np.random.rand() < prob:
best_score = cur_score
else:
prediction[i] = di
if best_score < original_score:
print("NEW BEST SCORE on seed {}: {}".format(seed, best_score))
sub.assigned_day = prediction
sub.to_csv(f'submission_{best_score}.csv', index=False)
break
if best_score >= original_score:
print("UNLUCKY on seed {} for 100 runs, no impovement.".format(seed))
return prediction, best_score | Santa's Workshop Tour 2019 |
6,920,137 | <feature_engineering><EOS> | best_score = init_score
for seed in range(1201, 1225):
pred, best_score = seed_finding(seed, pred)
if best_score < init_score:
init_score = best_score
else:
best_score = init_score
pred = pd.read_csv(f'submission_{best_score}.csv', index_col='family_id' ).assigned_day.values | Santa's Workshop Tour 2019 |
7,263,353 | <SOS> metric: SantaWorkshopSchedule2019 Kaggle data source: santas-workshop-tour-2019<feature_engineering> | import numpy as np
import pandas as pd
from ortools.graph import pywrapgraph
NUMBER_DAYS = 100
NUMBER_FAMILIES = 5000
MAX_BEST_CHOICE = 5
data = pd.read_csv('/kaggle/input/santa-workshop-tour-2019/family_data.csv')
submission = pd.read_csv('/kaggle/input/c-stochastic-product-search-65ns/submission.csv')
assigned_days = submission['assigned_day'].values
columns = data.columns[1:11]
DESIRED = data[columns].values
COST_PER_FAMILY = [0,50,50,100,200,200,300,300,400,500]
COST_PER_FAMILY_MEMBER = [0, 0, 9, 9, 9, 18, 18, 36, 36,235]
N_PEOPLE = data['n_people'].astype(int ).values
def get_daily_occupancy(assigned_days):
daily_occupancy = np.zeros(100, np.int32)
for i, r in enumerate(assigned_days):
daily_occupancy[r-1] += N_PEOPLE[i]
return daily_occupancy
def cost_function(prediction):
N_DAYS = 100
MAX_OCCUPANCY = 300
MIN_OCCUPANCY = 125
penalty = 0
days = list(range(N_DAYS,0,-1))
tmp = pd.read_csv('/kaggle/input/santa-workshop-tour-2019/family_data.csv', index_col='family_id')
family_size_dict = tmp[['n_people']].to_dict() ['n_people']
cols = [f'choice_{i}' for i in range(10)]
choice_dict = tmp[cols].to_dict()
daily_occupancy = {k:0 for k in days}
for f, d in enumerate(prediction):
n = family_size_dict[f]
choice_0 = choice_dict['choice_0'][f]
choice_1 = choice_dict['choice_1'][f]
choice_2 = choice_dict['choice_2'][f]
choice_3 = choice_dict['choice_3'][f]
choice_4 = choice_dict['choice_4'][f]
choice_5 = choice_dict['choice_5'][f]
choice_6 = choice_dict['choice_6'][f]
choice_7 = choice_dict['choice_7'][f]
choice_8 = choice_dict['choice_8'][f]
choice_9 = choice_dict['choice_9'][f]
daily_occupancy[d] += n
if d == choice_0:
penalty += 0
elif d == choice_1:
penalty += 50
elif d == choice_2:
penalty += 50 + 9 * n
elif d == choice_3:
penalty += 100 + 9 * n
elif d == choice_4:
penalty += 200 + 9 * n
elif d == choice_5:
penalty += 200 + 18 * n
elif d == choice_6:
penalty += 300 + 18 * n
elif d == choice_7:
penalty += 300 + 36 * n
elif d == choice_8:
penalty += 400 + 36 * n
elif d == choice_9:
penalty += 500 + 36 * n + 199 * n
else:
penalty += 500 + 36 * n + 398 * n
for _, v in daily_occupancy.items() :
if v > MAX_OCCUPANCY or v < MIN_OCCUPANCY:
penalty += 100000000
accounting_cost = max(0,(daily_occupancy[days[0]]-125.0)/ 400.0 * daily_occupancy[days[0]]**(0.5))
yesterday_count = daily_occupancy[days[0]]
for day in days[1:]:
today_count = daily_occupancy[day]
diff = abs(today_count - yesterday_count)
accounting_cost += max(0,(today_count-125.0)/ 400.0 * today_count**(0.5 + diff / 50.0))
yesterday_count = today_count
return penalty, accounting_cost, penalty + accounting_cost | Santa's Workshop Tour 2019 |
7,263,353 | def FamlSize(data):
data['FamlSize'] = 0
data['FamlSize'] = data['SibSp'] + data['Parch'] + 1
def IsAlone(data):
data['IsAlone'] = 0
data.loc[(data['FamlSize'] == 1), 'IsAlone'] = 0
data.loc[(data['FamlSize'] > 1), 'IsAlone'] = 1
FamlSize(df)
IsAlone(df )<categorify> | %%time
for num_members in range(2, 9):
daily_occupancy = get_daily_occupancy(assigned_days)
fids = np.where(N_PEOPLE == num_members)[0]
PCOSTM = {}
for fid in range(NUMBER_FAMILIES):
if fid in fids:
for i in range(MAX_BEST_CHOICE):
PCOSTM[fid, DESIRED[fid][i]-1] = COST_PER_FAMILY[i] + N_PEOPLE[fid] * COST_PER_FAMILY_MEMBER[i]
else:
daily_occupancy[assigned_days[fid]-1] -= N_PEOPLE[fid]
offset = fids.shape[0]
solver = pywrapgraph.SimpleMinCostFlow()
for day in range(NUMBER_DAYS):
solver.SetNodeSupply(offset+day, int(daily_occupancy[day]//num_members))
for i in range(offset):
fid = fids[i]
solver.SetNodeSupply(i, -1)
for j in range(MAX_BEST_CHOICE):
day = DESIRED[fid][j]-1
solver.AddArcWithCapacityAndUnitCost(int(offset+day), i, 1, int(PCOSTM[fid, day]))
solver.SolveMaxFlowWithMinCost()
for i in range(solver.NumArcs()):
if solver.Flow(i)> 0:
assigned_days[fids[solver.Head(i)]] = solver.Tail(i)- offset + 1
print(cost_function(assigned_days)) | Santa's Workshop Tour 2019 |
7,263,353 | <find_best_model_class><EOS> | submission['assigned_day'] = assigned_days
submission.to_csv('submission.csv', index=False ) | Santa's Workshop Tour 2019 |
7,548,781 | <SOS> metric: SantaWorkshopSchedule2019 Kaggle data source: santas-workshop-tour-2019<feature_engineering> | %%bash
git clone git://github.com/yyuu/pyenv.git ~/.pyenv
echo 'export PYENV_ROOT="$HOME/.pyenv"' >> ~/.bash_profile
echo 'export PATH="$PYENV_ROOT/bin:$PATH"' >> ~/.bash_profile
echo 'eval "$(pyenv init -)"' >> ~/.bash_profile
source ~/.bash_profile
apt-get install -y libssl-dev libreadline-dev
pyenv install 2.7.17
pyenv local 2.7.17
pip install pandas numpy
wget https://mipcl-cpp.appspot.com/static/download/mipcl-py-2.6.1.linux-x86_64.tar.gz
tar --exclude='*docs' -xzvf mipcl-py-2.6.1.linux-x86_64.tar.gz
rm -f./mipcl_py/mipshell/mipcl.so
ln -s mipcl-py2.so./mipcl_py/mipshell/mipcl.so | Santa's Workshop Tour 2019 |
7,548,781 | def AgeCat(data):
data['AgeCat'] = 0
data.loc[(data['Age'] <= 5), 'AgeCat'] = 0
data.loc[(data['Age'] <= 12)&(data['Age'] > 5), 'AgeCat'] = 1
data.loc[(data['Age'] <= 18)&(data['Age'] > 12), 'AgeCat'] = 2
data.loc[(data['Age'] <= 22)&(data['Age'] > 18), 'AgeCat'] = 3
data.loc[(data['Age'] <= 32)&(data['Age'] > 22), 'AgeCat'] = 4
data.loc[(data['Age'] <= 45)&(data['Age'] > 32), 'AgeCat'] = 5
data.loc[(data['Age'] <= 60)&(data['Age'] > 45), 'AgeCat'] = 6
data.loc[(data['Age'] <= 70)&(data['Age'] > 60), 'AgeCat'] = 7
data.loc[(data['Age'] > 70), 'AgeCat'] = 8
AgeCat(df )<feature_engineering> | %%bash
source ~/.bash_profile
python <<__EOF__
import time
import numpy as np
import pandas as pd
import mipcl_py.mipshell.mipshell as mipshell  # module path assumed from the mipcl-py tarball extracted above
def get_days(assigned_days, n_people):
days = np.zeros(assigned_days.max() , int)
for i, r in enumerate(assigned_days):
days[r-1] += n_people[i]
return days
def example_mipcl(desired, n_people):
def accounting_penalty(day, next_day):
return(day - 125.0)*(day**(0.5 + abs(day - next_day)/ 50.0)) / 400.0
FAMILY_COST = np.asarray([0,50,50,100,200,200,300,300,400,500])
MEMBER_COST = np.asarray([0, 0, 9, 9, 9, 18, 18, 36, 36,235])
num_days = desired.max()
num_families = desired.shape[0]
solver = mipshell.Problem(name='Santa2019 only preference')
C, B, I = {}, {}, {}
for fid, choices in enumerate(desired):
for cid in range(10):
B[fid, choices[cid]-1] = mipshell.Var(type=mipshell.BIN, lb=0.0, ub=1.0)
C[fid, choices[cid]-1] = FAMILY_COST[cid] + n_people[fid] * MEMBER_COST[cid]
for day in range(num_days):
I[day] = mipshell.Var(type=mipshell.INT, lb=125, ub=300)
mipshell.sum_(n_people[fid]*B[fid, day] for fid in range(num_families)if(fid,day)in B)== I[day]
for fid in range(num_families):
mipshell.sum_(B[fid, day] for day in range(num_days)if(fid,day)in B)== 1
objective = mipshell.sum_(C[fid, day]*B[fid, day] for fid, day in B)
solver.minimize(objective)
solver.optimize(silent=False, gap=0.0)
if solver.is_solution:
print("Result: ", solver.getObjVal())
assigned_days = np.zeros(num_families, int)
for fid, day in B:
if B[fid, day].val > 0.5:
assigned_days[fid] = day + 1
return assigned_days
else:
print("Failed", solver.is_solution, solver.is_infeasible, solver.isPureLP)
return None
def save(assigned_days):
with open("submission_init.csv", "w")as f:
f.write("family_id,assigned_day
")
for fid, v in enumerate(assigned_days):
f.write("{},{}
".format(fid, v))
if __name__ == "__main__":
ds = pd.read_csv('.. /input/santa-workshop-tour-2019/family_data.csv')
t = time.time()
ret = example_mipcl(ds.values[:,1:11], ds.values[:,11])
if ret is not None:
save(ret)
print("Elapsed time", time.time() - t)
__EOF__ | Santa's Workshop Tour 2019 |
7,548,781 | def AgeCatTitle(data):
data['AgeCatTitle'] = data['Title'].map(str)+ data['AgeCat'].map(str)
<filter> | Stochastic optimization: https://www.kaggle.com/golubev/c-stochastic-product-search-65ns | Santa's Workshop Tour 2019 |
7,548,781 | df.loc[df['Embarked'].isnull() ]<feature_engineering> | %%writefile main.cpp
using namespace std;
using namespace std::chrono;
constexpr array<uint8_t, 14> DISTRIBUTION{2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 5}; // Configures how many random families take part in each swap and how many of their best choices are brute-forced
// e.g. {2, 5} means the first random family is brute-forced over choices 1-2 and the second random family over choices 1-5
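// Illustrative note (added comment, not from the original kernel): with the default DISTRIBUTION
// above, every sampled set of 14 families is evaluated over 2^12 * 3 * 5 = 61,440 choice
// combinations, which is exactly the size of the precomputed `changes` vector that
// stochastic_product_search() walks for each of its BEST_N random samples.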
constexpr int MAX_OCCUPANCY = 300;
constexpr int MIN_OCCUPANCY = 125;
constexpr int BEST_N = 1000;
array<uint8_t, 5000> n_people;
array<array<uint8_t, 10>, 5000> choices;
array<array<uint16_t, 10>, 5000> PCOSTM;
array<array<double, 176>, 176> ACOSTM;
void init_data() {
ifstream in(".. /input/santa-workshop-tour-2019/family_data.csv");
assert(in && "family_data.csv");
string header;
int n,x;
char comma;
getline(in, header);
for(int j = 0; j < choices.size() ; ++j){
in >> x >> comma;
for(int i = 0; i < 10; ++i){
in >> x >> comma;
choices[j][i] = x-1;
}
in >> n;
n_people[j] = n;
}
array<int, 10> pc{0, 50, 50, 100, 200, 200, 300, 300, 400, 500};
array<int, 10> pn{0, 0, 9, 9, 9, 18, 18, 36, 36, 235};
for(int j = 0; j < PCOSTM.size() ; ++j)
for(int i = 0; i < 10; ++i)
PCOSTM[j][i] = pc[i] + pn[i] * n_people[j];
for(int i = 0; i < 176; ++i)
for(int j = 0; j < 176; ++j)
ACOSTM[i][j] = i * pow(i+125, 0.5 + abs(i-j)/ 50.0)/ 400.0;
}
array<uint8_t, 5000> read_submission(string filename){
ifstream in(filename);
assert(in && "submission_init.csv");
array<uint8_t, 5000> assigned_day{};
string header;
int id, x;
char comma;
getline(in, header);
for(int j = 0; j < choices.size() ; ++j){
in >> id >> comma >> x;
assigned_day[j] = x-1;
auto it = find(begin(choices[j]), end(choices[j]), assigned_day[j]);
if(it != end(choices[j]))
assigned_day[j] = distance(begin(choices[j]), it);
}
return assigned_day;
}
struct Index {
Index(array<uint8_t, 5000> assigned_days_): assigned_days(assigned_days_){
setup() ;
}
array<uint8_t, 5000> assigned_days;
array<uint16_t, 100> daily_occupancy_{};
int preference_cost_ = 0;
void setup() {
preference_cost_ = 0;
daily_occupancy_.fill(0);
for(int j = 0; j < assigned_days.size() ; ++j){
daily_occupancy_[choices[j][assigned_days[j]]] += n_people[j];
preference_cost_ += PCOSTM[j][assigned_days[j]];
}
}
double calc(const array<uint16_t, 5000>& indices, const array<uint8_t, DISTRIBUTION.size() >& change){
double accounting_penalty = 0.0;
auto daily_occupancy = daily_occupancy_;
int preference_cost = preference_cost_;
for(int i = 0; i < DISTRIBUTION.size() ; ++i){
int j = indices[i];
daily_occupancy[choices[j][assigned_days[j]]] -= n_people[j];
daily_occupancy[choices[j][ change[i]]] += n_people[j];
preference_cost += PCOSTM[j][change[i]] - PCOSTM[j][assigned_days[j]];
}
for(auto occupancy : daily_occupancy)
if(occupancy < MIN_OCCUPANCY)
return 1e12*(MIN_OCCUPANCY-occupancy);
else if(occupancy > MAX_OCCUPANCY)
return 1e12*(occupancy - MAX_OCCUPANCY);
for(int day = 0; day < 99; ++day)
accounting_penalty += ACOSTM[daily_occupancy[day]-125][daily_occupancy[day+1]-125];
accounting_penalty += ACOSTM[daily_occupancy[99]-125][daily_occupancy[99]-125];
return preference_cost + accounting_penalty;
}
void reindex(const array<uint16_t, DISTRIBUTION.size() >& indices, const array<uint8_t, DISTRIBUTION.size() >& change){
for(int i = 0; i < DISTRIBUTION.size() ; ++i){
assigned_days[indices[i]] = change[i];
}
setup() ;
}
};
double calc(const array<uint8_t, 5000>& assigned_days, bool print=false){
int preference_cost = 0;
double accounting_penalty = 0.0;
array<uint16_t, 100> daily_occupancy{};
for(int j = 0; j < assigned_days.size() ; ++j){
preference_cost += PCOSTM[j][assigned_days[j]];
daily_occupancy[choices[j][assigned_days[j]]] += n_people[j];
}
for(auto occupancy : daily_occupancy)
if(occupancy < MIN_OCCUPANCY)
return 1e12*(MIN_OCCUPANCY-occupancy);
else if(occupancy > MAX_OCCUPANCY)
return 1e12*(occupancy - MAX_OCCUPANCY);
for(int day = 0; day < 99; ++day)
accounting_penalty += ACOSTM[daily_occupancy[day]-125][daily_occupancy[day+1]-125];
accounting_penalty += ACOSTM[daily_occupancy[99]-125][daily_occupancy[99]-125];
if(print){
cout << preference_cost << " " << accounting_penalty << " " << preference_cost+accounting_penalty << endl;
}
return preference_cost + accounting_penalty;
}
void save_sub(const array<uint8_t, 5000>& assigned_day){
ofstream out("submission_init2.csv");
out << "family_id,assigned_day" << endl;
for(int i = 0; i < assigned_day.size() ; ++i)
out << i << "," << choices[i][assigned_day[i]]+1 << endl;
}
const vector<array<uint8_t, DISTRIBUTION.size() >> changes = []() {
vector<array<uint8_t, DISTRIBUTION.size() >> arr;
array<uint8_t, DISTRIBUTION.size() > tmp{};
for(int i = 0; true; ++i){
arr.push_back(tmp);
tmp[0] += 1;
for(int j = 0; j < DISTRIBUTION.size() ; ++j)
if(tmp[j] >= DISTRIBUTION[j]){
if(j >= DISTRIBUTION.size() -1)
return arr;
tmp[j] = 0;
++tmp[j+1];
}
}
return arr;
}() ;
template<class ExitFunction>
void stochastic_product_search(Index index, ExitFunction fn){ // 15'360'000it/s 65ns/it 0.065µs/it
double best_local_score = calc(index.assigned_days);
thread_local std::mt19937 gen(std::random_device{}());
gen.seed(1);
uniform_int_distribution<> dis(0, 4999);
array<uint16_t, 5000> indices;
iota(begin(indices), end(indices), 0);
array<uint16_t, DISTRIBUTION.size() > best_indices{};
array<uint8_t, DISTRIBUTION.size() > best_change{};
for(; fn() ;){
bool found_better = false;
for(int k = 0; k < BEST_N; ++k){
for(int i = 0; i < DISTRIBUTION.size() ; ++i)//random swap
swap(indices[i], indices[dis(gen)]);
for(const auto& change : changes){
auto score = index.calc(indices, change);
if(score < best_local_score){
found_better = true;
best_local_score = score;
best_change = change;
copy_n(begin(indices), DISTRIBUTION.size() , begin(best_indices)) ;
}
}
}
if(found_better){ // reindex from N best if found better
index.reindex(best_indices, best_change);
// save_sub(index.assigned_days);
calc(index.assigned_days, true);
}
}
save_sub(index.assigned_days);
}
int main() {
init_data() ;
auto assigned_day = read_submission("/kaggle/working/submission_init.csv");
Index index(assigned_day);
calc(index.assigned_days, true);
// auto forever = []() { return true; };
// auto count_exit = [start = 0]() mutable { return(++start <= 1000); };
auto time_exit = [start = high_resolution_clock::now() ]() {
return duration_cast<minutes>(high_resolution_clock::now() -start ).count() < 120; //2h
};
stochastic_product_search(index, time_exit);
return 0;
} | Santa's Workshop Tour 2019 |
7,548,781 | def FillEmbk(data):
var = 'Embarked'
data.loc[(data.Embarked.isnull()),'Embarked']= 'C'
FillEmbk(df )<categorify> | !g++ -pthread -lpthread -O3 -std=c++17 -o main main.cpp | Santa's Workshop Tour 2019 |
7,548,781 | <normalization><EOS> | %%time
NUMBER_DAYS = 100
NUMBER_FAMILIES = 5000
MAX_BEST_CHOICE = 5
data = pd.read_csv('/kaggle/input/santa-workshop-tour-2019/family_data.csv')
submission = pd.read_csv('/kaggle/working/submission_init2.csv')
assigned_days = submission['assigned_day'].values
columns = data.columns[1:11]
DESIRED = data[columns].values
COST_PER_FAMILY = [0,50,50,100,200,200,300,300,400,500]
COST_PER_FAMILY_MEMBER = [0, 0, 9, 9, 9, 18, 18, 36, 36,235]
N_PEOPLE = data['n_people'].values
def get_daily_occupancy(assigned_days):
daily_occupancy = np.zeros(100, int)
for fid, assigned_day in enumerate(assigned_days):
daily_occupancy[assigned_day-1] += N_PEOPLE[fid]
return daily_occupancy
def cost_function(prediction):
N_DAYS = 100
MAX_OCCUPANCY = 300
MIN_OCCUPANCY = 125
penalty = 0
days = list(range(N_DAYS,0,-1))
tmp = pd.read_csv('/kaggle/input/santa-workshop-tour-2019/family_data.csv', index_col='family_id')
family_size_dict = tmp[['n_people']].to_dict() ['n_people']
cols = [f'choice_{i}' for i in range(10)]
choice_dict = tmp[cols].to_dict()
daily_occupancy = {k:0 for k in days}
for f, d in enumerate(prediction):
n = family_size_dict[f]
choice_0 = choice_dict['choice_0'][f]
choice_1 = choice_dict['choice_1'][f]
choice_2 = choice_dict['choice_2'][f]
choice_3 = choice_dict['choice_3'][f]
choice_4 = choice_dict['choice_4'][f]
choice_5 = choice_dict['choice_5'][f]
choice_6 = choice_dict['choice_6'][f]
choice_7 = choice_dict['choice_7'][f]
choice_8 = choice_dict['choice_8'][f]
choice_9 = choice_dict['choice_9'][f]
daily_occupancy[d] += n
if d == choice_0:
penalty += 0
elif d == choice_1:
penalty += 50
elif d == choice_2:
penalty += 50 + 9 * n
elif d == choice_3:
penalty += 100 + 9 * n
elif d == choice_4:
penalty += 200 + 9 * n
elif d == choice_5:
penalty += 200 + 18 * n
elif d == choice_6:
penalty += 300 + 18 * n
elif d == choice_7:
penalty += 300 + 36 * n
elif d == choice_8:
penalty += 400 + 36 * n
elif d == choice_9:
penalty += 500 + 36 * n + 199 * n
else:
penalty += 500 + 36 * n + 398 * n
for _, v in daily_occupancy.items() :
if(v < MIN_OCCUPANCY):
penalty += 100000000
accounting_cost =(daily_occupancy[days[0]]-125.0)/ 400.0 * daily_occupancy[days[0]]**(0.5)
accounting_costs = [max(0, accounting_cost)]
diffs = [0]
yesterday_count = daily_occupancy[days[0]]
for day in days[1:]:
today_count = daily_occupancy[day]
diff = abs(today_count - yesterday_count)
accounting_costs.append(max(0,(today_count-125.0)/ 400.0 * today_count**(0.5 + diff / 50.0)))
yesterday_count = today_count
return penalty, sum(accounting_costs), penalty + sum(accounting_costs)
seed(2)
for f in range(100):
ad = assigned_days.copy()
days_for_fix = np.array(sample(range(1,101),50))
daily_occupancy = get_daily_occupancy(ad)
fids = np.where(np.isin(ad, days_for_fix)) [0]
solver = pywraplp.Solver('Setup occupation of days', pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)
PCOSTM, B = {}, {}
for fid in fids:
for i in range(MAX_BEST_CHOICE):
B[fid, DESIRED[fid][i]-1] = solver.BoolVar(f'b{fid, i}')
PCOSTM[fid, DESIRED[fid][i]-1] = COST_PER_FAMILY[i] + N_PEOPLE[fid] * COST_PER_FAMILY_MEMBER[i]
lower_bounds = np.zeros(100)
upper_bounds = np.zeros(100)
delta = 8
for fi in days_for_fix:
lower_bounds[fi-1] = max(daily_occupancy[fi-1]-delta,125)
upper_bounds[fi-1] = min(daily_occupancy[fi-1]+delta,300)
D = {}
for j in range(NUMBER_DAYS):
I = solver.IntVar(lower_bounds[j], upper_bounds[j], f'I{j}')
solver.Add(solver.Sum([N_PEOPLE[i] * B[i, j] for i in range(NUMBER_FAMILIES)if(i,j)in B])== I)
if upper_bounds[j]>124:
rj = range(int(lower_bounds[j]),int(upper_bounds[j])+1)
for i in rj:
D[j, i] = solver.BoolVar(f'd{j, i}')
solver.Add(solver.Sum([D[j, i]*i for i in rj])== I)
for i in fids:
solver.Add(solver.Sum(B[i, j] for j in range(NUMBER_DAYS)if(i,j)in B)== 1)
sM =solver.Sum(PCOSTM[i, j] * B[i, j] for i, j in B)
for i in range(NUMBER_DAYS):
if np.isin(i,days_for_fix-1):
ri = range(int(lower_bounds[i]),int(upper_bounds[i])+1)
if i<99:
sM += solver.Sum(D[i,j]*(j-125)/400*j**(0.5+abs(j-daily_occupancy[i+1])/50)for j in ri)
if i>0:
sM += solver.Sum(D[i,j]*(daily_occupancy[i-1]-125)/400*daily_occupancy[i-1]**(0.5+abs(j-daily_occupancy[i-1])/50)for j in ri)
solver.Minimize(sM)
sol = solver.Solve()
status = ['OPTIMAL', 'FEASIBLE', 'INFEASIBLE', 'UNBOUNDED', 'ABNORMAL', 'MODEL_INVALID', 'NOT_SOLVED']
if status[sol] == 'OPTIMAL':
for i, j in B:
if B[i, j].solution_value() > 0.5:
ad[i] = j+1
if cost_function(ad)[2]<cost_function(assigned_days)[2]:
submission['assigned_day'] = ad
assigned_days = ad
print(cost_function(ad))
score = cost_function(assigned_days)[2]
submission.to_csv(f'submission_{score}.csv', index=False)
| Santa's Workshop Tour 2019 |
7,022,143 | <SOS> metric: SantaWorkshopSchedule2019 Kaggle data source: santas-workshop-tour-2019<train_model> | !pip install pulp | Santa's Workshop Tour 2019 |
7,022,143 | selector = SelectKBest(f_classif, len(select_features))
selector.fit(train, target)
scores = -np.log10(selector.pvalues_)
indices = np.argsort(scores)[::-1]
print('Features importance:')
for i in range(len(scores)) :
print('%.2f %s' %(scores[indices[i]], select_features[indices[i]]))<choose_model_class> | data = pd.read_csv('/kaggle/input/santa-workshop-tour-2019/family_data.csv', index_col='family_id' ) | Santa's Workshop Tour 2019 |
7,022,143 | numpy.random.seed(7)
model = models.Sequential()
model.add(Dense(30,input_dim=15,activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(15,activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(5,activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1,activation='sigmoid'))
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
model.fit(train,target,epochs=150, batch_size=20, verbose=1 )<predict_on_test> | N_FAM = len(data)
N_DAY = 100
MAX_OCC = 300
MIN_OCC = 125
cost_dict = {0: [ 0, 0],
1: [ 50, 0],
2: [ 50, 9],
3: [100, 9],
4: [200, 9],
5: [200, 18],
6: [300, 18],
7: [300, 36],
8: [400, 36],
9: [500, 36 + 199],
10: [500, 36 + 398],
}
def cost(choice, members, cost_dict):
x = cost_dict[choice]
return x[0] + members * x[1]
all_costs = {k: pd.Series([cost(k, x, cost_dict)for x in range(2,9)], index=range(2,9)) for k in cost_dict.keys() }
family_sizes = data.n_people.values.astype(np.int8)
family_cost_matrix = np.zeros(( N_DAY,N_FAM))
for i, el in enumerate(family_sizes):
family_cost_matrix[:, i] += all_costs[10][el]
for j, choice in enumerate(data.drop("n_people",axis=1 ).values[i,:]):
family_cost_matrix[choice-1, i] = all_costs[j][el]
def accounting_penalty_day(occupancy_day, occupancy_next_day):
return max(0,(( occupancy_day-MIN_OCC)/400)*occupancy_day**(0.5 +(abs(occupancy_day-occupancy_next_day)) /50))
accounting_matrix = np.zeros([MAX_OCC-MIN_OCC+1,MAX_OCC-MIN_OCC+1])
for i, x in enumerate(range(MIN_OCC,MAX_OCC+1)) :
for j, y in enumerate(range(MIN_OCC,MAX_OCC+1)) :
accounting_matrix[i,j] = accounting_penalty_day(x,y ) | Santa's Workshop Tour 2019 |
7,022,143 | prc = model.predict(train)
prc = [round(x[0])for x in prc]
accuracy_score(target,prc )<predict_on_test> | def calculate_penalty(assignments, fam_sizes=family_sizes,
fam_cost_matrix=family_cost_matrix, account_matrix=accounting_matrix):
penalty = 0
daily_occupancy = np.zeros(N_DAY+1, dtype=np.int16)
for index,(day, fam_size)in enumerate(zip(assignments, fam_sizes)) :
day_index = day-1
daily_occupancy[day_index] += fam_size
penalty += fam_cost_matrix[day_index,index]
daily_occupancy[-1] = daily_occupancy[-2]
for day in range(N_DAY):
n = daily_occupancy[day]
n_next = daily_occupancy[day+1]
violation = n < MIN_OCC or n > MAX_OCC
if violation:
penalty += 1e11
elif MIN_OCC <= n_next <= MAX_OCC:
penalty += account_matrix[n-MIN_OCC, n_next-MIN_OCC]
return penalty | Santa's Workshop Tour 2019 |
7,022,143 | snum = 0
enum = len(test)
prdt2 = model.predict(test)
prdt2 = [round(x[0])for x in prdt2]
prdt2 = list(map(int,prdt2))
print('Predicted result: ', prdt2 )<save_to_csv> | def retrieve_indices(day_or_fam_value, value_is_fam_bool):
indices = []
for index, v in enumerate(x):
var_name = v.name
var_name_split = var_name.split('_')
if value_is_fam_bool:
value = int(var_name_split[1])
else:
value = int(var_name_split[2])- 1
if value == day_or_fam_value:
indices.append(index)
return indices
santa_prob = LpProblem("SantaWorkshopTour",LpMinimize)
x = []
match_fam_sizes = []
cost_dict = {}
for fam in range(N_FAM):
day_index_array = np.argsort(family_cost_matrix[:,fam])
for day in day_index_array[0:4]:
x.append(LpVariable(f'x_{fam}_{day+1}',0,1,LpInteger))
match_fam_sizes.append(family_sizes[fam])
cost_dict[x[-1]] = family_cost_matrix[day,fam]
z = []
for day in range(N_DAY):
z.append(LpVariable(f'z_{day+1}',0,1,LpInteger))
santa_prob += lpSum([cost_dict[value]*x[index] for index, value in enumerate(x)])
M = MAX_OCC-MIN_OCC
for day in range(N_DAY):
fam_indices = retrieve_indices(day, False)
santa_prob += lpSum([match_fam_sizes[fam_index]*x[fam_index] for fam_index in fam_indices])>= MIN_OCC
santa_prob += lpSum([match_fam_sizes[fam_index]*x[fam_index] for fam_index in fam_indices])- M*z[day] <= MIN_OCC
for fam in range(N_FAM):
day_indices = retrieve_indices(fam, True)
santa_prob += lpSum([x[day_index] for day_index in day_indices])== 1
max_diff = 175
correction = 120
alpha = 0.2
rhs = max_diff + alpha*MIN_OCC
for day in range(N_DAY-1):
fam_indices = retrieve_indices(day, False)
fam_indices_plus = retrieve_indices(day+1, False)
santa_prob +=(lpSum([match_fam_sizes[fam_index]*(1+alpha)*x[fam_index] for fam_index in fam_indices])
-lpSum([match_fam_sizes[fam_index]*x[fam_index] for fam_index in fam_indices_plus])
+ correction*z[day] <= rhs)
santa_prob +=(lpSum([match_fam_sizes[fam_index]*x[fam_index] for fam_index in fam_indices_plus])
-lpSum([match_fam_sizes[fam_index]*(1-alpha)*x[fam_index] for fam_index in fam_indices])
+ correction*z[day] <= rhs ) | Santa's Workshop Tour 2019 |
7,022,143 | sampl['Survived'] = pd.DataFrame(prdt2)
sampl.to_csv('submission.csv', index=False )<set_options> | time_lim = 10000
messaging = 1
gap = 0
santa_prob.solve(pulp.PULP_CBC_CMD(maxSeconds=time_lim, msg=messaging, fracGap=gap))
print('Status: ' + str(LpStatus[santa_prob.status])+ ', Value: ' + str(value(santa_prob.objective)))
solution_df = pd.DataFrame(np.zeros(N_FAM), columns = ['assigned_day'], dtype=np.int8)
for index, v in enumerate(santa_prob.variables()):
var_name = v.name
if v.varValue == 1 and 'x' in var_name:
var_name_split = var_name.split('_')
fam = int(var_name_split[1])
day = int(var_name_split[2])
solution_df.iat[fam,0] = day
solution = solution_df['assigned_day']
solution_penalty = calculate_penalty(solution)
solution_df.to_csv(f'SantaPuLP_{int(solution_penalty)}.csv', index_label='family_id' ) | Santa's Workshop Tour 2019 |
7,022,143 | %matplotlib inline<load_from_csv> | def local_search(best_solution, best_penalty, time_limit):
start = timer()
time_since_start = 0
current_solution = best_solution.copy()
current_penalty = best_penalty
new_solution = best_solution.copy()
new_penalty = best_penalty
choice_index = 0
while time_since_start < time_limit:
improvement = False
random_permutation = np.random.permutation(N_FAM)
for fam in random_permutation:
time_since_start = round(( timer() -start))
if time_since_start > time_limit:
break
if current_solution[fam] == data.iat[fam,choice_index]:
continue
new_solution = current_solution.copy()
new_solution[fam] = data.iat[fam,choice_index]
new_penalty = calculate_penalty(new_solution)
if new_penalty < current_penalty:
current_solution = new_solution.copy()
current_penalty = new_penalty
if new_penalty < best_penalty:
best_solution = new_solution.copy()
best_penalty = new_penalty
improvement = True
if improvement:
choice_index = 0
else:
choice_index += 1
if choice_index == 10:
return best_solution
return best_solution | Santa's Workshop Tour 2019 |
7,022,143 | <count_missing_values><EOS> | ls_time_limit = 900
final_solution = local_search(solution, solution_penalty, ls_time_limit)
final_solution_penalty = calculate_penalty(final_solution)
final_solution_df = pd.DataFrame(final_solution, columns=['assigned_day'])
final_solution_df.to_csv(f'SantaPuLP_{int(final_solution_penalty)}.csv', index_label='family_id' ) | Santa's Workshop Tour 2019 |
7,389,628 | <SOS> metric: SantaWorkshopSchedule2019 Kaggle data source: santas-workshop-tour-2019<feature_engineering> | %matplotlib inline
NUMBER_DAYS = 100
NUMBER_FAMILIES = 5000
data = pd.read_csv('/kaggle/input/santa-workshop-tour-2019/family_data.csv')
submission = pd.read_csv(".. /input/santa-submission-69818/submission_69818.csv")
assigned_days = submission['assigned_day'].values
columns = data.columns[1:11]
DESIRED = data[columns].values
COST_PER_FAMILY = [0,50,50,100,200,200,300,300,400,500]
COST_PER_FAMILY_MEMBER = [0, 0, 9, 9, 9, 18, 18, 36, 36,235]
N_PEOPLE = data['n_people'].values
def get_daily_occupancy(assigned_days):
daily_occupancy = np.zeros(100, int)
for fid, assigned_day in enumerate(assigned_days):
daily_occupancy[assigned_day-1] += N_PEOPLE[fid]
return daily_occupancy
def cost_function(prediction):
N_DAYS = 100
MAX_OCCUPANCY = 300
MIN_OCCUPANCY = 125
penalty = 0
days = list(range(N_DAYS,0,-1))
tmp = pd.read_csv('/kaggle/input/santa-workshop-tour-2019/family_data.csv', index_col='family_id')
family_size_dict = tmp[['n_people']].to_dict() ['n_people']
cols = [f'choice_{i}' for i in range(10)]
choice_dict = tmp[cols].to_dict()
daily_occupancy = {k:0 for k in days}
for f, d in enumerate(prediction):
n = family_size_dict[f]
choice_0 = choice_dict['choice_0'][f]
choice_1 = choice_dict['choice_1'][f]
choice_2 = choice_dict['choice_2'][f]
choice_3 = choice_dict['choice_3'][f]
choice_4 = choice_dict['choice_4'][f]
choice_5 = choice_dict['choice_5'][f]
choice_6 = choice_dict['choice_6'][f]
choice_7 = choice_dict['choice_7'][f]
choice_8 = choice_dict['choice_8'][f]
choice_9 = choice_dict['choice_9'][f]
daily_occupancy[d] += n
if d == choice_0:
penalty += 0
elif d == choice_1:
penalty += 50
elif d == choice_2:
penalty += 50 + 9 * n
elif d == choice_3:
penalty += 100 + 9 * n
elif d == choice_4:
penalty += 200 + 9 * n
elif d == choice_5:
penalty += 200 + 18 * n
elif d == choice_6:
penalty += 300 + 18 * n
elif d == choice_7:
penalty += 300 + 36 * n
elif d == choice_8:
penalty += 400 + 36 * n
elif d == choice_9:
penalty += 500 + 36 * n + 199 * n
else:
penalty += 500 + 36 * n + 398 * n
for _, v in daily_occupancy.items() :
if(v < MIN_OCCUPANCY):
penalty += 100000000
accounting_cost =(daily_occupancy[days[0]]-125.0)/ 400.0 * daily_occupancy[days[0]]**(0.5)
accounting_costs = [max(0, accounting_cost)]
diffs = [0]
yesterday_count = daily_occupancy[days[0]]
for day in days[1:]:
today_count = daily_occupancy[day]
diff = abs(today_count - yesterday_count)
accounting_costs.append(max(0,(today_count-125.0)/ 400.0 * today_count**(0.5 + diff / 50.0)))
yesterday_count = today_count
return penalty, sum(accounting_costs), penalty + sum(accounting_costs ) | Santa's Workshop Tour 2019 |
7,389,628 | <groupby><EOS> | MAX_BEST_CHOICE = 6
NUM_SWAP = 3000
NUM_SECONDS = 3600
NUM_THREADS = 4
for _ in range(40):
solver = pywraplp.Solver('Optimization preference cost', pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)
daily_occupancy = get_daily_occupancy(assigned_days ).astype(float)
fids = np.random.choice(range(NUMBER_FAMILIES), NUM_SWAP, replace=False)
PCOSTM, B = {}, {}
for fid in range(NUMBER_FAMILIES):
if fid in fids:
for i in range(MAX_BEST_CHOICE):
PCOSTM[fid, DESIRED[fid][i]-1] = COST_PER_FAMILY[i] + N_PEOPLE[fid] * COST_PER_FAMILY_MEMBER[i]
B[ fid, DESIRED[fid][i]-1] = solver.BoolVar('')
else:
daily_occupancy[assigned_days[fid]-1] -= N_PEOPLE[fid]
solver.set_time_limit(NUM_SECONDS*NUM_THREADS*1000)
solver.SetNumThreads(NUM_THREADS)
for day in range(NUMBER_DAYS):
if daily_occupancy[day]:
solver.Add(solver.Sum([N_PEOPLE[fid] * B[fid, day] for fid in range(NUMBER_FAMILIES)if(fid,day)in B])== daily_occupancy[day])
for fid in fids:
solver.Add(solver.Sum(B[fid, day] for day in range(NUMBER_DAYS)if(fid, day)in B)== 1)
solver.Minimize(solver.Sum(PCOSTM[fid, day] * B[fid, day] for fid, day in B))
sol = solver.Solve()
status = ['OPTIMAL', 'FEASIBLE', 'INFEASIBLE', 'UNBOUNDED', 'ABNORMAL', 'MODEL_INVALID', 'NOT_SOLVED']
if status[sol] in ['OPTIMAL', 'FEASIBLE']:
tmp = assigned_days.copy()
for fid, day in B:
if B[fid, day].solution_value() > 0.48:
tmp[fid] = day+1
if cost_function(tmp)[2] < cost_function(assigned_days)[2]:
assigned_days = tmp
submission['assigned_day'] = assigned_days
submission.to_csv('submission.csv', index=False)
print('Result:', status[sol], cost_function(tmp))
else:
print('Result:', status[sol] ) | Santa's Workshop Tour 2019 |
13,184,958 | <SOS> metric: PostProcessorKernel Kaggle data source: conways-reverse-game-of-life-2020<concatenate> | import time
import pandas as pd
import torch | Conway's Reverse Game of Life 2020 |
13,184,958 | useful.append('Fare' )<feature_engineering> | torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = False | Conway's Reverse Game of Life 2020 |
13,184,958 | df['small_family'] = df['family_size'].apply(lambda size: 1 if size<=4 else 0)
df['big_family'] = df['family_size'].apply(lambda size: 1 if size>=7 else 0)
df['no_family'] = df['family_size'].apply(lambda s: 1 if s==1 else 0)
useful.extend(['SibSp', 'Parch', 'family_size', 'small_family', 'big_family', 'no_family'] )<categorify> | N = 25
device = 'cuda'
TEST_CSV = '.. /input/conways-reverse-game-of-life-2020/test.csv'
OUTPUT_CSV = 'submission.csv' | Conway's Reverse Game of Life 2020 |
13,184,958 | df['male'] = df['Sex'].map({'male': 1, 'female': 0})
useful.append('male' )<categorify> | cv = torch.nn.Conv2d(1, 1, kernel_size=3, padding=1, padding_mode='circular', bias=False)
cv.requires_grad=False
cv.weight = torch.nn.Parameter(
torch.tensor(
[[[[ 1., 1., 1.],
[ 1., 0., 1.],
[ 1., 1., 1.]]]],
device=device,
dtype=torch.float16
),
requires_grad=False,
)
@torch.jit.script
def forward(grid, delta: int):
N=25
g = grid.reshape(-1, 1, N, N)
for _ in torch.arange(delta):
g = g.to(torch.float16)
neighbor_sum = cv(g)
g =(( neighbor_sum == 3)|(( g == 1)&(neighbor_sum == 2)))
return g.reshape(-1, N, N ) | Conway's Reverse Game of Life 2020 |
13,184,958 | df = pd.concat([df, pd.get_dummies(df['Embarked'], prefix='embarked')], axis=1)
useful.extend(['embarked_{}'.format(x)for x in ['C', 'S', 'Q']])
df.sample(5 )<concatenate> | @torch.jit.script
def random_parents(n_parents: int, device: str):
N = 25
RANDOM_ALIVE =.2
return torch.rand(( n_parents, N, N), device=device)>(1-RANDOM_ALIVE ) | Conway's Reverse Game of Life 2020 |
13,184,958 | useful.append('Pclass' )<categorify> | @torch.jit.script
def loss(input, target):
return torch.sum(input ^ target, dim=(-1,-2)) | Conway's Reverse Game of Life 2020 |
13,184,958 | df = pd.concat([df, pd.get_dummies(df['cabin_letter'], prefix='deck')], axis=1)
letters = df['cabin_letter'].unique()
useful.extend(['deck_{}'.format(x)for x in letters] )<sort_values> | @torch.jit.script
def select_best(parents, delta: int, target, n_best: int):
scores = loss(forward(parents, delta), target)
best_values, best_indices = torch.topk(scores, n_best, dim=0, largest=False, sorted=True)
new_parents = parents[best_indices,...]
return new_parents, best_values[0], new_parents[0,...] | Conway's Reverse Game of Life 2020 |
13,184,958 | ticket_count = df[['Ticket', 'Name']].groupby('Ticket' ).count().rename(columns={'Name':'count'} ).sort_values(by='count', ascending=False)
ticket_count.head()<filter> | @torch.jit.script
def random_combine(parents, n_offsprings: int, device: str, pre_masks):
N = 25
dads = torch.randint(low=0, high=parents.shape[0], size=(n_offsprings,),
device=device, dtype=torch.long)
dads = parents[dads,...]
moms = torch.randint(low=0, high=parents.shape[0], size=(n_offsprings,),
device=device, dtype=torch.long)
moms = parents[moms,...]
masks = pre_masks[torch.randint(low=0, high=pre_masks.shape[0], size=(n_offsprings,),
device=device, dtype=torch.long)]
return torch.where(masks, dads, moms ) | Conway's Reverse Game of Life 2020 |
13,184,958 | df[df['Ticket']=='CA.2343']<filter> | def precomputes_masks() :
N = 25
BLOCK_SIZE = 17
block = torch.nn.Conv2d(1, 1, kernel_size=BLOCK_SIZE, padding=BLOCK_SIZE//2,
padding_mode='circular', bias=False)
block.requires_grad=False
block.weight = torch.nn.Parameter(
torch.ones(( 1, 1, BLOCK_SIZE, BLOCK_SIZE),
device=device,
dtype=torch.float16
),
requires_grad=False,
)
masks = torch.zeros(( N * N, 1, N, N), device=device, dtype=torch.float16)
for x in range(N):
for y in range(N):
masks[x * N + y, 0, x, y] = 1.
masks = block(masks)
return masks[:, 0,...] >.5 | Conway's Reverse Game of Life 2020 |
13,184,958 | df[df['Ticket']=='1601']<feature_engineering> | @torch.jit.script
def mutate(parents, device: str):
MUTATION =.0016
mutations = torch.rand(parents.shape, device=device)< MUTATION
return parents ^ mutations | Conway's Reverse Game of Life 2020 |
13,184,958 | df['ticket_owners'] = df['Ticket'].apply(lambda x: ticket_count.loc[x])
df['shared_fare'] = df['Fare'] / df['ticket_owners']
df['alone'] = df[['ticket_owners','no_family']].apply(lambda row: 1 if row.ticket_owners==1 and row.no_family==1 else 0 , axis=1)
useful.extend(['ticket_owners', 'shared_fare', 'alone'] )<feature_engineering> | @torch.jit.script
def optimize_one_puzzle(delta: int, data, device: str, pre_masks):
N = 25
N_GENERATION = 30
P = 4_500
N_BEST = P // 30
N_ELITES = 8
best_score = torch.tensor([N*N], device=device)
best = torch.zeros(( N,N), device=device ).to(torch.bool)
parents = random_parents(P, device)
elites = torch.empty(( 1, N, N), dtype=torch.bool, device=device)
elites[0,...] = data
for i in range(N_GENERATION):
parents = random_combine(parents, P, device, pre_masks)
parents = mutate(parents, device)
parents[:N_ELITES,...] = elites
parents, best_score, best = select_best(parents, delta, data, N_BEST)
elites = parents[:N_ELITES,...]
if best_score == 0:
break
return best_score, best | Conway's Reverse Game of Life 2020 |
13,184,958 | older_age = df[['Ticket', 'Age']].groupby('Ticket' ).max()
df['older_relative_age'] = df['Ticket'].apply(lambda ticket: older_age.loc[ticket])
useful.extend(['older_relative_age'] )<feature_engineering> | @torch.jit.script
def optimize_all_puzzles(deltas, df, device: str, pre_masks):
sub = df.clone()
for n in torch.arange(df.shape[0]):
delta = deltas[n]
data = df[n,...]
_, sub[n,...] = optimize_one_puzzle(delta, data, device, pre_masks)
return sub | Conway's Reverse Game of Life 2020 |
13,184,958 | def ticket_type(t):
if re.match('^\d+$', t):
return 'len' + str(len(t))
else:
return re.sub('[^A-Z]', '', t)
df['ticket_type'] = df['Ticket'].apply(ticket_type)
df[['ticket_type', 'Survived']].groupby(
'ticket_type' ).agg({'Survived': ['mean', 'std','count']} ).sort_values(( 'Survived','count'), ascending=False )<categorify> | df = pd.read_csv(TEST_CSV, index_col='id' ) | Conway's Reverse Game of Life 2020 |
13,184,958 | def useful_ticket_type(ticket_type):
useful_types = ['A', 'SOTONOQ', 'WC']
if ticket_type in useful_types:
return ticket_type
else:
return 'other'
df['useful_ticket_type'] = df['ticket_type'].apply(useful_ticket_type)
df = pd.concat(
[df, pd.get_dummies(df['useful_ticket_type'], prefix='ticket_type')], axis=1)
letters = df['useful_ticket_type'].unique()
useful.extend(['ticket_type_{}'.format(x)for x in letters])
df.sample(10 )<feature_engineering> | submission = df.copy()
submission.drop(['delta'], inplace=True, axis=1 ) | Conway's Reverse Game of Life 2020 |
13,184,958 | df['name_length_short'] = df['name_length'].apply(lambda s: 1 if s <= 35 else 0)
df['name_length_mid'] = df['name_length'].apply(lambda s: 1 if 35 < s <=58 else 0)
df['name_length_long'] = df['name_length'].apply(lambda s: 1 if s > 58 else 0)
useful.extend(['name_length', 'name_length_short', 'name_length_mid', 'name_length_long'] )<feature_engineering> | indexes = df.index
deltas = torch.from_numpy(df.delta.values ).to(device)
df = torch.BoolTensor(df.values[:, 1:].reshape(( -1, N, N)) ).to(device ) | Conway's Reverse Game of Life 2020 |
13,184,958 | df['lang'] = df['Name'].apply(lambda n: langid.classify(n)[0])
df[['Name','lang']].sample(10 )<groupby> | start_time = time.time()
pre_masks = precomputes_masks()
sub = optimize_all_puzzles(deltas, df, device, pre_masks)
print(f'Processed {sub.shape[0]:,} puzzles in {time.time() - start_time:.2f} seconds 🔥🔥🔥' ) | Conway's Reverse Game of Life 2020 |
13,184,958 | lang_count = df[['lang','Name']].groupby('lang' ).count().rename(columns={'Name':'count'})
lang_class = df[['lang','Pclass']].groupby('lang' ).mean()
lang_survived = df[['lang','Survived']].groupby('lang' ).mean()
pd.concat([lang_count, lang_class, lang_survived], axis=1 ).sort_values(by='count', ascending=False ).head(15 )<feature_engineering> | submission.rename(columns={f'stop_{x}': f'start_{x}' for x in range(N*N)}, inplace=True)
submission.iloc[:sub.shape[0], :] = sub.reshape(( -1, N*N)).cpu().numpy().astype(int)
submission.to_csv(OUTPUT_CSV ) | Conway's Reverse Game of Life 2020 |
13,184,958 | language_groups = {
'uk':('cy', 'en'),
'germanic':('da', 'de', 'nl'),
'latin':('es', 'fr', 'it', 'la', 'pt', 'br', 'ro'),
'african':('af', 'rw', 'xh'),
'asian':('id', 'tl', 'tr')
}
language_map = { y:x for x in language_groups for y in language_groups[x]}
df['lang_group'] = df['lang'].apply(lambda l: language_map[l] if l in language_map else 'other')
survived_avg_per_group = df[['lang_group','Survived']].groupby('lang_group' ).mean()
survived_std_per_group = df[['lang_group','Survived']].groupby('lang_group' ).std().rename(columns={'Survived':'std'})
pd.concat([survived_avg_per_group, survived_std_per_group], axis=1 )<categorify> | def leaderboard_score(deltas, df, sub, device: str):
result = torch.empty(sub.shape[0], device=device, dtype=torch.long)
for delta in range(1, 6):
start = sub[deltas == delta]
end = df[deltas == delta]
result[deltas == delta] = loss(forward(start, delta), end)
print('Leaderboard score:', torch.sum(result ).item() /(result.shape[0]*N*N)) | Conway's Reverse Game of Life 2020 |
13,184,958 | df = pd.concat([df, pd.get_dummies(df['lang_group'], prefix='lang_group')], axis=1)
langs = df['lang_group'].unique()
useful.extend(['lang_group_{}'.format(x)for x in langs] )<feature_engineering> | leaderboard_score(deltas, df, sub, device ) | Conway's Reverse Game of Life 2020 |
13,173,972 | surnames = df[['surname', 'Name']].groupby('surname' ).count().rename(columns={'Name':'count'})
df['surname_count'] = df['surname'].apply(lambda x: surnames.loc[x])
useful.append('surname_count' )<sort_values> | %%writefile main-RT.cpp
extern "C" {
};
using namespace std;
constexpr int n = 25;
constexpr int N = n * n;
using CW = vector<uint8_t>;
using TStamp = std::chrono::time_point<std::chrono::system_clock>;
struct task {
string id;
CW field;
int steps;
};
random_device rd;
mt19937 g(rd());
static atomic<bool> is_stopped;
static atomic<bool> is_timeouted;
double total_timeout = 8.5 * 60 * 60;
TStamp app_started = std::chrono::system_clock::now() ;
void terminate(int code){
is_stopped = true;
is_timeouted = true;
exit(code);
}
vector<string> split(std::string s, char delimiter){
s += delimiter;
vector<string> result;
size_t start = 0;
for(size_t i = start; i < s.size() ; ++i){
while(s[i] != delimiter){
++i;
}
result.push_back(s.substr(start, i - start)) ;
start = i + 1;
}
return result;
}
CW s2vec(const std::string& s){
CW v(N);
if(static_cast<int>(s.size())!= N + N - 1){
cerr << "wtf s2vec format len" << s.size() << endl;
terminate(1);
}
for(int i = 0; i < N; ++i){
v[i] = s[i + i] == '1';
}
return v;
}
string vec2s(const CW& v){
string res;
for(int i = 0; i < N; ++i){
if(v[i]){
res += "1,";
} else {
res += "0,";
}
}
res.resize(res.size() - 1);
return res;
}
int weight(const task& t){
int count = 0;
for(auto k : t.field){
if(k)++count;
}
return count;
}
vector<int> init_nums(int N){
vector<int> values;
for(int i = 0; i < N; ++i){
values.push_back(i);
}
return values;
}
const vector<int> n_nums = init_nums(N);
bool calc(const CW& field, int pos, int x, int y)noexcept {
static const int dx8[] = {-1, -1, -1, 0, 0, 1, 1, 1};
static const int dy8[] = {-1, 0, 1, -1, 1, -1, 0, 1};
int count = 0;
for(int i = 0; i < 8; ++i){
int a = x + dx8[i];
int b = y + dy8[i];
if(a < 0)a += n;
else if(a >= n)a -= n;
if(b < 0)b += n;
else if(b >= n)b -= n;
if(field[b * n + a]){
++count;
}
}
return(count | field[pos])== 3;
}
bool calc_left_line(const CW& field, size_t pos)noexcept {
static const size_t d_left8[] = { -1ULL, -25ULL, -24ULL, +24, +1, +25 + 24, +25, +26};
uint8_t count = 0;
for(int i = 0; i < 8; ++i){
count += field[pos + d_left8[i]];
}
return(count | field[pos])== 3;
}
bool calc_right_line(const CW& field, size_t pos)noexcept {
static const size_t d_right8[] = { -26ULL, -25ULL, - 49ULL, -1ULL, -24ULL, +24, +25, +1};
uint8_t count = 0;
for(int i = 0; i < 8; ++i){
count += field[pos + d_right8[i]];
}
return(count | field[pos])== 3;
}
bool calc_bottom_line(const CW& field, size_t pos)noexcept {
static const size_t d_bottom8[] = { -26ULL, -25ULL, -24ULL, -1ULL, 1, -599ULL, -600ULL, -601ULL};
uint8_t count = 0;
for(int i = 0; i < 8; ++i){
count += field[pos + d_bottom8[i]];
}
return(count | field[pos])== 3;
}
bool calc_top_line(const CW& field, size_t pos)noexcept {
static const size_t d_top8[] = { +599, +600, +601, -1ULL, 1, +24, +25, +26};
uint8_t count = 0;
for(int i = 0; i < 8; ++i){
count += field[pos + d_top8[i]];
}
return(count | field[pos])== 3;
}
bool calc_no_checks(const CW& field, size_t pos)noexcept {
static const size_t di8[] = {0, 1, 2, 25, 27, +50, +51, +52};
uint8_t count = 0;
for(int i = 0; i < 8; ++i){
count += field[pos + di8[i]];
}
return(count | field[pos + 26])== 3;
}
CW run_step(CW start)noexcept {
thread_local CW next(N);
for(int x = 1; x < n - 1; ++x){
next[x] = calc_top_line(start, x);
next[600 + x] = calc_bottom_line(start, 600 + x);
next[x * 25] = calc_left_line(start, x * 25);
next[x * 25 + 24] = calc_right_line(start, x * 25 + 24);
}
size_t pos = 0;
for(int y = 1; y < n - 1; ++y){
for(int x = 1; x < n - 1; ++x){
next[pos + 26] = calc_no_checks(start, pos);
++pos;
}
pos += 2;
}
for(auto i : {0, n-1, N - n, N - 1}){
next[i] = calc(start, i, i % n, i / n);
}
return next;
}
void run_step_partial(CW* steps, int x, int y, int step, vector<pair<int, int>>& changelog){
int range = step + 1;
for(int b = y - range; b <= y + range; ++b){
int q = b;
if(q < 0)q += n;
else if(q >= n)q -= n;
for(int a = x - range; a <= x + range; ++a){
int p = a;
if(p < 0)p += n;
else if(p >= n)p -= n;
size_t i = q * n + p;
bool not_vert_side = p > 0 && p + 1 < n;
bool not_hori_side = q > 0 && q + 1 < n;
bool value;
if(not_vert_side && not_hori_side){
value = calc_no_checks(steps[step], i - 26);
} else {
bool not_vert_side = p > 0 && p + 1 < n;
bool not_hori_side = q > 0 && q + 1 < n;
if(not_vert_side && q + 1 == n){
value = calc_bottom_line(steps[step], i);
} else if(not_vert_side && q == 0){
value = calc_top_line(steps[step], i);
} else if(not_hori_side && p == 0){
value = calc_left_line(steps[step], i);
} else if(not_hori_side && p + 1 == n){
value = calc_right_line(steps[step], i);
} else {
value = calc(steps[step], i, p, q);
}
}
bool previous = steps[step + 1][i];
if(value != previous){
changelog.push_back({step + 1, i});
steps[step + 1][i] = value;
}
}
}
}
int run_step_partial_last(CW* steps, int x, int y, int step, vector<pair<int, int>>& changelog, const CW& original){
int range = step + 1;
int diff = 0;
for(int b = y - range; b <= y + range; ++b){
int q = b;
if(q < 0)q += n;
else if(q >= n)q -= n;
for(int a = x - range; a <= x + range; ++a){
int p = a;
if(p < 0)p += n;
else if(p >= n)p -= n;
size_t i = q * n + p;
bool not_vert_side = p > 0 && p + 1 < n;
bool not_hori_side = q > 0 && q + 1 < n;
bool value;
if(not_vert_side && not_hori_side){
value = calc_no_checks(steps[step], i - 26);
} else {
if(not_vert_side && q + 1 == n){
value = calc_bottom_line(steps[step], i);
} else if(not_vert_side && q == 0){
value = calc_top_line(steps[step], i);
} else if(not_hori_side && p == 0){
value = calc_left_line(steps[step], i);
} else if(not_hori_side && p + 1 == n){
value = calc_right_line(steps[step], i);
} else {
value = calc(steps[step], i, p, q);
}
}
bool previous = steps[step + 1][i];
if(value != previous){
if(value == original[i]){
++diff;
} else {
--diff;
}
changelog.push_back({step + 1, i});
steps[step + 1][i] = value;
}
}
}
return diff;
}
void revert_changes(CW* steps, const vector<pair<int, int>>& changelog){
for(auto i : changelog){
steps[i.first][i.second] = !steps[i.first][i.second];
}
}
void print(const CW& field){
for(int pos = 0; pos < N; ++pos){
cout <<(field[pos] ? '*' : '.'); //static_cast<int>(field[pos]);
if(pos % n == n - 1){
cout << endl;
}
}
cout << endl;
}
int validate(CW start, const task& t){
for(int i = 0; i < t.steps; ++i){
start = run_step(start);
}
int count = 0;
for(int i = 0; i < N; ++i){
if(start[i] == t.field[i])
++count;
}
return count;
}
int validate_partial(const task& t, CW* steps, int x, int y, vector<pair<int, int>>& changelog){
changelog.clear() ;
for(int i = 0; i < t.steps - 1; ++i){
run_step_partial(steps, x, y, i, changelog);
}
int diff = run_step_partial_last(steps, x, y, t.steps - 1, changelog, t.field);
return diff;
}
static class Cache : private boost::noncopyable {
private:
map<string, pair<int, string>> cache;
mutex m;
public:
Cache() { }
auto get(const std::string& id){
lock_guard<mutex> g(m);
return cache[id];
}
bool set(const pair<int, CW>& value, const task& t){
const std::string& id = t.id;
{
lock_guard<mutex> g(m);
if(cache.find(id)!= cache.end() && cache[id].first >= value.first){
return false;
}
}
int score = validate(value.second, t);
if(score != value.first){
cerr << "wtf score for " << id << ": " << score << ' ' << value.first << endl;
for(auto i : value.second){
cerr <<(int)i << ' ';
}
cerr << endl;
throw std::runtime_error("wtf score");
}
string s = vec2s(value.second);
{
lock_guard<mutex> g(m);
cache[id] = {value.first, s};
ofstream cache_file("processed/cache-real-time", std::ofstream::app);
cache_file << id << ' ' << value.first << ' ' << s << endl;
return true;
}
}
} global_cache;
CW prepare_mask(CW solution, const task& t){
for(int i = 0; i < t.steps; ++i){
solution = run_step(solution);
}
vector<int> diff;
for(int i = 0; i < N; ++i){
if(t.field[i] != solution[i]){
diff.push_back(i);
}
}
for(int i = 0; i < t.steps; ++i){
vector<int> next_diff;
for(auto pos : diff){
next_diff.push_back(pos);
int x = pos % n;
int y = pos / n;
static const int dx8[] = {-1, -1, -1, 0, 0, 1, 1, 1};
static const int dy8[] = {-1, 0, 1, -1, 1, -1, 0, 1};
for(int i = 0; i < 8; ++i){
int a = x + dx8[i];
int b = y + dy8[i];
if(a < 0)a += n;
else if(a >= n)a -= n;
if(b < 0)b += n;
else if(b >= n)b -= n;
next_diff.push_back(b * n + a);
}
}
diff = next_diff;
sort(diff.begin() , diff.end());
{
auto last = std::unique(diff.begin() , diff.end());
diff.erase(last, diff.end());
}
}
CW result(N);
for(auto i : diff){
result[i] = true;
}
// cerr << diff.size() << " for " << global_cache.get(t.id ).first << endl;
return result;
}
CW shake_single_if_equal(CW _field, const task& t, bool fast = false){
CW steps[t.steps + 1];
steps[0] = _field;
CW& field = steps[0];
vector<pair<int, int>> changelog;
int best_score = validate(field, t);
auto best_pos = field;
int score = best_score;
int current_score = best_score;
for(int i = 0; i < t.steps; ++i){
steps[i + 1] = run_step(steps[i]);
}
CW mask_to_shake = prepare_mask(best_pos, t);
int default_tries = fast ? 39 : 99;
int default_range = fast ? 2 : 3;
int tries = default_tries;
int range = default_range;
do {
if(score != best_score){
tries = default_tries;
score = best_score;
range = default_range;
}
if(tries % 50 == 0)range -= 1;
int shift = rand() % N;
bool dir = rand() % 2;
int i = shift;
int x = i % n;
int y = i / n;
for(int index = 0; index < N; ++index){
if(dir){
++i;
++x;
if(x == n){
x = 0;
++y;
if(y == n)y = 0;
}
if(i == N)i = 0;
} else {
--i;
--x;
if(x == -1){
x = n - 1;
--y;
if(y == -1)y = n - 1;
}
if(i == N)i = 0;
if(i < 0)i = N - 1;
}
if(!mask_to_shake[i] && tries % 5 != 0)continue; /// _---------- RT_ADJ
bool value = field[i];
field[i] = !field[i];
int new_score = current_score + validate_partial(t, steps, x, y, changelog);
if(new_score > best_score){
best_score = new_score;
best_pos = field;
global_cache.set({best_score, best_pos}, t);
} else if(new_score < best_score - range){
field[i] = !field[i];
} else {
if(rand() % 4 == 0){
field[i] = !field[i];
}
}
if(value == field[i]){
revert_changes(steps, changelog);
} else {
current_score = new_score;
}
}
} while(best_score != score || tries-- > 0);
return best_pos;
}
CW shake_k_and_single(const task &t, CW start, int tries, bool fast = false){
auto next_start = start;
vector<int> indexes = n_nums;
shuffle(indexes.begin() , indexes.end() , g);
for(int i = 0; i < tries; ++i){
next_start[indexes[i]] = !next_start[indexes[i]];
}
next_start = shake_single_if_equal(next_start, t, true);
return shake_single_if_equal(next_start, t, fast);
}
CW make_random() {
CW c(N);
auto indexes = n_nums;
shuffle(indexes.begin() , indexes.end() , g);
indexes.resize(N / 2);
for(auto i : indexes){
c[i] = 1;
}
return c;
}
CW make_checked() {
CW c(N);
for(size_t i = 0; i < N; i += 2){
c[i] = true;
}
return c;
}
namespace CNF_solver {
using CNF = std::vector<std::vector<int>>;
bool alive(const CW& field, int pos, int x, int y){
static const int dx8[] = {-1, -1, -1, 0, 0, 1, 1, 1};
static const int dy8[] = {-1, 0, 1, -1, 1, -1, 0, 1};
int count = 0;
for(int i = 0; i < 8; ++i){
int a = x + dx8[i];
int b = y + dy8[i];
if(a < 0)a += n;
else if(a >= n)a -= n;
if(b < 0)b += n;
else if(b >= n)b -= n;
if(field[b * n + a]){
++count;
}
}
return(count | field[pos])== 3;
}
vector<int> generate_positions(int x, int y){
static const int dx8[] = {-1, -1, -1, 0, 0, 1, 1, 1};
static const int dy8[] = {-1, 0, 1, -1, 1, -1, 0, 1};
vector<int> result;
result.push_back(-9999); // place holder
for(int i = 0; i < 8; ++i){
int a = x + dx8[i];
int b = y + dy8[i];
if(a < 0)a += n;
else if(a >= n)a -= n;
if(b < 0)b += n;
else if(b >= n)b -= n;
result.push_back(b * n + a);
}
return result;
}
// CNF encoding: a cell that is false is expressed through the negative literal of its variable
bool geti(uint32_t value, int index){
return(value &(1 << index)) ;
}
CNF universal_cnf() {
CNF result_cnf;
uint32_t mask = 0;
while(!geti(mask, 10)) {
int count = 0;
for(int i = 0; i < 8; ++i){
if(geti(mask, i)) {
++count;
}
}
bool alive =(count | geti(mask, 8)) == 3;
bool is_true =(alive == geti(mask, 9)) ;
if(!is_true){
vector<int> next;
for(int i = 0; i <= 9; ++i){
if(geti(mask, i)) {
next.push_back(-i - 1);
} else {
next.push_back(i + 1);
}
}
result_cnf.push_back(next);
}
sort(result_cnf.begin() , result_cnf.end());
++mask;
}
// 325065 CNF -> 160k CNF
// speed increased 1.6 ; 30 ->
// optimize_cnf(result_cnf);
// Optimized much better externally by python tool sympy:
// $ python3
// >>>
// >>> x, y, z = symbols('x y z')
// >>> simplify_logic(( x & y & ~z)|(x & ~y & z), form="cnf")
// x &(y | z)&(~y | ~z)
result_cnf = {{1, 2, 3, 4, 5, 6, 7, -10}, {1, 2, 3, 4, 5, 6, 8, -10}, {1, 2, 3, 4, 5, 6, 9, -10}, {1, 2, 3, 4, 5, 7, 8, -10}, {1, 2, 3, 4, 5, 7, 9, -10}, {1, 2, 3, 4, 5, 8, 9, -10}, {1, 2, 3, 4, 6, 7, 8, -10}, {1, 2, 3, 4, 6, 7, 9, -10}, {1, 2, 3, 4, 6, 8, 9, -10}, {1, 2, 3, 4, 7, 8, 9, -10}, {1, 2, 3, 5, 6, 7, 8, -10}, {1, 2, 3, 5, 6, 7, 9, -10}, {1, 2, 3, 5, 6, 8, 9, -10}, {1, 2, 3, 5, 7, 8, 9, -10}, {1, 2, 3, 6, 7, 8, 9, -10}, {1, 2, 4, 5, 6, 7, 8, -10}, {1, 2, 4, 5, 6, 7, 9, -10}, {1, 2, 4, 5, 6, 8, 9, -10}, {1, 2, 4, 5, 7, 8, 9, -10}, {1, 2, 4, 6, 7, 8, 9, -10}, {1, 2, 5, 6, 7, 8, 9, -10}, {1, 3, 4, 5, 6, 7, 8, -10}, {1, 3, 4, 5, 6, 7, 9, -10}, {1, 3, 4, 5, 6, 8, 9, -10}, {1, 3, 4, 5, 7, 8, 9, -10}, {1, 3, 4, 6, 7, 8, 9, -10}, {1, 3, 5, 6, 7, 8, 9, -10}, {1, 4, 5, 6, 7, 8, 9, -10}, {2, 3, 4, 5, 6, 7, 8, -10}, {2, 3, 4, 5, 6, 7, 9, -10}, {2, 3, 4, 5, 6, 8, 9, -10}, {2, 3, 4, 5, 7, 8, 9, -10}, {2, 3, 4, 6, 7, 8, 9, -10}, {2, 3, 5, 6, 7, 8, 9, -10}, {2, 4, 5, 6, 7, 8, 9, -10}, {3, 4, 5, 6, 7, 8, 9, -10}, {-1, -10, -2, -3, -4}, {-1, -10, -2, -3, -5}, {-1, -10, -2, -3, -6}, {-1, -10, -2, -3, -7}, {-1, -10, -2, -3, -8}, {-1, -10, -2, -4, -5}, {-1, -10, -2, -4, -6}, {-1, -10, -2, -4, -7}, {-1, -10, -2, -4, -8}, {-1, -10, -2, -5, -6}, {-1, -10, -2, -5, -7}, {-1, -10, -2, -5, -8}, {-1, -10, -2, -6, -7}, {-1, -10, -2, -6, -8}, {-1, -10, -2, -7, -8}, {-1, -10, -3, -4, -5}, {-1, -10, -3, -4, -6}, {-1, -10, -3, -4, -7}, {-1, -10, -3, -4, -8}, {-1, -10, -3, -5, -6}, {-1, -10, -3, -5, -7}, {-1, -10, -3, -5, -8}, {-1, -10, -3, -6, -7}, {-1, -10, -3, -6, -8}, {-1, -10, -3, -7, -8}, {-1, -10, -4, -5, -6}, {-1, -10, -4, -5, -7}, {-1, -10, -4, -5, -8}, {-1, -10, -4, -6, -7}, {-1, -10, -4, -6, -8}, {-1, -10, -4, -7, -8}, {-1, -10, -5, -6, -7}, {-1, -10, -5, -6, -8}, {-1, -10, -5, -7, -8}, {-1, -10, -6, -7, -8}, {-10, -2, -3, -4, -5}, {-10, -2, -3, -4, -6}, {-10, -2, -3, -4, -7}, {-10, -2, -3, -4, -8}, {-10, -2, -3, -5, -6}, {-10, -2, -3, -5, -7}, {-10, -2, -3, -5, -8}, {-10, -2, -3, -6, -7}, {-10, -2, -3, -6, -8}, {-10, -2, -3, -7, -8}, {-10, -2, -4, -5, -6}, {-10, -2, -4, -5, -7}, {-10, -2, -4, -5, -8}, {-10, -2, -4, -6, -7}, {-10, -2, -4, -6, -8}, {-10, -2, -4, -7, -8}, {-10, -2, -5, -6, -7}, {-10, -2, -5, -6, -8}, {-10, -2, -5, -7, -8}, {-10, -2, -6, -7, -8}, {-10, -3, -4, -5, -6}, {-10, -3, -4, -5, -7}, {-10, -3, -4, -5, -8}, {-10, -3, -4, -6, -7}, {-10, -3, -4, -6, -8}, {-10, -3, -4, -7, -8}, {-10, -3, -5, -6, -7}, {-10, -3, -5, -6, -8}, {-10, -3, -5, -7, -8}, {-10, -3, -6, -7, -8}, {-10, -4, -5, -6, -7}, {-10, -4, -5, -6, -8}, {-10, -4, -5, -7, -8}, {-10, -4, -6, -7, -8}, {-10, -5, -6, -7, -8}, {1, 10, 2, 3, 4, 5, -6, -7, -8}, {1, 10, 2, 3, 4, 6, -5, -7, -8}, {1, 10, 2, 3, 4, 7, -5, -6, -8}, {1, 10, 2, 3, 4, 8, -5, -6, -7}, {1, 10, 2, 3, 5, 6, -4, -7, -8}, {1, 10, 2, 3, 5, 6, -4, -7, -9}, {1, 10, 2, 3, 5, 6, -7, -8, -9}, {1, 10, 2, 3, 5, 7, -4, -6, -8}, {1, 10, 2, 3, 5, 7, -4, -6, -9}, {1, 10, 2, 3, 5, 7, -4, -8, -9}, {1, 10, 2, 3, 5, 7, -6, -8, -9}, {1, 10, 2, 3, 5, 8, -4, -6, -7}, {1, 10, 2, 3, 5, 8, -6, -7, -9}, {1, 10, 2, 3, 6, 7, -4, -5, -8}, {1, 10, 2, 3, 6, 7, -4, -5, -9}, {1, 10, 2, 3, 6, 7, -5, -8, -9}, {1, 10, 2, 3, 6, 8, -4, -5, -7}, {1, 10, 2, 3, 6, 8, -5, -7, -9}, {1, 10, 2, 3, 7, 8, -4, -5, -6}, {1, 10, 2, 3, 7, 8, -5, -6, -9}, {1, 10, 2, 4, 5, 6, -3, -7, -8}, {1, 10, 2, 4, 5, 7, -3, -6, -8}, {1, 10, 2, 4, 5, 8, -3, -6, -7}, {1, 10, 2, 4, 6, 7, -3, -5, -8}, {1, 10, 2, 4, 6, 8, -3, -5, -7}, {1, 10, 2, 4, 7, 8, -3, -5, -6}, {1, 10, 2, 5, 6, 7, -3, -4, -8}, {1, 10, 2, 5, 6, 7, -3, -4, 
-9}, {1, 10, 2, 5, 6, 7, -3, -8, -9}, {1, 10, 2, 5, 6, 8, -3, -4, -7}, {1, 10, 2, 5, 6, 8, -3, -7, -9}, {1, 10, 2, 5, 7, 8, -3, -4, -6}, {1, 10, 2, 5, 7, 8, -3, -6, -9}, {1, 10, 2, 6, 7, 8, -3, -4, -5}, {1, 10, 2, 6, 7, 8, -3, -5, -9}, {1, 10, 3, 4, 5, 6, -2, -7, -8}, {1, 10, 3, 4, 5, 7, -2, -6, -8}, {1, 10, 3, 4, 5, 8, -2, -6, -7}, {1, 10, 3, 4, 6, 7, -2, -5, -8}, {1, 10, 3, 4, 6, 8, -2, -5, -7}, {1, 10, 3, 4, 7, 8, -2, -5, -6}, {1, 10, 3, 5, 6, 7, -2, -4, -8}, {1, 10, 3, 5, 6, 7, -2, -4, -9}, {1, 10, 3, 5, 6, 7, -2, -8, -9}, {1, 10, 3, 5, 6, 8, -2, -4, -7}, {1, 10, 3, 5, 6, 8, -2, -7, -9}, {1, 10, 3, 5, 7, 8, -2, -4, -6}, {1, 10, 3, 5, 7, 8, -2, -6, -9}, {1, 10, 3, 6, 7, 8, -2, -4, -5}, {1, 10, 3, 6, 7, 8, -2, -5, -9}, {1, 10, 4, 5, 6, 7, -2, -3, -8}, {1, 10, 4, 5, 6, 8, -2, -3, -7}, {1, 10, 4, 5, 7, 8, -2, -3, -6}, {1, 10, 4, 6, 7, 8, -2, -3, -5}, {1, 10, 5, 6, 7, 8, -2, -3, -4}, {1, 10, 5, 6, 7, 8, -2, -3, -9}, {10, 2, 3, 4, 5, 6, -1, -7, -8}, {10, 2, 3, 4, 5, 7, -1, -6, -8}, {10, 2, 3, 4, 5, 8, -1, -6, -7}, {10, 2, 3, 4, 6, 7, -1, -5, -8}, {10, 2, 3, 4, 6, 8, -1, -5, -7}, {10, 2, 3, 4, 7, 8, -1, -5, -6}, {10, 2, 3, 5, 6, 7, -1, -4, -8}, {10, 2, 3, 5, 6, 7, -1, -4, -9}, {10, 2, 3, 5, 6, 7, -1, -8, -9}, {10, 2, 3, 5, 6, 8, -1, -4, -7}, {10, 2, 3, 5, 6, 8, -1, -7, -9}, {10, 2, 3, 5, 7, 8, -1, -4, -6}, {10, 2, 3, 5, 7, 8, -1, -6, -9}, {10, 2, 3, 6, 7, 8, -1, -4, -5}, {10, 2, 3, 6, 7, 8, -1, -5, -9}, {10, 2, 4, 5, 6, 7, -1, -3, -8}, {10, 2, 4, 5, 6, 8, -1, -3, -7}, {10, 2, 4, 5, 7, 8, -1, -3, -6}, {10, 2, 4, 6, 7, 8, -1, -3, -5}, {10, 2, 5, 6, 7, 8, -1, -3, -4}, {10, 2, 5, 6, 7, 8, -1, -3, -9}, {10, 3, 4, 5, 6, 7, -1, -2, -8}, {10, 3, 4, 5, 6, 8, -1, -2, -7}, {10, 3, 4, 5, 7, 8, -1, -2, -6}, {10, 3, 4, 6, 7, 8, -1, -2, -5}, {10, 3, 5, 6, 7, 8, -1, -2, -4}, {10, 3, 5, 6, 7, 8, -1, -2, -9}, {10, 4, 5, 6, 7, 8, -1, -2, -3}};
return result_cnf;
}
// 0...N-1: previous position
// N...2N - 1: current position
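// Worked example of this numbering (a sketch, assuming n = 25 and N = n * n = 625 as used
// elsewhere in this solver): cell (x = 3, y = 2) has previous-layer index 2 * 25 + 3 = 53 and
// current-layer index 53 + N = 678; the DIMACS literals emitted below are 1-based, so the
// clauses reference 54 and 679 (negative literal = dead, positive literal = alive).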
vector<std::vector<int>> cnf_universal() {
    // universal "Conway's rules" CNF
CNF cnf;
CNF cnf_conway_cell = universal_cnf() ;
for(int y = 0; y < n; ++y){
for(int x = 0; x < n; ++x){
int pos_previous = y * n + x;
int pos_current = y * n + x + N;
vector<int> positions = generate_positions(x, y);
positions.push_back(pos_previous);
positions.push_back(pos_current);
for(auto& c : cnf_conway_cell){
vector<int> next;
for(auto& p : c){
if(p < 0){
next.push_back(-positions[-p] - 1);
} else {
next.push_back(positions[p] + 1);
}
}
cnf.push_back(next);
}
}
}
cout << "CNF size: " << cnf.size() << endl;
return cnf;
}
const static CNF cnf_conway_rules = cnf_universal() ;
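// cnf_conway_rules encodes one forward step (layer 0 -> layer 1). get_cnf() below stacks shifted
// copies of it, one per step, offsetting every literal by N * iter; the observed field is then
// pinned either with unit clauses (k == 1) or by substituting the known values into the last copy.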
CNF get_cnf(const task& t, const std::pair<int, int>& ignore_range = {0, 0}){
int k = t.steps;
    if(k == 1){
CNF cnf = cnf_conway_rules;
for(int iter = 1; iter < k; ++iter){
CNF cnf_i = cnf_conway_rules;
for(auto& i : cnf_i){
for(auto& j : i){
if(j > 0)j += N * iter;
else j -= N * iter;
}
cnf.push_back(i);
}
}
        // pin the observed field: one unit clause per cell
for(int i = 0; i < N; ++i){
int var = i + k * N + 1;
if(t.field[i]){
cnf.push_back({var});
} else {
cnf.push_back({-var});
}
}
return cnf;
}
// First level
CNF cnf = cnf_conway_rules;
// Middle levels: not first, not last
for(int iter = 1; iter + 1 < k; ++iter){
CNF cnf_i = cnf_conway_rules;
for(auto& i : cnf_i){
for(auto& j : i){
if(j > 0)j += N * iter;
else j -= N * iter;
}
cnf.push_back(i);
}
}
// fulfill CNF for current field
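    // Each template clause carries at most one literal on the observed layer (vars N+1 .. 2N).
    // If the known cell value already satisfies that literal the clause is dropped; otherwise the
    // literal is removed and the remaining (previous-layer) literals are shifted up by N * (k - 1)
    // so they constrain the last unknown level. Cells inside ignore_range are left unconstrained.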
CNF cnf_last = cnf_conway_rules;
for(auto& i : cnf_last){
int to_ignore = 0;
for(auto& j : i){
for(int l = N + 1; l <= N + N; ++l){
if(j == l || j == -l){
to_ignore = j;
}
}
}
int pos = -1;
if(to_ignore > 0){
pos = to_ignore - N - 1;
if(t.field[pos])continue;
}
if(to_ignore < 0){
pos = -to_ignore - N - 1;
if(!t.field[-to_ignore - N - 1])continue;
}
if(pos >= ignore_range.first && pos < ignore_range.second){
continue;
}
std::vector<int> c;
for(auto& j : i){
if(j == to_ignore)continue;
if(j > 0)
c.push_back(j + N *(k - 1)) ;
else
c.push_back(j - N *(k - 1)) ;
}
cnf.push_back(c);
}
return cnf;
}
} // namespace CNF_solver
struct KissatSolver {
KissatSolver(double timeout = 2.1)
: timeout(timeout)
, cleaner([this]() -> void {
cleanup() ;
})
{
}
~KissatSolver() {
cleaner.join() ;
}
kissat* add(string id){
kissat* solver = kissat_init() ;
{
lock_guard<mutex> g(m);
            if(all.find(id)!= all.end())throw std::runtime_error("Kissat solver already contains " + id);
all[id] = {solver, std::chrono::system_clock::now() };
}
return solver;
}
void notify_done(string id){
lock_guard<mutex> g(m);
to_free.insert(id);
}
void cleanup() {
while(!is_stopped){
this_thread::sleep_for(300ms);
lock_guard<mutex> g(m);
int to_clean = 0;
for(const auto& id : to_free){
auto& item = all[id];
kissat* solver = item.first;
kissat_release(solver);
all.erase(id);
++to_clean;
}
to_free.clear() ;
int to_terminate = 0;
TStamp now = std::chrono::system_clock::now() ;
for(const auto& i : all){
auto item = i.second;
double running = std::chrono::duration_cast<std::chrono::seconds>(now - item.second ).count() ;
if(running > timeout){
kissat_terminate(item.first);
++to_terminate;
}
}
if(to_terminate || to_clean){
cout << "terminated " << to_terminate << " cleaned " << to_clean << endl;
}
}
}
private:
double timeout;
map<string, std::pair<kissat*, TStamp>> all;
thread cleaner;
set<string> to_free;
mutex m;
} kissat_solver;
void sat_solver_k(const task& t){
int k = t.steps;
CNF_solver::CNF cnf = CNF_solver::get_cnf(t);
kissat* solver = kissat_solver.add(t.id);
kissat_set_option(solver, "quiet", 1);
kissat_set_configuration(solver, "sat");
kissat_reserve(solver, N * k);
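    // IPASIR-style clause loading: each clause is a run of literals terminated by 0.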
for(auto c : cnf){
for(auto i : c){
kissat_add(solver, i);
}
kissat_add(solver, 0);
}
int result = kissat_solve(solver);
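    // kissat_solve returns 10 for SATISFIABLE and 20 for UNSATISFIABLE;
    // 0 typically means the search was interrupted (e.g. via kissat_terminate above).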
switch(result){
case 20:
cerr << "wtf solver failed for " << t.id << endl;
break;
}
CW solution;
    bool at_least_one_true = false;
for(int i = 1; i <= N; ++i){
int next = kissat_value(solver, i);
if(next > 0){
solution.push_back(1);
at_least_one_true = true;
} else {
solution.push_back(0);
}
}
kissat_solver.notify_done(t.id);
int score = validate(solution, t);
global_cache.set({score, solution}, t);
if(score != N && at_least_one_true){
shake_single_if_equal(solution, t);
}
}
bool solve_sat(const task& t){
auto it = global_cache.get(t.id);
if(is_timeouted)return false;
if(it.first == N){
return true;
}
if(t.steps == 1){
sat_solver_k(t);
} else if(t.steps == 2 && weight(t)< 100){ /// ------------ RT_ADJ
sat_solver_k(t);
}
return global_cache.get(t.id ).first == N;
}
bool solve_fast(const task& t){
if(is_timeouted)return false;
auto it = global_cache.get(t.id);
if(it.first == N){
return true;
}
{
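        // Seed the local search with cheap candidate boards: the empty board for light targets,
        // a checked pattern, and the target itself (or its one-step evolution when steps is odd).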
if(weight(t)< 200){
shake_single_if_equal(CW(N), t, true);
} else {
// shake_single_if_equal(make_random() , t, true);
}
shake_single_if_equal(make_checked() , t, true);
if(t.steps % 2 == 0)
shake_single_if_equal(t.field, t, true);
else
shake_single_if_equal(run_step(t.field), t, true);
}
    // enabled only because it proved efficient in practice
int score = it.first;
size_t iters = 5;
CW next = s2vec(it.second);
while(iters--){
CW next2 = shake_single_if_equal(next, t, true);
if(validate(next2, t)>= validate(next, t)) {
next = next2;
}
it = global_cache.get(t.id);
int new_score = it.first;
if(new_score == N)break;
if(new_score != score){
score = new_score;
next = s2vec(it.second);
iters = 11;
}
}
return global_cache.get(t.id ).first == N;
}
bool solve_slower(const task& t){
if(is_timeouted)return false;
auto it = global_cache.get(t.id);
if(it.first == N){
return true;
}
    // enabled only because it proved efficient in practice
int score = it.first;
size_t iters = 4;
CW next = s2vec(it.second);
while(iters--){
CW next2 = shake_single_if_equal(next, t);
CW next3 = shake_k_and_single(t, next2, 8);
if(validate(next3, t)>= validate(next, t)) {
next = next3;
}
if(validate(next2, t)>= validate(next, t)) {
next = next2;
}
it = global_cache.get(t.id);
int new_score = it.first;
if(new_score == N)break;
if(new_score != score){
score = new_score;
next = s2vec(it.second);
iters = 6;
}
}
/*
for(int i = 0; i < 4; ++i){
CW next = make_random() ;
for(int j = 0; j < 5; ++j){
next = shake_k_and_single(t, next, 30);
}
}
*/
// trying to run upfront far
// run_upfront(id, end, steps);
return global_cache.get(t.id ).first == N;
}
void validate_training(const std::string& file){
ifstream input(file);
string line;
input >> line;
while(input >> line){
vector<string> v = split(line, ',');
if(v.size() != static_cast<size_t>(N + N + 2)) {
cerr << "incorrect input within line within validate: " << line << endl;
terminate(1);
}
string id = v[0];
int steps = stoi(v[1]);
CW start(N);
CW end(N);
for(int i = 0; i < N; ++i){
start[i] =(v[i + 2] == "1");
end[i] =(v[N + i + 2] == "1");
}
if(validate(start, {id, end, steps})!= N){
cerr << "validate failed at: " << id << endl;
terminate(1);
}
}
}
pair<int, int> calc_stats(const std::vector<task>& tasks){
int count = 0;
int count_solved = 0;
int total_score = 0;
for(const task & t : tasks){
++count;
int score = global_cache.get(t.id ).first;
total_score += score;
if(score == N)++count_solved;
}
int max_score = count * N;
cout << "Solved: " << count_solved << "/" << count ;
cout << " :<>: " << total_score << "/" << max_score << " = " << static_cast<double>(total_score)/ max_score << endl;;
return {count_solved, total_score};
}
void print_stats(const vector<task>& tasks){
auto started = std::chrono::system_clock::now() ;
auto first_stats = calc_stats(tasks);
while(!is_stopped){
sleep(9);
        { // application-level time-out check
TStamp now = std::chrono::system_clock::now() ;
double total_running = std::chrono::duration_cast<std::chrono::seconds>(now - app_started ).count() ;
if(total_running > total_timeout){
is_timeouted = true;
}
}
double running =(std::chrono::system_clock::now() - started ).count() ;
auto current = calc_stats(tasks);
double speed1 =(current.first - first_stats.first)/ running * 1e9;
double speed2 =(current.second - first_stats.second)/ running * 1e9;
cout << "speed " << std::fixed << std::setprecision(6)<< speed1 << ' ' << speed2 << ' ';
}
calc_stats(tasks);
}
int main(int argc, char *argv[]){
validate_training("train.csv");
if(argc > 1 && strcmp(argv[1], "--only-validate")== 0){
cerr << "Validated train!" << endl;
terminate(0);
}
if(argc > 1 && strcmp(argv[1], "--timeout")== 0){
total_timeout = stoi(argv[2]);
}
ifstream input("test.csv");
string line;
// header
input >> line;
std::vector<task> all_tasks;
while(input >> line){
vector<string> v = split(line, ',');
if(v.size() != static_cast<size_t>(N + 2)) {
cerr << "incorrect input within line: " << line << endl;
terminate(1);
}
string id = v[0];
int steps = stoi(v[1]);
// if(id >= "51000")continue;
// if(steps != 3)continue;
CW field(N);
for(int i = 0; i < N; ++i){
field[i] = v[i + 2] == "1";
}
task t{id, field, steps};
CW empty(N);
global_cache.set({validate(empty, t), empty}, t);
all_tasks.push_back(t);
}
if(argc > 1 && strcmp(argv[1], "--shuffle")== 0){
cout << "shuffle tasks randomly" << endl;
shuffle(all_tasks.begin() , all_tasks.end() , g);
}
if(argc > 1 && strcmp(argv[1], "--sort-score")== 0){
cout << "sort by score" << endl;
sort(all_tasks.begin() , all_tasks.end() , [](const auto& i, const auto& j){
int score_a = global_cache.get(i.id ).first;
int score_b = global_cache.get(j.id ).first;
return score_a < score_b;
});
}
if(argc > 1 && strcmp(argv[1], "--sort-score-reverse")== 0){
cout << "sort by score(reverse" << endl;
sort(all_tasks.begin() , all_tasks.end() , [](const auto& i, const auto& j){
int score_a = global_cache.get(i.id ).first;
int score_b = global_cache.get(j.id ).first;
return score_a > score_b;
});
}
if(argc > 1 && strcmp(argv[1], "--sort-weight")== 0){
cout << "sort by weight" << endl;
sort(all_tasks.begin() , all_tasks.end() , [](const auto& i, const auto& j){
return(weight(i)< weight(j)) ;
});
}
cout << "solve SAT" << endl;
{
boost::asio::thread_pool pool(std::thread::hardware_concurrency());
for(const auto& t : all_tasks){
if(global_cache.get(t.id ).first == N)continue;
boost::asio::post(pool, [t]() {
solve_sat(t);
}
);
}
thread stats([all_tasks]() {print_stats(all_tasks); });
pool.join() ;
is_stopped = true;
stats.join() ;
}
cout << "solve fast" << endl;
{
is_stopped = false;
boost::asio::thread_pool pool(std::thread::hardware_concurrency());
for(const auto& t : all_tasks){
if(global_cache.get(t.id ).first == N)continue;
boost::asio::post(pool, [t]() {
solve_fast(t);
}
);
}
thread stats([all_tasks]() {print_stats(all_tasks); });
pool.join() ;
is_stopped = true;
stats.join() ;
}
while(!is_timeouted){
cout << "solve slow" << endl;
sort(all_tasks.begin() , all_tasks.end() , [](const auto& i, const auto& j){
return(weight(i)> weight(j)) ;
});
is_stopped = false;
boost::asio::thread_pool pool(std::thread::hardware_concurrency());
for(const auto& t : all_tasks){
if(global_cache.get(t.id ).first == N)continue;
boost::asio::post(pool, [t]() {
solve_slower(t);
}
);
}
thread stats([all_tasks]() {print_stats(all_tasks); });
pool.join() ;
is_stopped = true;
stats.join() ;
}
} | Conway's Reverse Game of Life 2020 |
13,173,972 | df['title'] = df['Name'].apply(lambda n: n.split(',')[1].split('.')[0].strip())
df[['title', 'Survived']].groupby('title' ).agg({'Survived': [
'mean', 'std', 'count']} ).sort_values(( 'Survived', 'count'), ascending=False )<categorify> | !apt-get install libboost-all-dev -y
| Conway's Reverse Game of Life 2020 |
13,173,972 | title_groups = {
"Capt": "sacrifies",
"Col": "army",
"Rev": "sacrifies",
"Major": "army",
"Mr" : "Mr",
"Master": "Master",
"Miss": "Miss",
"Mrs": "Mrs",
"Mme": "Mrs",
"Ms": "Mrs",
"Mlle": "Miss"
}
df['title_group'] = df['title'].apply(lambda t: title_groups[t] if t in title_groups else 'other')
df = pd.concat([df, pd.get_dummies(df['title_group'], prefix='title_group')], axis=1)
t_g = df['title_group'].unique()
useful.extend(['title_group_{}'.format(x)for x in t_g] )<sort_values> | if KAGGLE:
pass
else:
!g++ -o main-RT main-RT.cpp -std=c++17 -g -O3 -lpthread -Wall -Wextra -march=native -funroll-loops -fno-omit-frame-pointer kissat/build/libkissat.a kissat/build/handle.o | Conway's Reverse Game of Life 2020 |
13,173,972 | <prepare_x_and_y><EOS> | %%writefile dump.cpp
#include <ctime>
#include <fstream>
#include <map>
#include <string>
#include <utility>
using namespace std;
int N = 625;
int main() {
map<string, pair<int, string>> cache;
{
ifstream cache_file("processed/cache");
string id;
int score;
string field;
while(cache_file >> id >> score >> field){
auto it = cache[id];
if(it.first < score){
cache[id] = {score, field};
}
}
}
string time_suffix = to_string(time(nullptr)) ;
ofstream dump_file("processed/dump-" + time_suffix + ".csv");
ofstream shrink_cache("processed/shrink-cache-" + time_suffix);
dump_file << "id";
for(int i = 0; i < N; ++i){
dump_file << ",start_" << i;
}
    dump_file << '\n';
for(auto i : cache){
        dump_file << i.first << "," << i.second.second << '\n';
        shrink_cache << i.first << ' ' << i.second.first << ' ' << i.second.second << '\n';
}
} | Conway's Reverse Game of Life 2020 |
12,266,838 | <SOS> metric: PostProcessorKernel Kaggle data source: conways-reverse-game-of-life-2020<train_model> | import numpy as np
import matplotlib.pyplot as plt
from IPython.display import clear_output
import pandas as pd
from PIL import Image
import time
import math
from tqdm.auto import tqdm | Conway's Reverse Game of Life 2020 |
12,266,838 | clf = RandomForestClassifier(n_estimators=100, random_state=1)
clf = clf.fit(train_X, train_y )<drop_column> | print(sys.version ) | Conway's Reverse Game of Life 2020 |
12,266,838 | useful = importances.tail(33 ).index.tolist()
useful.remove('title_group_Mr')
useful.remove('Fare')
useful.remove('older_relative_age')
train_X=train[useful]
train_X.shape<train_on_grid> | import tensorflow as tf | Conway's Reverse Game of Life 2020 |
12,266,838 | search_best_hyperparameters = False
if search_best_hyperparameters:
parameter_grid = {
'n_estimators': [10, 20, 50, 100, 200, 500],
'learning_rate': [0.1, 0.2, 0.5, 1, 1.2],
'random_state': [1]
}
model = AdaBoostClassifier()
gs = GridSearchCV(
model,
scoring='accuracy',
param_grid=parameter_grid,
cv=4,
n_jobs=-1)
gs.fit(train_X, train_y)
params = gs.best_params_
print(params)
else:
params = {
'learning_rate': 0.1,
'n_estimators': 500,
'random_state': 1
}<train_model> | tf.__version__ | Conway's Reverse Game of Life 2020 |
12,266,838 | clf = AdaBoostClassifier(**params)
clf = clf.fit(train_X, train_y)
clf.score(train_X, train_y )<predict_on_test> | SIZE = 25 | Conway's Reverse Game of Life 2020 |
12,266,838 | test = df[df['Survived'].isnull() ]
test_X = test[useful]
test_y = clf.predict(test_X )<data_type_conversions> | def life_step_1(X):
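    # Forward Conway step: count live neighbours by summing the eight shifted copies of the
    # board (np.roll wraps around, matching the competition's toroidal grid); a cell is alive
    # next generation if it has exactly 3 neighbours, or if it is alive and has exactly 2.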
nbrs_count = sum(np.roll(np.roll(X, i, 0), j, 1)
for i in(-1, 0, 1)for j in(-1, 0, 1)
if(i != 0 or j != 0))
return(nbrs_count == 3)|(X &(nbrs_count == 2)) | Conway's Reverse Game of Life 2020 |
12,266,838 | submit = pd.DataFrame(test_y.astype(int), index=test_X.index, columns=['Survived'])
submit.head()<save_to_csv> | train_df = pd.read_csv('/kaggle/input/conways-reverse-game-of-life-2020/train.csv')
test_df = pd.read_csv('/kaggle/input/conways-reverse-game-of-life-2020/test.csv')
print(train_df.shape)
print(test_df.shape ) | Conway's Reverse Game of Life 2020 |
12,266,838 | submit.to_csv('submission.csv' )<import_modules> | train_df.groupby(['delta'] ).size() | Conway's Reverse Game of Life 2020 |
12,266,838 | import numpy as np
import pandas as pd
import numpy as np
import pandas as pd
from sympy import simplify, cos, sin, Symbol, Function, tanh, pprint, init_printing, exp
from sympy.functions import Min,Max
<prepare_x_and_y> | sample_start = train_sample.loc[:, train_sample.columns.str.startswith('start')]
sample_stop = train_sample.loc[:, train_sample.columns.str.startswith('stop')] | Conway's Reverse Game of Life 2020 |
12,266,838 | A = 0.058823499828577
B = 0.841127
C = 0.138462007045746
D = 0.31830988618379069
E = 2.810815
F = 0.63661977236758138
G = 5.428569793701172
H = 3.1415926535897931
I = 0.592158
J = 4.869778
K = 0.063467
L = -0.091481
M = 0.0821533
N = 0.720430016517639
O = 0.230145
P = 9.89287
Q = 785
R = 1.07241
S = 281
T = 734
U = 5.3
V = 67.0
W = 2.484848
X = 8.48635
Y = 63
Z = 12.6275
AA = 0.735354
AB = 727
AC = 2.5
AD = 2.6
AE = 0.3
AF = 3.0
AG = 0.226263
AH = 2.0
AI = 12.4148
AJ = 96
AK = 0.130303
AL = 176
AM = 3.2
BIG = [A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,Y,Z,AA,AB,AC,AD,AE,AF,AG,AH,AI,AJ,AK,AL,AM]
<compute_test_metric> | start_arr = np.asarray(sample_start ).reshape(25, 25)
stop_arr = np.asarray(sample_stop ).reshape(25, 25)
time_step = train_sample['delta'].values[0]
print(time_step ) | Conway's Reverse Game of Life 2020 |
12,266,838 | def GeneticFunction(data,A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,Y,Z,AA,AB,AC,AD,AE,AF,AG,AH,AI,AJ,AK,AL,AM):
return(( np.minimum(((((A + data["Sex"])- np.cos(( data["Pclass"] / AH)))* AH)) ,(( B)))* AH)+
np.maximum(((data["SibSp"] - AC)) ,(-(np.minimum(( data["Sex"]),(np.sin(data["Parch"])))* data["Pclass"])))+
(AG *(( np.minimum(( data["Sex"]),(((data["Parch"] / AH)/ AH)))* data["Age"])- data["Cabin"])) +
np.minimum(((np.sin(( data["Parch"] *(( data["Fare"] - AA)* AH)))* AH)) ,(( data["SibSp"] / AH)))+
np.maximum(( np.minimum(( -np.cos(data["Embarked"])) ,(C))),(np.sin(((data["Cabin"] - data["Fare"])* AH)))) +
-np.minimum(((((data["Age"] * data["Parch"])* data["Embarked"])+ data["Parch"])) ,(np.sin(data["Pclass"])))+
np.minimum(( data["Sex"]),(( np.sin(-(data["Fare"] * np.cos(( data["Fare"] * W)))) / AH)))+
np.minimum(((O)) ,(np.sin(np.minimum(((( V / AH)* np.sin(data["Fare"]))),(D)))))+
np.sin(( np.sin(data["Cabin"])*(np.sin(( Z)) * np.maximum(( data["Age"]),(data["Fare"])))))+
np.sin(((np.minimum(( data["Fare"]),(( data["Cabin"] * data["Embarked"])))/ AH)* -data["Fare"])) +
np.minimum(((( AD * data["SibSp"])* np.sin(((AJ)* np.sin(data["Cabin"]))))),(data["Parch"])) +
np.sin(np.sin(( np.maximum(( np.minimum(( data["Age"]),(data["Cabin"]))),(( data["Fare"] * AK)))* data["Cabin"])))+
np.maximum(( np.sin(((AI)*(data["Age"] / AH)))) ,(np.sin(( -AF * data["Cabin"])))) +
(np.minimum(( np.sin(((( np.sin(((data["Fare"] * AH)* AH)) * AH)* AH)* AH))),(data["SibSp"])) / AH)+
(( data["Sex"] - data["SibSp"])*(np.cos(((data["Embarked"] - AA)+ data["Age"])) / AH)) +
(( np.sin(data["Cabin"])/ AH)-(np.cos(np.minimum(( data["Age"]),(data["Embarked"])))* np.sin(data["Embarked"])))+
np.minimum(( AE),(( data["Sex"] *(J *(N - np.sin(( data["Age"] * AH)))))))+
(np.minimum(( np.cos(data["Fare"])) ,(np.maximum(( np.sin(data["Age"])) ,(data["Parch"])))) * np.cos(( data["Fare"] / AH)))+
np.sin(( data["Parch"] * np.minimum(((data["Age"] - K)) ,(( np.cos(( data["Pclass"] * AH)) / AH)))))+
(data["Parch"] *(np.sin(((data["Fare"] *(I * data["Age"])) * AH)) / AH)) +
(D * np.cos(np.maximum(((0.5 * data["Fare"])) ,(( np.sin(N)* data["Age"])))))+
(np.minimum(((data["SibSp"] / AH)) ,(np.sin(((data["Pclass"] - data["Fare"])* data["SibSp"])))) * data["SibSp"])+
np.tanh(( data["Sex"] * np.sin(( U * np.sin(( data["Cabin"] * np.cos(data["Fare"])))))))+
(np.minimum(( data["Parch"]),(data["Sex"])) * np.cos(np.maximum(((np.cos(data["Parch"])+ data["Age"])) ,(AM)))) +
(np.minimum(( np.tanh(((data["Cabin"] / AH)+ data["Parch"]))),(( data["Sex"] + np.cos(data["Age"])))) / AH)+
(np.sin(( np.sin(data["Sex"])*(np.sin(( data["Age"] * data["Pclass"])) * data["Pclass"])))/ AH)+
(data["Sex"] *(np.cos(((data["Sex"] + data["Fare"])*(( X)*(Y)))) / AH)) +
np.minimum(( data["Sex"]),(( np.cos(( data["Age"] * np.tanh(np.sin(np.cos(data["Fare"])))))/ AH)))+
(np.tanh(np.tanh(-np.cos(( np.maximum(( np.cos(data["Fare"])) ,(L)) * data["Age"])))) / AH)+
(np.tanh(np.cos(( np.cos(data["Age"])+(data["Age"] + np.minimum(( data["Fare"]),(data["Age"])))))) / AH)+
(np.tanh(np.cos(( data["Age"] *(( -AH + np.sin(data["SibSp"])) + data["Fare"])))) / AH)+
(np.minimum(((( S)- data["Fare"])) ,(np.sin(( np.maximum(((AL)) ,(data["Fare"])) * data["SibSp"])))) * AH)+
np.sin(((np.maximum(( data["Embarked"]),(data["Age"])) * AH)*(((Q)* H)* data["Age"])))+
np.minimum(( data["Sex"]),(np.sin(-(np.minimum(((data["Cabin"] / AH)) ,(data["SibSp"])) *(data["Fare"] / AH)))))+
np.sin(np.sin(( data["Cabin"] *(data["Embarked"] +(np.tanh(-data["Age"])+ data["Fare"])))))+
(np.cos(np.cos(data["Fare"])) *(np.sin(( data["Embarked"] -(( T)* data["Fare"])))/ AH)) +
(( np.minimum(( data["SibSp"]),(np.cos(data["Fare"])))* np.cos(data["SibSp"])) * np.sin(( data["Age"] / AH)))+
(np.sin(( np.sin(( data["SibSp"] * np.cos(( data["Fare"] * AH)))) +(data["Cabin"] * AH)))/ AH)+
(((data["Sex"] * data["SibSp"])* np.sin(np.sin(-(data["Fare"] * data["Cabin"])))) * AH)+
(np.sin(( data["SibSp"] *(((( G + V)* AH)/ AH)* data["Age"])))/ AH)+
(data["Pclass"] *(np.sin(((data["Embarked"] * data["Cabin"])*(data["Age"] -(R)))) / AH)) +
(np.cos(((((-data["SibSp"] + data["Age"])+ data["Parch"])* data["Embarked"])/ AH)) / AH)+
(D * np.sin(((data["Age"] *(( data["Embarked"] * np.sin(data["Fare"])) * AH)) * AH)))+
(( np.minimum(((data["Age"] * A)) ,(data["Sex"])) - F)* np.tanh(np.sin(data["Pclass"])))+
-np.minimum(((np.cos(((AB)*(( data["Fare"] + data["Parch"])* AH)))/ AH)) ,(data["Fare"])) +
(np.minimum(( np.cos(data["Fare"])) ,(data["SibSp"])) * np.minimum(( np.sin(data["Parch"])) ,(np.cos(( data["Embarked"] * AH)))))+
(np.minimum(((( data["Fare"] / AH)- E)) ,(C)) * np.sin(( K * data["Age"])))+
np.minimum(((M)) ,(((np.sin(data["Fare"])+ data["Embarked"])- np.cos(( data["Age"] *(P)))))) )<feature_engineering> | updated_arr = np.copy(start_arr)
steps = []
steps.append(updated_arr)
for x in range(time_step):
updated_arr = life_step_1(updated_arr)
steps.append(updated_arr ) | Conway's Reverse Game of Life 2020 |
12,266,838 | def CleanData(data):
data.drop(['Ticket', 'Name'], inplace=True, axis=1)
data.Sex.fillna('0', inplace=True)
data.loc[data.Sex != 'male', 'Sex'] = 0
data.loc[data.Sex == 'male', 'Sex'] = 1
data.Cabin.fillna('0', inplace=True)
data.loc[data.Cabin.str[0] == 'A', 'Cabin'] = 1
data.loc[data.Cabin.str[0] == 'B', 'Cabin'] = 2
data.loc[data.Cabin.str[0] == 'C', 'Cabin'] = 3
data.loc[data.Cabin.str[0] == 'D', 'Cabin'] = 4
data.loc[data.Cabin.str[0] == 'E', 'Cabin'] = 5
data.loc[data.Cabin.str[0] == 'F', 'Cabin'] = 6
data.loc[data.Cabin.str[0] == 'G', 'Cabin'] = 7
data.loc[data.Cabin.str[0] == 'T', 'Cabin'] = 8
data.loc[data.Embarked == 'C', 'Embarked'] = 1
data.loc[data.Embarked == 'Q', 'Embarked'] = 2
data.loc[data.Embarked == 'S', 'Embarked'] = 3
data.Embarked.fillna(0, inplace=True)
data.fillna(-1, inplace=True)
return data.astype(float)
def Outputs(data):
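    # 1 - sigmoid(score), rounded to a hard 0/1 label for the Survived column.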
return np.round(1.-(1./(1.+np.exp(-data))))
<load_from_csv> | single_step_df = train_df.loc[train_df['delta'] == 1] | Conway's Reverse Game of Life 2020 |
12,266,838 | raw_train = pd.read_csv('.. /input/titanic/train.csv')
raw_test = pd.read_csv('.. /input/titanic/test.csv')
cleanedTrain = CleanData(raw_train)
cleanedTest = CleanData(raw_test )<create_dataframe> | idx = 13970
single_img = np.asarray(single_step_df.loc[idx][2:627] ).reshape(25,25 ).astype('uint8' ) | Conway's Reverse Game of Life 2020 |
12,266,838 | thisArray = BIG.copy()
testPredictions = Outputs(GeneticFunction(cleanedTrain,thisArray[0],thisArray[1],thisArray[2],thisArray[3],thisArray[4],thisArray[5],thisArray[6],thisArray[7],thisArray[8],thisArray[9],thisArray[10],thisArray[11],thisArray[12],thisArray[13],thisArray[14],thisArray[15],thisArray[16],thisArray[17],thisArray[18],thisArray[19],thisArray[20],thisArray[21],thisArray[22],thisArray[23],thisArray[24],thisArray[25],thisArray[26],thisArray[27],thisArray[28],thisArray[29],thisArray[30],thisArray[31],thisArray[32],thisArray[33],thisArray[34],thisArray[35],thisArray[36],thisArray[37],thisArray[38]))
pdcheck = pd.DataFrame({'Survived': testPredictions.astype(int)})
ret = pdcheck.Survived.where(pdcheck["Survived"].values==cleanedTrain["Survived"].values ).notna()
t,f = ret.value_counts()
score = 100/(t+f)*t
print("Training set score: ",score)
<save_to_csv> | end_single_img_33 = life_step_1(single_img_33 ) | Conway's Reverse Game of Life 2020 |
12,266,838 | testPredictions = Outputs(GeneticFunction(cleanedTest,A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,Y,Z,AA,AB,AC,AD,AE,AF,AG,AH,AI,AJ,AK,AL,AM))
pdtest = pd.DataFrame({'PassengerId': cleanedTest.PassengerId.astype(int),
'Survived': testPredictions.astype(int)})
pdtest.to_csv('submission.csv', index=False)
pdtest.head()<import_modules> | def postprocess(arr):
return arr[4:-4, 4:-4] | Conway's Reverse Game of Life 2020 |
12,266,838 | from sklearn.metrics import accuracy_score
from sklearn.preprocessing import OneHotEncoder, LabelEncoder, StandardScaler,MinMaxScaler
import pickle
import string<define_variables> | start_key = ['start_' + str(i)for i in range(625)]
stop_key = ['stop_' + str(i)for i in range(625)] | Conway's Reverse Game of Life 2020 |