markdown | code | output | license | path | repo_name
---|---|---|---|---|---
Join the stage2 data
|
# Check for notation inconsistencies
print(np.sort(train['stage'].unique()))
print(np.sort(test['stage'].unique()))
# "mystery~" stages are only unlocked during events and do not appear in this train/test data
print(np.sort(stage2['key'].unique()))
# Rename columns for the merge
stage2_r = stage2.rename(columns={'key': 'stage', 'area': 'stage_size2'})
stage2_r.columns
# Columns to keep
st2_col = ['stage_size2',    # stage area
           'stage',          # stage name
           # 'name',         # stage name in other languages
           # 'release_at',   # release date/time
           # 'short_name',   # short name
           # 'splatnet'      # ID?
          ]
stage2_rc = stage2_r[st2_col]
# Merge
train_ss = pd.merge(train_s, stage2_rc, on='stage', how='left')
test_ss = pd.merge(test_s, stage2_rc, on='stage', how='left')
# Check for nulls
print(train_ss[['stage_size2']].isnull().sum())
print(test_ss[['stage_size2']].isnull().sum())
|
stage_size2 0
dtype: int64
stage_size2 0
dtype: int64
|
MIT
|
program/lightGBM_base_v0.1.ipynb
|
tomokoochi/splatoon_competition
|
Join the weapon data
|
# Weapons in the train data
train_weapon = sorted(list(set(train['A1-weapon']) & set(train['A2-weapon']) & set(train['A3-weapon']) & set(train['A4-weapon'])
                           & set(train['B1-weapon']) & set(train['B2-weapon']) & set(train['B3-weapon']) & set(train['B4-weapon'])))
print('{} types'.format(len(train_weapon)))
print(train_weapon)
# Weapons in the test data
test_weapon = sorted(list(set(test['A1-weapon']) & set(test['A2-weapon']) & set(test['A3-weapon']) & set(test['A4-weapon'])
                          & set(test['B1-weapon']) & set(test['B2-weapon']) & set(test['B3-weapon']) & set(test['B4-weapon'])))
print('{} types'.format(len(test_weapon)))
print(test_weapon)
# Weapons in the external data
gaibu_weapon = np.sort(weapon['key'].unique())
print('{} types'.format(len(gaibu_weapon)))
print(gaibu_weapon)
# Compare for notation differences -> none found
print(set(train_weapon) - set(gaibu_weapon))
print(set(gaibu_weapon) - set(train_weapon))
print(set(test_weapon) - set(gaibu_weapon))
print(set(gaibu_weapon) - set(test_weapon))
# Columns to keep
# Reference: https://stat.ink/api-info/weapon2
weapon_col = ['category1',   # weapon category
              'category2',   # weapon category
              'key',         # weapon name
              'subweapon',   # sub weapon
              'special',     # special weapon
              'mainweapon',  # main weapon
              'reskin',      # weapon with identical performance
              # 'splatnet',  # app user ID
              # foreign-language weapon names below
              # '[de-DE]', '[en-GB]', '[en-US]', '[es-ES]', '[es-MX]', '[fr-CA]',
              # '[fr-FR]', '[it-IT]', '[ja-JP]', '[nl-NL]', '[ru-RU]', '[zh-CN]', '[zh-TW]'
             ]
# Keep only the needed columns and rename the merge key
weapon_c = weapon[weapon_col].rename(columns={'key': 'weapon'})
weapon_c.head(3)
# Merge the weapon info onto each of the A1-B4 weapon columns
weapon_cc = weapon_c.copy()
train_ssw = train_ss.copy()
test_ssw = test_ss.copy()
import itertools
for a, num in itertools.product(['A', 'B'], [1, 2, 3, 4]):
    col_list = []
    # Prefix the weapon column names with A1-B4
    for col in weapon_c.columns:
        tmp_col = a + str(num) + '-' + col
        col_list.append(tmp_col)
    weapon_cc.columns = col_list
    # Merge onto train and test
    train_ssw = pd.merge(train_ssw, weapon_cc, on=a + str(num) + '-weapon', how='left')
    test_ssw = pd.merge(test_ssw, weapon_cc, on=a + str(num) + '-weapon', how='left')
    # Null check after the merge
    print(train_ssw[col_list].isnull().sum())
    print(test_ssw[col_list].isnull().sum())
# Only rows with no weapon info in the source data (disconnects) are null, so this is OK
train_input = train_ssw.copy()
test_input = test_ssw.copy()
|
_____no_output_____
|
MIT
|
program/lightGBM_base_v0.1.ipynb
|
tomokoochi/splatoon_competition
|
Preprocessing
|
# Fill missing values
def fill_all_null(df, num):
    for col_name in df.columns[df.isnull().sum() != 0]:
        df[col_name] = df[col_name].fillna(num)
# Fill missing values in the train and test data with -1
fill_all_null(train_input, -1)
fill_all_null(test_input, -1)
# Define the target encoding function
## Uses holdout target statistics; there is room to change this
def change_to_target2(train_df, test_df, input_column_name, output_column_name):
    from sklearn.model_selection import KFold
    # NaN filling
    ## Already done above, so not needed here
    # train_df[input_column_name] = train_df[input_column_name].fillna('-1')
    # test_df[input_column_name] = test_df[input_column_name].fillna('-1')
    kf = KFold(n_splits=5, shuffle=True, random_state=71)
    #=========================================================#
    c = input_column_name
    # Compute the mean of y for each category over the whole training data
    data_tmp = pd.DataFrame({c: train_df[c], 'target': train_df['y']})
    target_mean = data_tmp.groupby(c)['target'].mean()
    # Replace the test-data categories with the category means
    test_df[output_column_name] = test_df[c].map(target_mean)
    # Prepare an array to hold the transformed values
    tmp = np.repeat(np.nan, train_df.shape[0])
    for i, (train_index, test_index) in enumerate(kf.split(train_df)):  # loops NFOLDS times
        # Compute the mean of the target per category on the training folds
        target_mean = data_tmp.iloc[train_index].groupby(c)['target'].mean()
        # Store the transformed values for the validation fold in the temporary array
        tmp[test_index] = train_df[c].iloc[test_index].map(target_mean)
    # Replace the original variable with the transformed data
    train_df[output_column_name] = tmp
#========================================================#
# Build the list of object-dtype columns
object_col_list = train_input.select_dtypes(include=object).columns
# Target-encode every object column
for col in object_col_list:
    change_to_target2(train_input, test_input, col, "enc_" + col)
# Drop the original (pre-encoding) columns
train_input = train_input.drop(object_col_list, axis=1)
test_input = test_input.drop(object_col_list, axis=1)
# Drop the 'id' column
train_input = train_input.drop('id', axis=1)
test_input = test_input.drop('id', axis=1)
# Check for missing values in the training data
train_input.isnull().sum().sum()
# Check for missing values in the test data
test_input.isnull().sum().sum()
# Missing values appear when a fold has too little data to compute a category mean during target encoding. Fill with 0.
fill_all_null(train_input, 0)
fill_all_null(test_input, 0)
|
_____no_output_____
|
MIT
|
program/lightGBM_base_v0.1.ipynb
|
tomokoochi/splatoon_competition
|
Checking the data
|
# Check the columns of the train and test data
print(train_input.columns)
print(test_input.columns)
|
Index(['A1-level', 'A2-level', 'A3-level', 'A4-level', 'B1-level', 'B2-level',
'B3-level', 'B4-level', 'y', 'stage_size1', 'stage_size2', 'enc_period',
'enc_game-ver', 'enc_lobby-mode', 'enc_lobby', 'enc_mode', 'enc_stage',
'enc_A1-weapon', 'enc_A1-rank', 'enc_A2-weapon', 'enc_A2-rank',
'enc_A3-weapon', 'enc_A3-rank', 'enc_A4-weapon', 'enc_A4-rank',
'enc_B1-weapon', 'enc_B1-rank', 'enc_B2-weapon', 'enc_B2-rank',
'enc_B3-weapon', 'enc_B3-rank', 'enc_B4-weapon', 'enc_B4-rank',
'enc_A1-category1', 'enc_A1-category2', 'enc_A1-subweapon',
'enc_A1-special', 'enc_A1-mainweapon', 'enc_A1-reskin',
'enc_A2-category1', 'enc_A2-category2', 'enc_A2-subweapon',
'enc_A2-special', 'enc_A2-mainweapon', 'enc_A2-reskin',
'enc_A3-category1', 'enc_A3-category2', 'enc_A3-subweapon',
'enc_A3-special', 'enc_A3-mainweapon', 'enc_A3-reskin',
'enc_A4-category1', 'enc_A4-category2', 'enc_A4-subweapon',
'enc_A4-special', 'enc_A4-mainweapon', 'enc_A4-reskin',
'enc_B1-category1', 'enc_B1-category2', 'enc_B1-subweapon',
'enc_B1-special', 'enc_B1-mainweapon', 'enc_B1-reskin',
'enc_B2-category1', 'enc_B2-category2', 'enc_B2-subweapon',
'enc_B2-special', 'enc_B2-mainweapon', 'enc_B2-reskin',
'enc_B3-category1', 'enc_B3-category2', 'enc_B3-subweapon',
'enc_B3-special', 'enc_B3-mainweapon', 'enc_B3-reskin',
'enc_B4-category1', 'enc_B4-category2', 'enc_B4-subweapon',
'enc_B4-special', 'enc_B4-mainweapon', 'enc_B4-reskin'],
dtype='object')
Index(['A1-level', 'A2-level', 'A3-level', 'A4-level', 'B1-level', 'B2-level',
'B3-level', 'B4-level', 'stage_size1', 'stage_size2', 'enc_period',
'enc_game-ver', 'enc_lobby-mode', 'enc_lobby', 'enc_mode', 'enc_stage',
'enc_A1-weapon', 'enc_A1-rank', 'enc_A2-weapon', 'enc_A2-rank',
'enc_A3-weapon', 'enc_A3-rank', 'enc_A4-weapon', 'enc_A4-rank',
'enc_B1-weapon', 'enc_B1-rank', 'enc_B2-weapon', 'enc_B2-rank',
'enc_B3-weapon', 'enc_B3-rank', 'enc_B4-weapon', 'enc_B4-rank',
'enc_A1-category1', 'enc_A1-category2', 'enc_A1-subweapon',
'enc_A1-special', 'enc_A1-mainweapon', 'enc_A1-reskin',
'enc_A2-category1', 'enc_A2-category2', 'enc_A2-subweapon',
'enc_A2-special', 'enc_A2-mainweapon', 'enc_A2-reskin',
'enc_A3-category1', 'enc_A3-category2', 'enc_A3-subweapon',
'enc_A3-special', 'enc_A3-mainweapon', 'enc_A3-reskin',
'enc_A4-category1', 'enc_A4-category2', 'enc_A4-subweapon',
'enc_A4-special', 'enc_A4-mainweapon', 'enc_A4-reskin',
'enc_B1-category1', 'enc_B1-category2', 'enc_B1-subweapon',
'enc_B1-special', 'enc_B1-mainweapon', 'enc_B1-reskin',
'enc_B2-category1', 'enc_B2-category2', 'enc_B2-subweapon',
'enc_B2-special', 'enc_B2-mainweapon', 'enc_B2-reskin',
'enc_B3-category1', 'enc_B3-category2', 'enc_B3-subweapon',
'enc_B3-special', 'enc_B3-mainweapon', 'enc_B3-reskin',
'enc_B4-category1', 'enc_B4-category2', 'enc_B4-subweapon',
'enc_B4-special', 'enc_B4-mainweapon', 'enc_B4-reskin'],
dtype='object')
|
MIT
|
program/lightGBM_base_v0.1.ipynb
|
tomokoochi/splatoon_competition
|
Preparing for training
|
# Split the training data into features and target
target = train_input['y']
train_x = train_input.drop('y', axis=1)
# Set the LightGBM parameters
params = {
    # Binary classification problem
    'objective': 'binary',
    # Loss function: binary logloss
    # 'metric': 'auc',
    'metric': 'binary_logloss',
    # Maximum number of iterations
    'num_iterations': 1000,
    # Early stopping rounds
    'early_stopping_rounds': 100,
}
|
_____no_output_____
|
MIT
|
program/lightGBM_base_v0.1.ipynb
|
tomokoochi/splatoon_competition
|
Running training and prediction
|
# Train & predict with k-fold cross-validation (K=10)
FOLD_NUM = 10
kf = KFold(n_splits=FOLD_NUM,
           shuffle=True,
           random_state=42)
# Define the number of LightGBM rounds
num_round = 10000
# Initialize the validation scores
scores = []
# Initialize the test-set predictions
pred_cv = np.zeros(len(test.index))
for i, (tdx, vdx) in enumerate(kf.split(train_x, target)):
    print(f'Fold : {i}')
    # Split into training and validation data
    X_train, X_valid, y_train, y_valid = train_x.iloc[tdx], train_x.iloc[vdx], target.values[tdx], target.values[vdx]
    lgb_train = lgb.Dataset(X_train, y_train)
    lgb_valid = lgb.Dataset(X_valid, y_valid)
    # Run training
    model = lgb.train(params, lgb_train, num_boost_round=num_round,
                      valid_names=["train", "valid"], valid_sets=[lgb_train, lgb_valid],
                      verbose_eval=100)
    # Predict on the validation data and convert to win/loss (0 or 1)
    va_pred = np.round(model.predict(X_valid, num_iteration=model.best_iteration))
    # Compute the accuracy score
    score_ = accuracy_score(y_valid, va_pred)
    # Store this fold's validation score
    scores.append(score_)
    # Predict on the test data
    submission = model.predict(test_input, num_iteration=model.best_iteration)
    # Accumulate the test predictions divided by the number of folds
    # (equivalent to averaging the per-fold predictions)
    pred_cv += submission / FOLD_NUM
# Convert the final test predictions to win/loss (0 or 1)
pred_cv_int = np.round(pred_cv)
# Print the final accuracy score, averaged over folds
print('')
print('################################')
print('CV_score:' + str(np.mean(scores)))
# Create the submission file
pd.DataFrame({"id": range(len(pred_cv_int)), "y": pred_cv_int.astype(np.int64)}).to_csv("../submit/submission_v0.2.csv", index=False)
|
_____no_output_____
|
MIT
|
program/lightGBM_base_v0.1.ipynb
|
tomokoochi/splatoon_competition
|
Angle-based Outlier Detector (ABOD)
|
clf1=ABOD(contamination=outliers_fraction)
clf1.fit(X)
y_pred1=clf1.predict(X)
from sklearn.metrics import confusion_matrix
confusion_matrix(y, y_pred1)
|
_____no_output_____
|
MIT
|
outlierdetector_lib.ipynb
|
eaglewarrior/Anamoly-Detection
|
Cluster-based Local Outlier Factor (CBLOF)
|
clf2=CBLOF(contamination=outliers_fraction,check_estimator=False, random_state=random_state)
clf2.fit(X)
y_pred2=clf2.predict(X)
from sklearn.metrics import confusion_matrix
confusion_matrix(y, y_pred2)
|
_____no_output_____
|
MIT
|
outlierdetector_lib.ipynb
|
eaglewarrior/Anamoly-Detection
|
Feature Bagging
|
clf3=FeatureBagging(LOF(n_neighbors=35),contamination=outliers_fraction,check_estimator=False,random_state=random_state)
clf3.fit(X)
y_pred3=clf3.predict(X)
from sklearn.metrics import confusion_matrix
confusion_matrix(y, y_pred3)
|
_____no_output_____
|
MIT
|
outlierdetector_lib.ipynb
|
eaglewarrior/Anamoly-Detection
|
Histogram-base Outlier Detection (HBOS)
|
clf4=HBOS(alpha=0.1, contamination=0.037, n_bins=10, tol=0.9)
clf4.fit(X)
y_pred4=clf4.predict(X)
from sklearn.metrics import confusion_matrix
confusion_matrix(y, y_pred4)
|
_____no_output_____
|
MIT
|
outlierdetector_lib.ipynb
|
eaglewarrior/Anamoly-Detection
|
Isolation Forest
|
clf5=IForest(contamination=outliers_fraction,random_state=random_state)
clf5.fit(X)
y_pred5=clf5.predict(X)
from sklearn.metrics import confusion_matrix
confusion_matrix(y, y_pred5)
|
_____no_output_____
|
MIT
|
outlierdetector_lib.ipynb
|
eaglewarrior/Anamoly-Detection
|
K Nearest Neighbors (KNN)
|
clf6=KNN(contamination=outliers_fraction)
clf6.fit(X)
y_pred6=clf6.predict(X)
from sklearn.metrics import confusion_matrix
confusion_matrix(y, y_pred6)
|
_____no_output_____
|
MIT
|
outlierdetector_lib.ipynb
|
eaglewarrior/Anamoly-Detection
|
Average KNN
|
clf7=KNN(method='mean',contamination=outliers_fraction)
clf7.fit(X)
y_pred7=clf7.predict(X)
from sklearn.metrics import confusion_matrix
confusion_matrix(y, y_pred7)
|
_____no_output_____
|
MIT
|
outlierdetector_lib.ipynb
|
eaglewarrior/Anamoly-Detection
|
Exercise 1: Add the specified code for each code cell, running the cells _in order_. Create a variable `food` that stores your favorite kind of food. Print or output the variable.
|
food = "pizza"
|
_____no_output_____
|
MIT
|
exercise-1/exercise.ipynb
|
ajm1813/ch4-python-intro
|
Create a variable `restaurant` that stores your favorite place to eat that kind of food.
|
restaurant = "Delfinos pizza"
|
_____no_output_____
|
MIT
|
exercise-1/exercise.ipynb
|
ajm1813/ch4-python-intro
|
Print the message `"I'm going to RESTAURANT for some FOOD"`, replacing the restaurant and food with your variables.
|
print ("I'm going to " + restaurant + " for some " + food)
|
I'm going to Delfinos pizza for some pizza
|
MIT
|
exercise-1/exercise.ipynb
|
ajm1813/ch4-python-intro
|
Create a variable `num_friends` equal to the number of friends you would like to eat with.
|
num_friends = 5
|
_____no_output_____
|
MIT
|
exercise-1/exercise.ipynb
|
ajm1813/ch4-python-intro
|
Print a message `"I'm going with X friends"`, replacing the X with the number of friends.
|
print ("I'm going with " + str(num_friends) + " friends ")
|
I'm going with 5 friends
|
MIT
|
exercise-1/exercise.ipynb
|
ajm1813/ch4-python-intro
|
Create a variable `meal_price`, which is how expensive you think one meal at the restaurant would be. This price should be a `float`.
|
meal_price = 35.90
|
_____no_output_____
|
MIT
|
exercise-1/exercise.ipynb
|
ajm1813/ch4-python-intro
|
Update (re-assign) the `meal_price` variable so it includes a 15% tip—that is, so the price is 15% higher. Output the variable.
|
meal_price = meal_price * 1.15
|
_____no_output_____
|
MIT
|
exercise-1/exercise.ipynb
|
ajm1813/ch4-python-intro
|
Create a variable `total_cost` that has the total estimated cost of the bill for you and all of your friends. Output or print the variable
|
total_cost = meal_price * num_friends
|
_____no_output_____
|
MIT
|
exercise-1/exercise.ipynb
|
ajm1813/ch4-python-intro
|
Create a variable `budget` representing your spending budget for a night out.
|
budget = 500
|
_____no_output_____
|
MIT
|
exercise-1/exercise.ipynb
|
ajm1813/ch4-python-intro
|
Create a variable `max_friends`, which is the maximum number of friends you can invite, at the estimated meal price, while staying within your budget. Output or print this value. Be careful that you only invite whole people!
|
max_friends = int(budget / meal_price)
|
_____no_output_____
|
MIT
|
exercise-1/exercise.ipynb
|
ajm1813/ch4-python-intro
|
Bonus: Create a variable `chorus` that is the string `"FOOD time!"` _repeated_ once for each of the friends you are able to bring. _Hint_ use the **`*`** operator. Print out the variable.
|
print ("food time!\n " * 5)
|
food time!
food time!
food time!
food time!
food time!
|
MIT
|
exercise-1/exercise.ipynb
|
ajm1813/ch4-python-intro
|
OPTIMIZATION PHASES

**List of variables**

* $B$: number of full blocks/pages that hold the records ($\lceil \frac{|T|}{R} \rceil$; $B \ll |T|$)
* $R$: number of records per block/page
* $|T|$: cardinality, i.e. number of tuples of a table (size of the table)
* $D$: time to access (read or write) a disk block (approximately 0.010 seconds)
* $C$: time for the CPU to process a record (approximately $10^{-9}$ seconds)
* $d$: tree order (usually greater than 100)
* $h$: tree depth minus 1, $\lceil \log_u |T| \rceil - 1$
* $v$: number of different values in a search
* $u$: $\%load \cdot 2d$
* $k$: number of repetitions of every value in the search
* ndist(A): number of different values for attribute A (obtained from the DB catalog)
* max: maximum value of an attribute (obtained from the DB catalog)
* min: minimum value of an attribute (obtained from the DB catalog)
* $H$: time to evaluate the hash function
* $M$: memory pages for a join/sorting algorithm
* bits: bits per index block
* Domain cardinality: maximum number of different values

**List of variables for intermediate results**

Record length: $\sum$ attribute length$_i$ (+ control information)
$$|R|$$
Number of records per block
$$R_R = \lfloor \frac{B}{|R|} \rfloor$$
Number of blocks per table
$$B_R = \lceil \frac{|R|}{R_R} \rceil$$

**Cardinality estimation**

Selectivity factor: the percentage of tuples in the output with respect to the input (~0: very selective, ~1: not very selective)
$$\mathrm{SF}$$
Output cardinality estimate
$$|O| = \mathrm{SF} \cdot |R|$$ or $$|O| = \mathrm{SF} \cdot |R1| \cdot |R2|$$
Selection
$$|\mathrm{selection}(R)| = \mathrm{SF} \cdot |R|$$
Join
$$|\mathrm{join}(R, S)| = \mathrm{SF} \cdot |R| \cdot |S|$$
Union with repetitions
$$|\mathrm{union}(R, S)| = |R| + |S|$$
Union without repetitions
$$|\mathrm{union}(R, S)| = |R| + |S| - |\mathrm{intersection}(R, S)|$$
Difference (anti-join)
$$|\mathrm{difference}(R, S)| = |R| - |\mathrm{intersection}(R, S)|$$

**Optimization phases**

Assumptions:
* Materialized views
* Focus on disk access time
* Physical address of the record
* Only consider these cases:
  1. No index
  2. Unordered B-Tree with addresses (B+)
  3. Unordered Hash with addresses
  4. Ordered B-Tree with addresses (Clustered)

**Unordered B-tree with addresses (B+)**

Assumptions:
* In every tree node **2d** addresses fit
* Tree load 66% (2/3)

**Ordered B-Tree with addresses (Clustered)**

Assumptions:
* Tree load 66% (2/3) (index and table blocks)

**Unordered Hash with addresses**

Assumptions:
* No blocks for excess
* The same number of entries fit in a bucket block as in a tree block
* Bucket blocks at 80% (4/5)

**Space**

* No index: $B$
* B+: $\sum_1^{h+1} \lceil \frac{|T|}{u^i} \rceil + B$
* Clustered: $\sum_1^{h+1} \lceil \frac{|T|}{u^i} \rceil + \lceil 1.5B \rceil$
* Hash: $1 + \lceil 1.25(\frac{|T|}{2d}) \rceil + B$

Example:
$$\mathrm{Lvl_1} = \frac{|T|}{u} \qquad \mathrm{Lvl_2} = \frac{|T|}{u^2} \qquad \mathrm{Lvl_3} = \frac{|T|}{u^3}$$

**Access paths**

**Table scan** (the whole table), with $u = \frac{2}{3} \cdot 2d$:

* No index: $B \cdot D$
* B+ (only useful for sorting): $\lceil \frac{|T|}{u} \rceil \cdot D + |T| \cdot D$
* Clustered: $\lceil 1.5B \rceil \cdot D$
* Hash (useless): $\lceil 1.25(\frac{|T|}{2d}) \rceil \cdot D + |T| \cdot D$

**Search one tuple** (equality on a unique attribute), with $u = \frac{2}{3} \cdot 2d$ and $h = \lceil \log_u |T| \rceil - 1$:

* No index: $0.5B \cdot D$
* B+: $h \cdot D + D$
* Clustered: $h \cdot D + D$
* Hash: $H + D + D$

**Search several tuples** (interval, non-unique attribute), with $u = \frac{2}{3} \cdot 2d$, $h = \lceil \log_u |T| \rceil - 1$, $|O|$ the output cardinality, $v$ values in the range, $k$ repetitions per value:

* No index: $B \cdot D$
* B+: $h \cdot D + \frac{|O| - 1}{u} \cdot D + |O| \cdot D$
* Clustered: $h \cdot D + D + 1.5 \left( \frac{|O|-1}{R} \right) \cdot D$
* Hash:
$$v = 1: 1 \cdot (H + D + k \cdot D) = H + D + k \cdot D$$
$$v > 1: v \cdot (H + D + k \cdot D)$$
$$v \;\mathrm{is\;unknown}: \mathrm{useless}$$

**Statistics in Oracle**

The DBA is responsible for the statistics.

`ANALYZE [TABLE|INDEX|CLUSTER] [COMPUTE|ESTIMATE] STATISTICS;`

```sql
ANALYZE TABLE departments COMPUTE STATISTICS;
ANALYZE TABLE employees COMPUTE STATISTICS;
```

`DBMS_STATS.GATHER_TABLE_STATS( , );`

```sql
DBMS_STATS.GATHER_TABLE_STATS("username", "departments");
DBMS_STATS.GATHER_TABLE_STATS("username", "employees");
```

Kinds of statistics

| Relations | Attributes |
|:--|:--|
| Cardinality | Length |
| Number of blocks | Domain cardinality |
| Average length of records | Number of existing different values |
| | Maximum value |
| | Minimum value |

Main hypotheses in most DBMSs:
* Uniform distribution of values for each attribute
* Independence of attributes

**Selectivity Factor of a Selection**

Assuming equi-probability of values:

`WHERE A = c`
$$\mathrm{SF}(A = c) = \frac{1}{\mathrm{ndist}(A)}$$

Assuming uniform distribution and $A \in [\min, \max]$:

`WHERE A > c`
$$\mathrm{SF}(A > c) = \frac{\max - c}{\max - \min} =\begin{cases}0 & \quad \text{if}\; c \geq \max \\1 & \quad \text{if}\; c < \min\end{cases}$$

`WHERE A < c`
$$\mathrm{SF}(A < c) = \frac{c - \min}{\max - \min} =\begin{cases}0 & \quad \text{if}\; c \leq \min \\1 & \quad \text{if}\; c > \max\end{cases}$$

Assuming $\text{ndist}(A)$ is big enough:

`WHERE A <= c`
$$\mathrm{SF}(A \leq c) = \mathrm{SF}(A < c)$$

`WHERE A >= c`
$$\mathrm{SF}(A \geq c) = \mathrm{SF}(A > c)$$

Assuming P and Q are statistically **independent**:

`WHERE P AND Q`
$$\text{SF}(P \;\text{AND}\; Q) = \text{SF}(P) \cdot \text{SF}(Q)$$

`WHERE P OR Q`
$$\text{SF}(P \;\text{OR}\; Q) = \text{SF}(P) + \text{SF}(Q) - \text{SF}(P) \cdot \text{SF}(Q)$$

`WHERE NOT P`
$$\text{SF}(\text{NOT}\;P) = 1 - \text{SF}(P)$$

`WHERE A IN (c1, c2, ..., cn)`
$$\text{SF}(A \in (c_1, c_2, \dots, c_n)) = \min(1, \frac{n}{\mathrm{ndist}(A)})$$

`WHERE A BETWEEN (c1, c2)`
$$\text{SF}(c_1 \leq A \leq c_2) = \frac{\min(c_2, \max)-\max(c_1, \min)}{\max - \min}$$

**Selectivity Factor of a Join**

For $R[A \theta B]S$ the general case is too difficult to approximate: usually the required statistics are not available because it would be too expensive to maintain them. The result depends on the operator:
$$\text{SF}(R[A\times B]S) = 1$$
$$\text{SF}(R[A \neq B]S) = 1$$
$$\text{SF}(R[A=B]S) = \frac{1}{|R|} \quad \text{if}\; S_B \;\text{is not null},\; S_B \;\text{is a FK to}\; R_A,\; \text{and}\; R_A \;\text{is a PK}$$
If there is no FK:
$$\text{SF}(R[A=B]S) = \frac{1}{\max(\text{ndist}(A), \text{ndist}(B))}$$
$$\text{SF}(R[A<B]S) = {^1/_2}$$
$$\text{SF}(R[A \leq B]S) = {^1/_2}$$

**Phases of physical optimization**

1. Alternatives generation
2. Intermediate results estimation
3. Cost estimation for each algorithm
4. Choose the best option

**Example**

```sql
SELECT DISTINCT w.strength
FROM wines w, producers p, vintages v
WHERE v.wineId = w.wineId AND p.prodId = v.prodId
  AND p.region = "Priorat" AND v.quantity > 100;
```

The tables have the following structures:

Producers
* Clustered by `prodId`
* B+ by `region`

Wines
* Clustered by `wineId`

Vintages
* Clustered by `wineId` and `prodId`

Statistics:

Tables (extra space due to being clustered needs to be added)
$$\begin{matrix}|P| = 10000 & |W| = 5000 & |V| = 100000 \\ R_p = 12 & R_w = 10 & R_v = 20 \\ B_p = 834 & B_w = 500 & B_v = 5000\end{matrix}$$

Attributes
* prodId, wineId and strength: $|R_R| = 5$ bytes
* $\text{ndist(region)} = 30$
* $\min(\text{quantity}) = 10$, $\max(\text{quantity}) = 500$
* $\text{ndist(strength)} = 10$

**System parameters**
* $B = 500$ bytes per intermediate disk block
* $D = 1$
* $C = 0$
* $d = 75$

DBMS join algorithms:
* Block Nested Loops (6 memory pages, $M = 4$)
* Row Nested Loops
* Sort Match (with 3 memory pages for sorting, $M = 2$)
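To make the selection formulas above concrete, here is a minimal Python sketch of the equality and range selectivity factors and the resulting cardinality estimate. The function names are illustrative and not part of the original notes; the example numbers reuse the Vintages statistics from the exercise below.

```python
import math

def sf_equals(ndist_a: int) -> float:
    """SF(A = c) = 1 / ndist(A), assuming equi-probable values."""
    return 1 / ndist_a

def sf_greater_than(c: float, min_a: float, max_a: float) -> float:
    """SF(A > c) = (max - c) / (max - min), clamped to [0, 1]."""
    if c >= max_a:
        return 0.0
    if c < min_a:
        return 1.0
    return (max_a - c) / (max_a - min_a)

def estimate_selection_output(sf: float, cardinality: int) -> int:
    """|O| = SF * |R|, rounded down."""
    return math.floor(sf * cardinality)

# Example with the Vintages statistics used later:
# ndist(region) = 30, quantity in [10, 500], |V| = 100000
print(sf_equals(30))                              # 0.0333...
print(sf_greater_than(100, 10, 500))              # 0.8163...
print(estimate_selection_output(sf_greater_than(100, 10, 500), 100000))  # 81632
```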
|
import math
c_P, c_W, c_V = 10000, 5000, 100000
R_p, R_w, R_v = 12, 10, 20
B_p, B_w, B_v = math.ceil(c_P / R_p), math.ceil(c_W / R_w), math.ceil(c_V / R_v)
print("Cardinality of {}: {}, Records: {}, number of Full Blocks: {}".format('P', c_P, R_p, B_p))
print("Cardinality of {}: {}, Records: {}, number of Full Blocks: {}".format('W', c_W, R_w, B_w))
print("Cardinality of {}: {}, Records: {}, number of Full Blocks: {}".format('V', c_V, R_v, B_v))
|
Cardinality of P: 10000, Records: 12, number of Full Blocks: 834
Cardinality of W: 5000, Records: 10, number of Full Blocks: 500
Cardinality of V: 100000, Records: 20, number of Full Blocks: 5000
|
MIT
|
ADSDB/Optimization-Costs.ipynb
|
MiguelHeCa/miri-notes
|
Phase 1. Alternatives generation```sqlSELECT DISTINCT w.strengthFROM wines w, producers p, vintages vWHERE v.wineId = w.wineId AND p.prodId = v.prodId AND p.region = "Priorat" AND v.quantity > 100;```Change selection and join arrangement Phase 2. Intermediate results estimation```sqlSELECT DISTINCT w.strengthFROM wines w, producers p, vintages vWHERE v.wineId = w.wineId AND p.prodId = v.prodId AND p.region = "Priorat" AND v.quantity > 100;```**PT1 and PT2****Selection over V: V'**Record length of prodId and wineId:$$|R_{V'}| = 5 + 5 = 10$$Selectivity factor of selection:$$\mathrm{SF}(A > c) = \frac{\max - c}{\max - \min}$$Where $c = 100$ and the query specifies `v.quantity > 100`, then:$$\text{SF}(\text{quantity} > 100) = \frac{500 - 100}{500 - 10} = 0.81632$$Output cardinality of V':$$|O| = \text{SF} \cdot |R|$$$$|V'| = \text{SF}(\text{quantity} > 100) \cdot |V| = 0.81632 \cdot 100000 = 81632$$Number of records per block:$$R_{V'} = \lfloor \frac{B}{|R_{V'}|} \rfloor = \lfloor \frac{500}{10} \rfloor = 50$$Number of blocks needed for V':$$B_{V'} = \lceil \frac{|V'|}{R_{V'}} \rceil = \lceil \frac{81632}{50} \rceil = 1633$$
|
c = 100
min_v = 10
max_v = 500
SF_v_prime = (max_v - c) / (max_v - min_v)
print("Selectivity factor of V': {} \n".format(SF_v_prime))
C_v_prime = math.floor(SF_v_prime * c_V)
print("Cardinality output of V': {} \n".format(C_v_prime))
R_v_len = 5 + 5
B = 500
R_v_prime = math.floor(B / R_v_len)
print("V' number of records per block : {} \n".format(R_v_prime))
B_v_prime = math.ceil(C_v_prime / R_v_prime)
print("Blocks needed for V': {} \n".format(B_v_prime))
|
Selectivity factor of V': 0.8163265306122449
Cardinality output of V': 81632
V' number of records per block : 50
Blocks needed for V': 1633
|
MIT
|
ADSDB/Optimization-Costs.ipynb
|
MiguelHeCa/miri-notes
|
**Selection over P: P'**Record length of prodId:$$|R_{P'}| = 5$$Selectivity factor of selection:$$\mathrm{SF}(A = c) = \frac{1}{\text{ndist}(A)}$$Where $c = 'Priorat'$ and the query specifies `p.region = 'Priorat'`, then:$$\text{SF}(\text{region} = \text{Priorat}) = \frac{1}{30} = 0.033333$$Output cardinality of P':$$|O| = \text{SF} \cdot |R|$$$$|P'| = \text{SF}(\text{region} = \text{Priorat}) \cdot |P| = 0.03333 \cdot 10000 = 333$$Number of records per block:$$R_{P'} = \lfloor \frac{B}{|R_{P'}|} \rfloor = \lfloor \frac{500}{5} \rfloor = 100$$Number of blocks needed for P':$$B_{P'} = \lceil \frac{|P'|}{R_{P'}} \rceil = \lceil \frac{333}{100} \rceil = 4$$
|
ndist_region = 30
SF_p_prime = 1 / ndist_region
print("Selectivity factor of P': {} \n".format(SF_p_prime))
C_p_prime = math.floor(SF_p_prime * c_P)
print("Cardinality output of P': {} \n".format(C_p_prime))
R_p_len = 5
B = 500
R_p_prime = math.floor(B / R_p_len)
print("P' number of records per block : {} \n".format(R_p_prime))
B_p_prime = math.ceil(C_p_prime / R_p_prime)
print("Blocks needed for P': {} \n".format(B_p_prime))
|
Selectivity factor of P': 0.03333333333333333
Cardinality output of P': 333
P' number of records per block : 100
Blocks needed for P': 4
|
MIT
|
ADSDB/Optimization-Costs.ipynb
|
MiguelHeCa/miri-notes
|
**PT1**

**Join between W and V': WV'**

Record length of `strength` and `prodId`:
$$|R_{WV'}| = 5 + 5 = 10$$
Selectivity factor
$$\text{SF}_{WV'} = \frac{1}{|W|} = \frac{1}{5000} = 0.0002$$
Cardinality output of WV'
$$|WV'| = SF_{WV'} \cdot |W| \cdot |V'| = \frac{1}{5000} \cdot 5000 \cdot 81632 = 81632$$
Number of records per block for WV':
$$R_{WV'} = \lfloor \frac{B}{|R_{WV'}|} \rfloor = \lfloor \frac{500}{10} \rfloor = 50$$
Number of blocks used for WV':
$$B_{WV'} = \lceil \frac{|WV'|}{R_{WV'}} \rceil = \lceil \frac{81632}{50} \rceil = 1633$$
|
SF_wv_prime = 1 / c_W
print("Selectivity factor of WV': {} \n".format(SF_wv_prime))
C_wv_prime = math.floor(SF_wv_prime * c_W * C_v_prime)
print("Cardinality output of WV': {} \n".format(C_wv_prime))
R_wv_prime_len = 5 + 5
B = 500
R_wv_prime = math.floor(B / R_wv_prime_len)
print("WV' number of records per block : {} \n".format(R_wv_prime))
B_wv_prime = math.ceil(C_wv_prime / R_wv_prime)
print("Blocks needed for WV': {} \n".format(B_wv_prime))
|
Selectivity factor of WV': 0.0002
Cardinality output of WV': 81632
WV' number of records per block : 50
Blocks needed for WV': 1633
|
MIT
|
ADSDB/Optimization-Costs.ipynb
|
MiguelHeCa/miri-notes
|
**Join between WV' and P': WV'P'**

Record length for `strength`:
$$|R_{WV'P'}| = 5$$
Selectivity factor, assuming quantity and region are independent
$$\text{SF}_{WV'P'} = \frac{1}{|P'|} \cdot \frac{1}{ndist(\text{region})} = \frac{1}{333 \cdot 30} = 10^{-4}$$
Cardinality output
$$|WV'P'| = SF_{WV'P'} \cdot |WV'| \cdot |P'| = 10^{-4} \cdot 81632 \cdot 333 = 2721$$
Records per block
$$R_{WV'P'} = \lfloor \frac{B}{|R_{WV'P'}|} \rfloor = \lfloor \frac{500}{5} \rfloor = 100$$
Blocks for WV'P'
$$B_{WV'P'} = \lceil \frac{|WV'P'|}{R_{WV'P'}} \rceil = \lceil \frac{2721}{100} \rceil = 28$$
|
SF_wvp_prime = (1 / C_p_prime) * (1 / ndist_region)
print("Selectivity factor of WV'P': {} \n".format(SF_wvp_prime))
C_wvp_prime = math.floor(SF_wvp_prime * C_wv_prime * C_p_prime)
print("Cardinality output of WV'P': {} \n".format(C_wvp_prime))
R_wvp_prime_len = 5
B = 500
R_wvp_prime = math.floor(B / R_wvp_prime_len)
print("WV'P' number of records per block : {} \n".format(R_wvp_prime))
B_wvp_prime = math.ceil(C_wvp_prime / R_wvp_prime)
print("Blocks needed for WV'P': {} \n".format(B_wvp_prime))
|
Selectivity factor of WV'P': 0.00010010010010010009
Cardinality output of WV'P': 2721
WV'P' number of records per block : 100
Blocks needed for WV'P': 28
|
MIT
|
ADSDB/Optimization-Costs.ipynb
|
MiguelHeCa/miri-notes
|
**PT2**

**Join between V' and P': V'P'**

Assuming independence of the variables.

Record length for `wineId`
$$|R_{V'P'}| = 5$$
Selectivity factor
$$\text{SF}_{V'P'} = \frac{1}{ndist(\text{region})} \cdot \frac{1}{|P'|} = \frac{1}{30} \cdot \frac{1}{333} = 10^{-4}$$
Output cardinality
$$|V'P'| = \text{SF}_{V'P'} \cdot |V'| \cdot |P'| = 10^{-4} \cdot 81632 \cdot 333 = 2721$$
Number of records per block
$$R_{V'P'} = \lfloor \frac{B}{|R_{V'P'}|} \rfloor = \lfloor \frac{500}{5} \rfloor = 100$$
Blocks needed for V'P'
$$B_{V'P'} = \lceil \frac{|V'P'|}{R_{V'P'}} \rceil = \lceil \frac{2721}{100} \rceil = 28$$
|
ndist_region = 30
SF_vp_prime = (1 / ndist_region) * (1 / C_p_prime)
print("Selectivity factor of V'P': {} \n".format(SF_vp_prime))
C_vp_prime = math.floor(SF_vp_prime * C_v_prime * C_p_prime)
print("Cardinality output of V'P': {} \n".format(C_vp_prime))
R_vp_len = 5
B = 500
R_vp_prime = math.floor(B / R_vp_len)
print("V'P' number of records per block : {} \n".format(R_vp_prime))
B_vp_prime = math.ceil(C_vp_prime / R_vp_prime)
print("Blocks needed for V'P': {} \n".format(B_vp_prime))
|
Selectivity factor of V'P': 0.00010010010010010009
Cardinality output of V'P': 2721
V'P' number of records per block : 100
Blocks needed for V'P': 28
|
MIT
|
ADSDB/Optimization-Costs.ipynb
|
MiguelHeCa/miri-notes
|
**Join between W and V'P': WV'P'**

Record length for WV'P'
$$|R_{WV'P'}| = 5$$
Selectivity factor for WV'P'
$$\text{SF} = \frac{1}{|W|} = \frac{1}{5000} = 0.0002$$
Cardinality output
$$|WV'P'| = SF \cdot |W| \cdot |V'P'| = 0.0002 \cdot 5000 \cdot 2721 = 2721$$
Number of records per block
$$R_{WV'P'} = \lfloor \frac{B}{|R_{WV'P'}|} \rfloor = \lfloor \frac{500}{5} \rfloor = 100$$
Blocks needed for WV'P'
$$B_{WV'P'} = \lceil \frac{|WV'P'|}{R_{WV'P'}} \rceil = \lceil \frac{2721}{100} \rceil = 28$$
|
SF_wv_pr_p_pr = 1 / c_W
print("Selectivity factor of WV'P': {} \n".format(SF_wv_pr_p_pr))
C_wv_pr_p_pr = math.floor(SF_wv_pr_p_pr * c_W * C_vp_prime)
print("Cardinality output of WV'P': {} \n".format(C_wv_pr_p_pr))
R_wv_pr_p_pr_len = 5
B = 500
R_wv_pr_p_pr = math.floor(B / R_wv_pr_p_pr_len)
print("WV'P' number of records per block : {} \n".format(R_wv_pr_p_pr))
B_wv_pr_p_pr = math.ceil(C_wv_pr_p_pr / R_wv_pr_p_pr)
print("Blocks needed for WV'P': {} \n".format(B_wv_pr_p_pr))
|
Selectivity factor of WV'P': 0.0002
Cardinality output of WV'P': 2721
WV'P' number of records per block : 100
Blocks needed for WV'P': 28
|
MIT
|
ADSDB/Optimization-Costs.ipynb
|
MiguelHeCa/miri-notes
|
**PT1/PT2****Final result = O**Record length$$|R_O| = 5$$Output cardinality$$|O| = \text{ndist}(\text{strength}) = 100$$Number of records$$R_O = \lfloor \frac{B}{|R_O|} \rfloor = \lfloor \frac{500}{5} \rfloor = 100$$Blocks needed$$B_O = \lceil \frac{|O|}{R_O} \rceil = \lceil \frac{100}{100} \rceil = 1$$
|
ndist_strength = 100
C_o = ndist_strength
print("Cardinality output of O: {} \n".format(C_o))
R_o_len = 5
B = 500
R_o = math.floor(B / R_o_len)
print("O number of records per block : {} \n".format(R_o))
B_o = math.ceil(C_o / R_o)
print("Blocks needed for O: {} \n".format(B_o))
|
Cardinality output of O: 100
O number of records per block : 100
Blocks needed for O: 1
|
MIT
|
ADSDB/Optimization-Costs.ipynb
|
MiguelHeCa/miri-notes
|
**Map result** Phase 3. Cost estimation for each algorithmRecall:$$u = \frac{2}{3} \cdot 2(75) = 100$$**AP1/AP2****Selection over V: V'**Recall that for Vintages is clustered by wineId and prodIdAvailable access paths: No index$$\text{cost}_{\text{scan}}(V') = \lceil 1.5 B_{V} \rceil \cdot D = \lceil 1.5 \cdot 5000 \rceil \cdot 1 = 7500$$Chosen algorithm: **Scan****Selection over P: P'**Available access paths: B+ and no indexFor a table scan$$\text{cost}_{\text{scan}}(P') = \lceil 1.5 B_{P} \rceil \cdot D = \lceil 1.5 \cdot 834 \rceil \cdot 1 = 1251$$Tree depth of h for B+ is:$$h = \lceil \log_u |P| \rceil - 1 = \lceil \log_u |P| \rceil - 1 = \lceil \log_{100} 10000 \rceil - 1 = 1$$For an index of several tuples$$\begin{align}\text{cost}_{B^+}(P') & = h \cdot D + \frac{|P'| - 1}{u} \cdot D + |P'| \cdot D \\& = h \cdot D + \frac{SF_{\text{region = 'Priorat'}} \cdot |P| - 1}{u} \cdot D + SF_{\text{region = 'Priorat'}} \cdot |P| \cdot D \\& = 1 \cdot 1 + \frac{{^1/_{30}} \cdot 10000 - 1}{100} \cdot D + {^1/_{30}} \cdot 10000 \cdot 1 \\& = 1 + \frac{332}{100} + 333 \\& = 337.33\end{align}$$Chosen algorithm: **B+**
|
load = 2/3
d = 75
u = load * (2 * d)
h = math.ceil(math.log(c_P, u)) - 1
D = 1
print("load is: {}\nd is: {}\nu is: {}\nh is: {}\nD is: {}\n".format(load, d, u, h, D))
cost_scan_p = math.ceil(1.5 * B_p) * D
cost_bplus_p = (h * D) + ((C_p_prime / u) * D) + (C_p_prime * D)
print("Cost of scan is: {} \nCost of B+ is: {}".format(cost_scan_p, cost_bplus_p))
|
load is: 0.6666666666666666
d is: 75
u is: 100.0
h is: 1
D is: 1
Cost of scan is: 1251
Cost of B+ is: 337.33
|
MIT
|
ADSDB/Optimization-Costs.ipynb
|
MiguelHeCa/miri-notes
|
**PT1****Join over W and V': WV'**Available algorithms: Block Nested Loops (BML), Row Nested Loops (RML) and Sort-Match (SM)*Block Nested Loops*Recall:$$M = 4$$$\lceil 1.5 B_{W} \rceil < B_{V'}$ we use the commutative property of joins$$\begin{align}\text{cost}_{\text{BML}}(WV') & = \lceil 1.5 B_{W} \rceil + \lceil \frac{1.5 B_{W}}{M} \rceil \cdot B_{V'} \\& = \lceil 1.5 \cdot 500 \rceil + \lceil \frac{1.5\cdot 500}{4} \rceil \cdot 1633 \\& = 307,754\end{align}$$*Row Nested Loops*Look for attributes of W$V'$ does not use extra space any more for being ordered$$\begin{align}\text{cost}_{\text{RML}}(WV') & = B_{V'} + |V'| \cdot \left( \lceil \log_u |W| \rceil - 1 + 1 + (\frac{1.5(k-1)}{10} \right) \\& = 1633 + 81,632 \cdot \left( \lceil \log_{100} 5000 \rceil - 1 + 1 \right) \\& = 164,887\end{align}$$Note: This wasn't explained. Maybe $k = 1$ but needs confirmation.*Sort-Match*$W$ is ordered by `wineID`, $V'$ is still ordered y `wineID` and `prodID`.$$\text{cost}_{\text{SM}}(WV') = \lceil 1.5 B_{W} \rceil + B_{V'} = \lceil 1.5 \cdot 500 \rceil + 1633 = 2383$$Chosen algorithm: **Sort-Match** **Join between WV' and P': WV'P'***Block Nested Loops*$B_{P'} < B_{WV'}$ we use the commutative property of joins$$\begin{align}\text{cost}_{\text{BML}}(WV'P') & = B_{P'} + \lceil \frac{B_{P'}}{M} \rceil \cdot B_{WV'} \\& = 4 + \lceil \frac{4}{4} \rceil \cdot 1633 \\& = 1637\end{align}$$Note: It isn't explained why BML is analyzed but not RML.*Sort-Match*Neither WV’ nor P’ are ordered by `prodID`$$\begin{align}\text{cost}_{\text{SM}}(WV'P') &= 2 B_{WV'} \cdot \lceil \log_2 B_{WV'} \rceil + 2 B_{P'} \cdot \lceil \log_2 B_{P'} \rceil + B_{WV'} + B_{P'}\\&= 2 \cdot 1633 \cdot \lceil \log_2 1633 \rceil + 2 \cdot 4 \cdot \lceil \log_2 4 \rceil + 1633 + 4 \\&= 37,579\end{align}$$Chosen algorithm: **Block Nested Loop**
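The three join-cost formulas above can be wrapped in small helper functions to reproduce these numbers. This is a sketch under the same assumptions as the notes (clustered inner table for Row Nested Loops, k = 1 repetitions); the function and parameter names are mine, not from the course material.

```python
import math

def cost_bnl(b_outer: int, b_inner: int, m: int) -> int:
    """Block Nested Loops: scan the outer once, the inner once per group of M outer blocks."""
    return b_outer + math.ceil(b_outer / m) * b_inner

def cost_rnl(b_outer: int, card_outer: int, card_inner_table: int, u: float, k: int = 1) -> float:
    """Row Nested Loops over a clustered inner table: one index descent per outer row."""
    h_plus_1 = math.ceil(math.log(card_inner_table, u))  # ceil(log_u |T|) = h + 1
    return b_outer + card_outer * (h_plus_1 + 1.5 * (k - 1) / 10)

def cost_sort_match(b_r: int, b_s: int, r_sorted: bool = True, s_sorted: bool = True) -> float:
    """Sort-Match: sort whatever is not already ordered on the join key, then merge."""
    cost = b_r + b_s
    if not r_sorted:
        cost += 2 * b_r * math.ceil(math.log2(b_r))
    if not s_sorted:
        cost += 2 * b_s * math.ceil(math.log2(b_s))
    return cost

# WV' join from the text: outer = 1.5*B_W = 750 blocks, inner = B_V' = 1633 blocks, M = 4
print(cost_bnl(750, 1633, 4))             # 307754
print(cost_rnl(1633, 81632, 5000, 100))   # 164897.0 with k = 1 (the notes list 164,887)
print(cost_sort_match(750, 1633))         # 2383
```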
|
print("B_p' is {}\nB_wv' is {}".format(B_p_prime, B_wv_prime))
(2 * B_wv_prime * math.ceil(math.log(B_wv_prime, 2))) + (2 * B_p_prime * math.ceil(math.log(B_p_prime, 2))) + B_wv_prime + B_p_prime
|
_____no_output_____
|
MIT
|
ADSDB/Optimization-Costs.ipynb
|
MiguelHeCa/miri-notes
|
**PT2****Join between V' and P': V'P'Available algorithms: BNL and SM.*Block Nested Loops*$B_{P'} < B_{V'}$ we use the commutative property of joins$$\begin{align}\text{cost}_{\text{BML}}(V'P') & = B_{P'} + \lceil \frac{B_{P'}}{M} \rceil \cdot B_{V'} \\& = 4 + \lceil \frac{4}{4} \rceil \cdot 1633 \\& = 1637\end{align}$$*Sort-Match*Neither V’ nor P’ are ordered by `prodID`$$\begin{align}\text{cost}_{\text{SM}}(V'P') &= 2 B_{V'} \cdot \lceil \log_2 B_{V'} \rceil + 2 B_{P'} \cdot \lceil \log_2 B_{P'} \rceil + B_{V'} + B_{P'}\\&= 2 \cdot 1633 \cdot \lceil \log_2 1633 \rceil + 2 \cdot 4 \cdot \lceil \log_2 4 \rceil + 1633 + 4 \\&= 37,579\end{align}$$Chosen algorithm: **Block Nested Loop**
|
print("B_p' is {}\nB_v' is {}".format(B_p_prime, B_v_prime))
|
B_p' is 4
B_v' is 1633
|
MIT
|
ADSDB/Optimization-Costs.ipynb
|
MiguelHeCa/miri-notes
|
**Join between W and V'P': WV'P'**Available algorithms: Block Nested Loops (BML), Row Nested Loops (RML) and Sort-Match (SM)*Block Nested Loops*$B_{V'P'} < \lceil 1.5 B_{W} \rceil$ we use the commutative property of joins$$\begin{align}\text{cost}_{\text{BML}}(WV'P') & = B_{V'P'} + \lceil \frac{B_{V'P'}}{M} \rceil \cdot \lceil 1.5 B_{W} \rceil \\& = 28 + \lceil \frac{28}{4} \rceil \cdot \lceil 750 \rceil \\& = 5278\end{align}$$*Row Nested Loops*Look for attributes of W$$\begin{align}\text{cost}_{\text{RML}}(WV'P') & = B_{V'P'} + |V'P'| \cdot \left( \lceil \log_u |W| \rceil - 1 + 1 + (\frac{1.5(k-1)}{10} \right) \\& = 28 + 2721 \cdot \left( \lceil \log_{100} 5000 \rceil - 1 + 1 \right) \\& = 5470\end{align}$$*Sort-Match*W is ordered by `wineID`, V'P' is not sorted by `wineID`$$\begin{align}\text{cost}_{\text{SM}}(WV'P') &= 2 B_{V'P'} \cdot \lceil \log_2 B_{V'P'} \rceil + \lceil 1.5 B_{W} \rceil + B_{V'P'} \\&= 2 \cdot 28 \cdot \lceil \log_2 28 \rceil + \lceil 1.5 \cdot 500 \rceil + 28 \\&= 1058\end{align}$$Chosen algorithm: **Sort-Match**
|
print("B_v'p' is {}\n1.5*B_w is {}\n|V'P'| is {}".format(B_vp_prime, math.ceil(1.5*B_w), C_vp_prime))
28 + math.ceil(28/4) * 750
28+(2721*(math.ceil(math.log(5000, 100)) - 1 + 1))
Cost_v_prime = 1633 + 7500
Cost_p_prime = 4 + 337
Cost_wv = 1633 + 2383
Cost_vp = 28 + 1637
Cost_wvp_pt1 = 28 + 1637
Cost_wvp_pt2 = 28 + 1058
Cost_o = 1 + 252
Cost_pt1 = Cost_v_prime + Cost_p_prime + Cost_wv + Cost_wvp_pt1 + Cost_o
Cost_pt2 = Cost_v_prime + Cost_p_prime + Cost_vp + Cost_wvp_pt2 + Cost_o
print("Total cost of:\nPT1: {}\nPT2: {}".format(Cost_pt1, Cost_pt2))
|
Total cost of:
PT1: 15408
PT2: 12478
|
MIT
|
ADSDB/Optimization-Costs.ipynb
|
MiguelHeCa/miri-notes
|
Competitive Data Analysis. The Kaggle Platform. Lesson 1: Introduction to competitive data analysis, Exploratory Data Analysis. Homework for Lesson 1. Link to the datasets: https://drive.google.com/file/d/1j8zuKbI-PW5qKwhybP4S0EtugbPqmeyX/view?usp=sharing Task 1: Perform a basic analysis of the data: print the dimensions of the datasets, compute basic statistics, analyze missing values, and draw conclusions.
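A minimal starting sketch for this task, assuming the archive from the link above has been extracted to train.csv and test.csv (the file names are an assumption):

```python
import pandas as pd

# Assumed file names; adjust to the actual files from the link above
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')

# Dataset dimensions
print(train.shape, test.shape)

# Basic statistics of the numeric features
print(train.describe())

# Missing-value analysis
print(train.isnull().sum().sort_values(ascending=False).head(10))
print(test.isnull().sum().sum())
```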
|
# In progress. Everything piled up at once; I hope to catch up this week.
# I've had a look. A very serious course with difficult topics. It was a mistake to schedule it outside the quarter.
|
_____no_output_____
|
MIT
|
webinar_1/Lesson 1.ipynb
|
superbe/KagglePlatform
|
Task 2: Perform a basic analysis of the target variable and draw conclusions.
|
# In progress
|
_____no_output_____
|
MIT
|
webinar_1/Lesson 1.ipynb
|
superbe/KagglePlatform
|
Task 3: Plot the distribution of the features by target value, and the distribution of the features for the training and test sets (if your machine cannot handle all the features, do the task for var_0, var_1, var_2, var_5, var_9, var_10, var_13, var_20, var_26, var_40, var_55, var_80, var_106, var_109, var_139, var_175, var_184, var_196); draw conclusions.
|
# In progress
|
_____no_output_____
|
MIT
|
webinar_1/Lesson 1.ipynb
|
superbe/KagglePlatform
|
Task 4: Plot the distribution of the features' basic statistics (mean, standard deviation) split by target value, and the distribution of those statistics for the training and test sets; draw conclusions.
|
# In progress
|
_____no_output_____
|
MIT
|
webinar_1/Lesson 1.ipynb
|
superbe/KagglePlatform
|
Task 5: Plot the distribution of the correlation coefficients between features. Is there any dependence between the features (we will assume there is no relationship between features if the correlation coefficient is < 0.2)?
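A possible sketch for this task, reusing the train DataFrame from the Task 1 sketch and the var_ column naming from the task description (both assumptions):

```python
import numpy as np
import matplotlib.pyplot as plt

# Correlation matrix of the features only
feature_cols = [c for c in train.columns if c.startswith('var_')]
corr = train[feature_cols].corr()

# Distribution of the off-diagonal correlation coefficients
# (each pair appears twice because the matrix is symmetric)
coefs = corr.where(~np.eye(len(corr), dtype=bool)).stack().abs()
plt.hist(coefs, bins=50)
plt.xlabel('|correlation coefficient|')
plt.show()

# With the 0.2 rule of thumb from the task, count "dependent" pairs
print((coefs >= 0.2).sum())
```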
|
# In progress
|
_____no_output_____
|
MIT
|
webinar_1/Lesson 1.ipynb
|
superbe/KagglePlatform
|
Task 6: Identify the 10 features with the strongest nonlinear relationship to the target variable.
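One common way to rank nonlinear dependence is mutual information. A sketch reusing the train frame from the Task 1 sketch and assuming the target column is named target (the column name is an assumption):

```python
import pandas as pd
from sklearn.feature_selection import mutual_info_classif

feature_cols = [c for c in train.columns if c.startswith('var_')]
X = train[feature_cols]
y = train['target']  # assumed target column name

# Estimate mutual information between each feature and the binary target
mi = mutual_info_classif(X, y, random_state=42)
mi_series = pd.Series(mi, index=feature_cols).sort_values(ascending=False)
print(mi_series.head(10))
```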
|
# In progress
|
_____no_output_____
|
MIT
|
webinar_1/Lesson 1.ipynb
|
superbe/KagglePlatform
|
Task 7: Analyze whether the feature distributions are identical in the training and test sets, and draw conclusions.
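A simple first check for distribution identity is a per-feature two-sample Kolmogorov-Smirnov test; a sketch reusing the train and test frames from the Task 1 sketch:

```python
from scipy.stats import ks_2samp

feature_cols = [c for c in train.columns if c.startswith('var_')]

# Small p-values suggest the train and test distributions of a feature differ
results = {}
for col in feature_cols:
    stat, p_value = ks_2samp(train[col], test[col])
    results[col] = p_value

suspicious = {c: p for c, p in results.items() if p < 0.05}
print(len(suspicious), 'features with p < 0.05')
```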
|
# In progress
|
_____no_output_____
|
MIT
|
webinar_1/Lesson 1.ipynb
|
superbe/KagglePlatform
|
Euler Problem 206
=================
Find the unique positive integer whose square has the form 1_2_3_4_5_6_7_8_9_0, where each "_" is a single digit.
|
from itertools import product
for a, b, c, d in product(range(10), repeat=4):
    # Candidate for the 17-digit pattern 1_2_3_4_5_6_7_8_9: the last four unknown
    # digits are fixed at 9 and the first four unknowns are a, b, c, d
    N = 10203040596979899
    N += a*10**15 + b*10**13 + c*10**11 + d*10**9
    # Integer part of the square root, then square it back
    sqrtN = int(N**0.5)
    s = str(sqrtN**2)
    # Check the fixed digits of the square against the pattern
    if s[0:17:2] == '123456789':
        # The target square ends in 0, so it must end in 00; the answer is this root times 10
        print(sqrtN * 10)
        break
|
1389019170
|
MIT
|
Euler 206 - Concealed square.ipynb
|
Radcliffe/project-euler
|
BLU15 - Model CSI

**Intro:** It often happens that your data distribution changes with time. More than that, sometimes you don't know how a model was trained and what the original training data was. In this learning unit we're going to try to identify whether an existing model meets our expectations and redeploy it.

**Problem statement:** As an example, we're going to use the same problem that you met in the last BLU. You're already familiar with the problem, but just as a reminder:

> The police department has received lots of complaints about its stop and search policy. Every time a car is stopped, the police officers have to decide whether or not to search the car for contraband. According to critics, these searches have a bias against people of certain backgrounds.

You got a model from your client, and **here is the model's description:**

> It's a LightGBM model (LGBMClassifier) trained on the following features:
> - Department Name
> - InterventionLocationName
> - InterventionReasonCode
> - ReportingOfficerIdentificationID
> - ResidentIndicator
> - SearchAuthorizationCode
> - StatuteReason
> - SubjectAge
> - SubjectEthnicityCode
> - SubjectRaceCode
> - SubjectSexCode
> - TownResidentIndicator

> All the categorical features were one-hot encoded. The only numerical feature (SubjectAge) was not changed. The rows that contain rare categorical features (the ones that appear less than N times in the dataset) were removed. Check the original_model.ipynb notebook for more details.

P.S. If you've never heard of lightgbm, XGBoost and other gradient boosting, I highly recommend reading this [article](https://mlcourse.ai/articles/topic10-boosting/) or watching these videos: [part1](https://www.youtube.com/watch?v=g0ZOtzZqdqk), [part2](https://www.youtube.com/watch?v=V5158Oug4W8). It's not essential for this BLU, so you might leave these links as dessert after you go through the learning materials and solve the exercises, but these are very good models you can use later on, so I suggest reading about them.

**Here are the requirements that the police department created:**

> - A minimum 50% success rate for searches (when a car is searched, it should be at least 50% likely that contraband is found)
> - No police sub-department should have a discrepancy bigger than 5% in the search success rate between protected classes (race, ethnicity, gender)
> - The largest possible amount of contraband found, given the constraints above.

**And here is the description of how the current model meets the requirements:**

- precision score = 50%
- recall = 89.3%
- roc_auc_score for the probability predictions = 82.7%

The precision and recall above are met for probability predictions with a specified threshold equal to **0.21073452797732833**.

It's not said whether the second requirement is met, and as it was not met in the previous learning unit, let's ignore it for now.

**Model diagnosis:** First, let's compare these models to the ones that we created in the previous BLU:

| Model | Baseline | Second iteration | New model | Best model |
|-------------------|---------|--------|--------|--------|
| Requirement 1 - success rate | 0.53 | 0.38 | 0.5 | 1 |
| Requirement 2 - global discrimination (race) | 0.105 | 0.11 | NaN | 1 |
| Requirement 2 - global discrimination (sex) | 0.012 | 0.014 | NaN | 1 |
| Requirement 2 - global discrimination (ethnicity) | 0.114 | 0.101 | NaN | 2 |
| Requirement 2 - department discrimination (race) | 27 | 17 | NaN | 2 |
| Requirement 2 - department discrimination (sex) | 19 | 23 | NaN | 1 |
| Requirement 2 - department discrimination (ethnicity) | 24 | NaN | 23 | 2 |
| Requirement 3 - contraband found (Recall) | 0.65 | 0.76 | 0.893 | 3 |

As we can see, the last model has exactly the required success rate (Requirement 1) and a very good recall (Requirement 3). But it might be risky to rely on such a specific threshold, as we might end up with a success rate < 0.5 really quickly. It might be a better idea to use a bigger threshold (e.g. 0.25), but let's see.

Let's imagine that the model was trained a long time ago, and now you're in the future trying to evaluate it, because things might have changed. Data distribution is not always the same, so something that used to work even a year ago could be completely wrong today. Especially in 2020!

First of all, let's start the server which is running this model. Open the shell:

```sh
python protected_server.py
```

And read a CSV file with new observations from 2020:
|
import joblib
import pandas as pd
import json
import pickle
from sklearn.metrics import precision_score, recall_score, roc_auc_score
from sklearn.metrics import confusion_matrix
import requests
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from sklearn.metrics import precision_recall_curve
%matplotlib inline
df = pd.read_csv('./data/new_observations.csv')
df.head()
|
_____no_output_____
|
MIT
|
S06 - DS in the Real World/BLU15 - Model CSI/BLU15 - Learning Unit - Model CSI.ipynb
|
LDSSA/batch4-students
|
Let's start by sending all those requests and comparing the model's predictions with the target values. The model is already prepared to convert our observations to the format it's expecting; the only thing we need to change is making the department and intervention location names lowercase, and then we're ready to extract the fields from the dataframe and put them into the POST request.
|
# lowercase department and location names
df['Department Name'] = df['Department Name'].apply(lambda x: str(x).lower())
df['InterventionLocationName'] = df['InterventionLocationName'].apply(lambda x: str(x).lower())
url = "http://127.0.0.1:5000/predict"
headers = {'Content-Type': 'application/json'}
def send_request(index: int, obs: dict, url: str, headers: dict):
    observation = {
        "id": index,
        "observation": {
            "Department Name": obs["Department Name"],
            "InterventionLocationName": obs["InterventionLocationName"],
            "InterventionReasonCode": obs["InterventionReasonCode"],
            "ReportingOfficerIdentificationID": obs["ReportingOfficerIdentificationID"],
            "ResidentIndicator": obs["ResidentIndicator"],
            "SearchAuthorizationCode": obs["SearchAuthorizationCode"],
            "StatuteReason": obs["StatuteReason"],
            "SubjectAge": obs["SubjectAge"],
            "SubjectEthnicityCode": obs["SubjectEthnicityCode"],
            "SubjectRaceCode": obs["SubjectRaceCode"],
            "SubjectSexCode": obs["SubjectSexCode"],
            "TownResidentIndicator": obs["TownResidentIndicator"]
        }
    }
    r = requests.post(url, data=json.dumps(observation), headers=headers)
    result = json.loads(r.text)
    return result
responses = [send_request(i, obs, url, headers) for i, obs in df.iterrows()]
print(responses[0])
df['proba'] = [r['proba'] for r in responses]
threshold = 0.21073452797732833
# we're going to use the threshold we got from the client
df['prediction'] = [1 if p >= threshold else 0 for p in df['proba']]
|
_____no_output_____
|
MIT
|
S06 - DS in the Real World/BLU15 - Model CSI/BLU15 - Learning Unit - Model CSI.ipynb
|
LDSSA/batch4-students
|
**NOTE:** We could also load the model and make predictions locally (without using the API), but:

1. I wanted to show you how you might send requests in a similar situation.
2. If you have a running API and some model file, you always need to understand how the API works (whether it does any kind of data preprocessing), which might sometimes be complicated, and if you're trying to analyze the model running in production, you still need to make sure that the local predictions you make are equal to the ones the production API returns.
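For completeness, here is a sketch of what that local parity check could look like, assuming the client shipped a serialized scikit-learn pipeline that includes its own preprocessing; the file name and pipeline layout are assumptions, not part of this unit's materials.

```python
import joblib
import numpy as np

# Hypothetical artifact name: adjust to whatever file the client actually shipped
local_pipeline = joblib.load('model/pipeline.pickle')

feature_cols = ['Department Name', 'InterventionLocationName', 'InterventionReasonCode',
                'ReportingOfficerIdentificationID', 'ResidentIndicator',
                'SearchAuthorizationCode', 'StatuteReason', 'SubjectAge',
                'SubjectEthnicityCode', 'SubjectRaceCode', 'SubjectSexCode',
                'TownResidentIndicator']

# Probability of contraband according to the local copy of the model
local_proba = local_pipeline.predict_proba(df[feature_cols])[:, 1]

# Compare against what the running API returned for the same rows
print(np.abs(local_proba - df['proba'].values).max())  # ~0 means local and API agree
```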
|
confusion_matrix(df['ContrabandIndicator'], df['prediction'])
|
_____no_output_____
|
MIT
|
S06 - DS in the Real World/BLU15 - Model CSI/BLU15 - Learning Unit - Model CSI.ipynb
|
LDSSA/batch4-students
|
If you're not familiar with confusion matrices, **here is an explanation of the values.** These values don't seem to be good. Let's once again take a look at the client's requirements and see if we still meet them: > A minimum 50% success rate for searches (when a car is searched, it should be at least 50% likely that contraband is found)
|
def verify_success_rate_above(y_true, y_pred, min_success_rate=0.5):
    """
    Verifies the success rate on a test set is above a provided minimum
    """
    precision = precision_score(y_true, y_pred, pos_label=True)
    is_satisfied = (precision >= min_success_rate)
    return is_satisfied, precision
verify_success_rate_above(df['ContrabandIndicator'], df['prediction'], 0.5)
|
_____no_output_____
|
MIT
|
S06 - DS in the Real World/BLU15 - Model CSI/BLU15 - Learning Unit - Model CSI.ipynb
|
LDSSA/batch4-students
|
> The largest possible amount of contraband found, given the constraints above.

As the client says, their model recall was 0.893. And what now?
|
def verify_amount_found(y_true, y_pred):
    """
    Verifies the amount of contraband found in the test dataset - a.k.a. the recall on our test set
    """
    recall = recall_score(y_true, y_pred, pos_label=True)
    return recall
verify_amount_found(df['ContrabandIndicator'], df['prediction'])
|
_____no_output_____
|
MIT
|
S06 - DS in the Real World/BLU15 - Model CSI/BLU15 - Learning Unit - Model CSI.ipynb
|
LDSSA/batch4-students
|
Okay, relax, it happens. Let's start by checking different thresholds. Maybe the selected threshold was too specific and doesn't work anymore. What about 0.25?
|
threshold = 0.25
df['prediction'] = [1 if p >= threshold else 0 for p in df['proba']]
verify_success_rate_above(df['ContrabandIndicator'], df['prediction'], 0.5)
verify_amount_found(df['ContrabandIndicator'], df['prediction'])
|
_____no_output_____
|
MIT
|
S06 - DS in the Real World/BLU15 - Model CSI/BLU15 - Learning Unit - Model CSI.ipynb
|
LDSSA/batch4-students
|
Okay, let's try the same technique they originally used to identify the best threshold. Maybe we'll find something good enough. It's not a good idea to tune such things on the test data, but we're going to use it just to confirm the model's performance, not to select the threshold.
|
precision, recall, thresholds = precision_recall_curve(df['ContrabandIndicator'], df['proba'])
precision = precision[:-1]
recall = recall[:-1]
fig=plt.figure()
ax1 = plt.subplot(211)
ax2 = plt.subplot(212)
ax1.hlines(y=0.5,xmin=0, xmax=1, colors='red')
ax1.plot(thresholds,precision)
ax2.plot(thresholds,recall)
ax1.get_shared_x_axes().join(ax1, ax2)
ax1.set_xticklabels([])
plt.xlabel('Threshold')
ax1.set_title('Precision')
ax2.set_title('Recall')
plt.show()
|
_____no_output_____
|
MIT
|
S06 - DS in the Real World/BLU15 - Model CSI/BLU15 - Learning Unit - Model CSI.ipynb
|
LDSSA/batch4-students
|
So what do we see? There is some threshold value (around 0.6) that gives us precision >= 0.5. But that threshold is so high that the recall at this point is really low. Let's calculate the exact values:
|
min_index = [i for i, prec in enumerate(precision) if prec >= 0.5][0]
print(min_index)
thresholds[min_index]
precision[min_index]
recall[min_index]
|
_____no_output_____
|
MIT
|
S06 - DS in the Real World/BLU15 - Model CSI/BLU15 - Learning Unit - Model CSI.ipynb
|
LDSSA/batch4-students
|
Before we move on, we need to understand why this happens, so that we can decide what kind of action to perform. Let's try to analyze the changes in data and discuss different things we might want to do.
|
old_df = pd.read_csv('./data/train_searched.csv')
old_df.head()
|
_____no_output_____
|
MIT
|
S06 - DS in the Real World/BLU15 - Model CSI/BLU15 - Learning Unit - Model CSI.ipynb
|
LDSSA/batch4-students
|
We're going to apply the same changes to the dataset as in the original model notebook, to understand what the original data looked like and how the current dataset differs.
|
old_df = old_df[(old_df['VehicleSearchedIndicator']==True)]
# lowercase department and location names
old_df['Department Name'] = old_df['Department Name'].apply(lambda x: str(x).lower())
old_df['InterventionLocationName'] = old_df['InterventionLocationName'].apply(lambda x: str(x).lower())
train_features = old_df.columns.drop(['VehicleSearchedIndicator', 'ContrabandIndicator'])
categorical_features = train_features.drop(['InterventionDateTime', 'SubjectAge'])
numerical_features = ['SubjectAge']
target = 'ContrabandIndicator'
# I'm going to remove less common features.
# Let's create a dictionary with the minimum required number of appearances
min_frequency = {
    "Department Name": 50,
    "InterventionLocationName": 50,
    "ReportingOfficerIdentificationID": 30,
    "StatuteReason": 10
}
def filter_values(df: pd.DataFrame, column_name: str, threshold: int):
    value_counts = df[column_name].value_counts()
    to_keep = value_counts[value_counts > threshold].index
    filtered = df[df[column_name].isin(to_keep)]
    return filtered
for feature, threshold in min_frequency.items():
    old_df = filter_values(old_df, feature, threshold)
old_df.shape
old_df.head()
old_df['ContrabandIndicator'].value_counts(normalize=True)
df['ContrabandIndicator'].value_counts(normalize=True)
|
_____no_output_____
|
MIT
|
S06 - DS in the Real World/BLU15 - Model CSI/BLU15 - Learning Unit - Model CSI.ipynb
|
LDSSA/batch4-students
|
Looks like we get a bit more contraband now, and that is already a telling sign: if the training data had a different target distribution than the test set, the model's predictions might have a different distribution as well. It's good practice to have the same target distribution in both the training and test sets. Let's investigate further.
|
new_department_names = df['Department Name'].unique()
old_department_names = old_df['Department Name'].unique()
unknown_departments = [department for department in new_department_names if department not in old_department_names]
len(unknown_departments)
df[df['Department Name'].isin(unknown_departments)].shape
|
_____no_output_____
|
MIT
|
S06 - DS in the Real World/BLU15 - Model CSI/BLU15 - Learning Unit - Model CSI.ipynb
|
LDSSA/batch4-students
|
So we have 10 departments that the original model was not trained on, but they account for only 23 rows of the test set. Let's repeat the same check for the intervention location names.
|
new_location_names = df['InterventionLocationName'].unique()
old_location_names = old_df['InterventionLocationName'].unique()
unknown_locations = [location for location in new_location_names if location not in old_location_names]
len(unknown_locations)
df[df['InterventionLocationName'].isin(unknown_locations)].shape[0]
print('unknown locations: ', df[df['InterventionLocationName'].isin(unknown_locations)].shape[0] * 100 / df.shape[0], '%')
|
unknown locations: 5.3 %
|
MIT
|
S06 - DS in the Real World/BLU15 - Model CSI/BLU15 - Learning Unit - Model CSI.ipynb
|
LDSSA/batch4-students
|
Alright, a few more unknown locations. We don't know whether this feature was important for the model, so these 5.3% of unknown locations may or may not matter, but it's worth keeping in mind.

**Here are a few ideas of what we could try to do:**

1. Reanalyze the filtered locations, e.g. filter out more of the rare ones.
2. Create a new category for the rare locations (see the sketch below).
3. Analyze the unknown locations for typos.

Let's go further and take a look at the relation between department names and the amount of contraband they find. We're going to select the most common department names and then see the percentage of the contraband indicator in each one for the training and test sets.
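Before moving on, here is a small sketch of idea 2 from the list above: collapse locations that were rare in the training data into a single 'other' bucket before scoring. The threshold of 50 mirrors the min_frequency dictionary used earlier; a model would of course have to be retrained with the same grouping for this to be useful.

```python
# Locations that were common enough in the training data
location_counts = old_df['InterventionLocationName'].value_counts()
common_locations = set(location_counts[location_counts >= 50].index)

# Map everything else to a catch-all 'other' category
df['InterventionLocationName_grouped'] = df['InterventionLocationName'].apply(
    lambda loc: loc if loc in common_locations else 'other'
)
df['InterventionLocationName_grouped'].value_counts().head()
```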
|
common_departments = df['Department Name'].value_counts().head(20).index
departments_new = df[df['Department Name'].isin(common_departments)]
departments_old = old_df[old_df['Department Name'].isin(common_departments)]
pd.crosstab(departments_new['ContrabandIndicator'], departments_new['Department Name'], normalize="columns")
pd.crosstab(departments_old['ContrabandIndicator'], departments_old['Department Name'], normalize="columns")
|
_____no_output_____
|
MIT
|
S06 - DS in the Real World/BLU15 - Model CSI/BLU15 - Learning Unit - Model CSI.ipynb
|
LDSSA/batch4-students
|
We can clearly see that some departments show a huge difference in the contraband indicator. E.g. Bridgeport used to have 93% of False contrabands and now has only 62%; the situation is similar for Danbury and New Haven. Why? Hard to say, as there are really a lot of variables here. Maybe the departments were instructed on how to look for contraband. In any case, we might need to retrain the model. A minimal sketch of how this shift could be quantified follows; after that, let's finish reviewing the other columns.
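This sketch reuses the `departments_new` and `departments_old` frames defined above; the `head(10)` call is just for display and is an illustrative choice.
```
# Per-department change in the contraband distribution between old and new data.
new_ct = pd.crosstab(departments_new['ContrabandIndicator'],
                     departments_new['Department Name'], normalize="columns")
old_ct = pd.crosstab(departments_old['ContrabandIndicator'],
                     departments_old['Department Name'], normalize="columns")
# Largest absolute change in any class proportion, per department.
shift = (new_ct - old_ct).abs().max().sort_values(ascending=False)
shift.head(10)
```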
|
common_location = df['InterventionLocationName'].value_counts().head(20).index
locations_new = df[df['InterventionLocationName'].isin(common_location)]
locations_old = old_df[old_df['InterventionLocationName'].isin(common_location)]
pd.crosstab(locations_new['ContrabandIndicator'], locations_new['InterventionLocationName'], normalize="columns")
pd.crosstab(locations_old['ContrabandIndicator'], locations_old['InterventionLocationName'], normalize="columns")
|
_____no_output_____
|
MIT
|
S06 - DS in the Real World/BLU15 - Model CSI/BLU15 - Learning Unit - Model CSI.ipynb
|
LDSSA/batch4-students
|
What do we see? First of all, the InterventionLocationName and the Department Name are often the same. That sounds logical, as police officers usually work in the area of their department. We could try to create a feature indicating whether InterventionLocationName is equal to the Department Name, or we could simply drop one of the two columns if the values are always equal. What else? There are changes in the contraband distribution similar to the Department Name case. A minimal sketch of the equality-feature idea follows; after that, let's move on:
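The feature name and the whitespace/upper-casing normalization below are illustrative assumptions, not part of the original notebook.
```
# Flag whether the intervention location matches the officer's department.
df['LocationMatchesDepartment'] = (
    df['InterventionLocationName'].astype(str).str.strip().str.upper()
    == df['Department Name'].astype(str).str.strip().str.upper()
)
df['LocationMatchesDepartment'].mean()  # share of rows where the two coincide
```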
|
pd.crosstab(df['ContrabandIndicator'], df['InterventionReasonCode'], normalize="columns")
pd.crosstab(old_df['ContrabandIndicator'], old_df['InterventionReasonCode'], normalize="columns")
|
_____no_output_____
|
MIT
|
S06 - DS in the Real World/BLU15 - Model CSI/BLU15 - Learning Unit - Model CSI.ipynb
|
LDSSA/batch4-students
|
There are some small changes, but they don't seem to be significant, especially since all three values have around 33% contraband. Time for the officers:
|
df['ReportingOfficerIdentificationID'].value_counts()
filter_values(df, 'ReportingOfficerIdentificationID', 2)['ReportingOfficerIdentificationID'].nunique()
|
_____no_output_____
|
MIT
|
S06 - DS in the Real World/BLU15 - Model CSI/BLU15 - Learning Unit - Model CSI.ipynb
|
LDSSA/batch4-students
|
Well, it looks like there are a lot of unique values for the officer ID (1166 for 2000 records) and not many common ones (only 206 officers have more than 2 rows in the dataset), so it doesn't make much sense to analyze this column. Let's quickly go through the rest of the columns:
|
df.columns
rest = ['ResidentIndicator', 'SearchAuthorizationCode',
'StatuteReason', 'SubjectEthnicityCode',
'SubjectRaceCode', 'SubjectSexCode','TownResidentIndicator']
for col in rest:
display(pd.crosstab(df['ContrabandIndicator'], df[col], normalize="columns"))
display(pd.crosstab(old_df['ContrabandIndicator'], old_df[col], normalize="columns"))
|
_____no_output_____
|
MIT
|
S06 - DS in the Real World/BLU15 - Model CSI/BLU15 - Learning Unit - Model CSI.ipynb
|
LDSSA/batch4-students
|
Reviewing Automated Machine Learning Explanations. As machine learning becomes more and more prevalent, the predictions made by models have greater influence over many aspects of our society. For example, machine learning models are an increasingly significant factor in how banks decide to grant loans or doctors prioritise treatments. The ability to interpret and explain models is increasingly important, so that the rationale for the predictions made by machine learning models can be explained and justified, and any inadvertent bias in the model can be identified. When you use automated machine learning to train a model, you have the option to generate explanations of feature importance that quantify the extent to which each feature influences label prediction. In this lab, you'll explore the explanations generated by an automated machine learning experiment. Connect to Your Workspace. The first thing you need to do is to connect to your workspace using the Azure ML SDK. > **Note**: If the authenticated session with your Azure subscription has expired since you completed the previous exercise, you'll be prompted to reauthenticate.
|
import azureml.core
from azureml.core import Workspace
# Load the workspace from the saved config file
ws = Workspace.from_config()
print('Ready to use Azure ML {} to work with {}'.format(azureml.core.VERSION, ws.name))
|
_____no_output_____
|
MIT
|
09A - Reviewing Automated Machine Learning Explanations.ipynb
|
LucaSavio/DP100
|
Run an Automated Machine Learning Experiment. To reduce time in this lab, you'll run an automated machine learning experiment with only three iterations. Note that the **model_explainability** configuration option is set to **True**.
|
import pandas as pd
from azureml.train.automl import AutoMLConfig
from azureml.core.experiment import Experiment
from azureml.widgets import RunDetails
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
from azureml.core import Dataset
cluster_name = "gmalc-aml-clust" # Change to your compute cluster name
# Prepare data for training
default_ds = ws.get_default_datastore()
if 'diabetes dataset' not in ws.datasets:
default_ds.upload_files(files=['./data/diabetes.csv', './data/diabetes2.csv'], # Upload the diabetes csv files in /data
target_path='diabetes-data/', # Put it in a folder path in the datastore
overwrite=True, # Replace existing files of the same name
show_progress=True)
#Create a tabular dataset from the path on the datastore (this may take a short while)
tab_data_set = Dataset.Tabular.from_delimited_files(path=(default_ds, 'diabetes-data/*.csv'))
# Register the tabular dataset
try:
tab_data_set = tab_data_set.register(workspace=ws,
name='diabetes dataset',
description='diabetes data',
tags = {'format':'CSV'},
create_new_version=True)
print('Dataset registered.')
except Exception as ex:
print(ex)
else:
print('Dataset already registered.')
train_data = ws.datasets.get("diabetes dataset")
# Configure Auto ML
automl_config = AutoMLConfig(name='Automated ML Experiment',
task='classification',
compute_target='local',
enable_local_managed=True,
training_data = train_data,
n_cross_validations = 2,
label_column_name='Diabetic',
iterations=3,
primary_metric = 'AUC_weighted',
max_concurrent_iterations=3,
featurization='off',
model_explainability=True # Generate feature importance!
)
# Run the Auto ML experiment
print('Submitting Auto ML experiment...')
automl_experiment = Experiment(ws, 'diabetes_automl')
automl_run = automl_experiment.submit(automl_config)
automl_run.wait_for_completion(show_output=True)
RunDetails(automl_run).show()
|
_____no_output_____
|
MIT
|
09A - Reviewing Automated Machine Learning Explanations.ipynb
|
LucaSavio/DP100
|
View Feature Importance. When the experiment has completed in the widget above, click the run that produced the best result to see its details. Then scroll to the bottom of the visualizations to see the relative feature importance. You can also view feature importance for the best model produced by the experiment by using the **ExplanationClient** class:
|
from azureml.contrib.interpret.explanation.explanation_client import ExplanationClient
from azureml.core.run import Run
# Wait for the best model explanation run to complete
model_explainability_run_id = automl_run.get_properties().get('ModelExplainRunId')
print(model_explainability_run_id)
if model_explainability_run_id is not None:
model_explainability_run = Run(experiment=automl_experiment, run_id=model_explainability_run_id)
model_explainability_run.wait_for_completion(show_output=True)
# Get the best model (2nd item in outputs)
best_run, fitted_model = automl_run.get_output()
# Get the feature explanations
client = ExplanationClient.from_run(best_run)
engineered_explanations = client.download_model_explanation()
feature_importances = engineered_explanations.get_feature_importance_dict()
# Overall feature importance
print('Feature\tImportance')
for key, value in feature_importances.items():
print(key, '\t', value)
|
_____no_output_____
|
MIT
|
09A - Reviewing Automated Machine Learning Explanations.ipynb
|
LucaSavio/DP100
|
Classifying Fashion-MNIST. Now it's your turn to build and train a neural network. You'll be using the [Fashion-MNIST dataset](https://github.com/zalandoresearch/fashion-mnist), a drop-in replacement for the MNIST dataset. MNIST is actually quite trivial with neural networks, where you can easily achieve better than 97% accuracy. Fashion-MNIST is a set of 28x28 greyscale images of clothes. It's more complex than MNIST, so it's a better representation of the actual performance of your network, and a better representation of datasets you'll use in the real world. In this notebook, you'll build your own neural network. For the most part, you could just copy and paste the code from Part 3, but you wouldn't be learning. It's important for you to write the code yourself and get it to work. Feel free to consult the previous notebooks though as you work through this. First off, let's load the dataset through torchvision.
|
import torch
from torchvision import datasets, transforms
import helper
# Define a transform to normalize the data
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))])
# Download and load the training data
trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
# Download and load the test data
testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)
|
_____no_output_____
|
MIT
|
intro-to-pytorch/Part 4 - Fashion-MNIST (Solution).ipynb
|
sizigia/deep-learning-v2-pytorch
|
Here we can see one of the images.
|
image, label = next(iter(trainloader))
helper.imshow(image[0,:]);
|
_____no_output_____
|
MIT
|
intro-to-pytorch/Part 4 - Fashion-MNIST (Solution).ipynb
|
sizigia/deep-learning-v2-pytorch
|
Building the network. Here you should define your network. As with MNIST, each image is 28x28, which is a total of 784 pixels, and there are 10 classes. You should include at least one hidden layer. We suggest you use ReLU activations for the layers and to return the logits or log-softmax from the forward pass. It's up to you how many layers you add and the size of those layers.
|
from torch import nn, optim
import torch.nn.functional as F
# TODO: Define your network architecture here
class Classifier(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(784, 256)
self.fc2 = nn.Linear(256, 128)
self.fc3 = nn.Linear(128, 64)
self.fc4 = nn.Linear(64, 10)
def forward(self, x):
# make sure input tensor is flattened
x = x.view(x.shape[0], -1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = F.log_softmax(self.fc4(x), dim=1)
return x
|
_____no_output_____
|
MIT
|
intro-to-pytorch/Part 4 - Fashion-MNIST (Solution).ipynb
|
sizigia/deep-learning-v2-pytorch
|
Train the network. Now you should create your network and train it. First you'll want to define [the criterion](http://pytorch.org/docs/master/nn.html#loss-functions) (something like `nn.CrossEntropyLoss` or `nn.NLLLoss`) and [the optimizer](http://pytorch.org/docs/master/optim.html) (typically `optim.SGD` or `optim.Adam`). Then write the training code. Remember the training pass is a fairly straightforward process: * Make a forward pass through the network to get the logits * Use the logits to calculate the loss * Perform a backward pass through the network with `loss.backward()` to calculate the gradients * Take a step with the optimizer to update the weights. By adjusting the hyperparameters (hidden units, learning rate, etc.), you should be able to get the training loss below 0.4.
|
# TODO: Create the network, define the criterion and optimizer
model = Classifier()
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.parameters(), lr=0.003)
# TODO: Train the network here
epochs = 5
for e in range(epochs):
running_loss = 0
for images, labels in trainloader:
log_ps = model(images)
loss = criterion(log_ps, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss += loss.item()
else:
print(f"Training loss: {running_loss/len(trainloader)}")
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import helper
# Test out your network!
dataiter = iter(testloader)
images, labels = next(dataiter)  # use next() on the iterator; the .next() method is not available in newer PyTorch
img = images[1]
# TODO: Calculate the class probabilities (softmax) for img
ps = torch.exp(model(img))
# Plot the image and probabilities
helper.view_classify(img, ps, version='Fashion')
|
_____no_output_____
|
MIT
|
intro-to-pytorch/Part 4 - Fashion-MNIST (Solution).ipynb
|
sizigia/deep-learning-v2-pytorch
|
Einstein Tensor calculations using Symbolic module
|
import numpy as np
import pytest
import sympy
from sympy import cos, simplify, sin, sinh, tensorcontraction
from einsteinpy.symbolic import EinsteinTensor, MetricTensor, RicciScalar
sympy.init_printing()
|
_____no_output_____
|
Apache-2.0
|
EinsteinPy/Einstein Tensor symbolic calculation.ipynb
|
IsaacW4/Advanced-GR
|
Defining the Anti-de Sitter spacetime Metric
|
syms = sympy.symbols("t chi theta phi")
t, ch, th, ph = syms
m = sympy.diag(-1, cos(t) ** 2, cos(t) ** 2 * sinh(ch) ** 2, cos(t) ** 2 * sinh(ch) ** 2 * sin(th) ** 2).tolist()
metric = MetricTensor(m, syms)
|
_____no_output_____
|
Apache-2.0
|
EinsteinPy/Einstein Tensor symbolic calculation.ipynb
|
IsaacW4/Advanced-GR
|
Calculating the Einstein Tensor (with both indices covariant)
|
einst = EinsteinTensor.from_metric(metric)
einst.tensor()
|
_____no_output_____
|
Apache-2.0
|
EinsteinPy/Einstein Tensor symbolic calculation.ipynb
|
IsaacW4/Advanced-GR
|
Ex1 - Filtering and Sorting Data. This time we are going to pull data directly from the internet. Special thanks to: https://github.com/justmarkham for sharing the dataset and materials. Step 1. Import the necessary libraries
|
import pandas as pd
|
_____no_output_____
|
BSD-3-Clause
|
02_Filtering_&_Sorting/Chipotle/Exercises_with_solutions.ipynb
|
duongv/pandas_exercises
|
Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/justmarkham/DAT8/master/data/chipotle.tsv). Step 3. Assign it to a variable called chipo.
|
url = 'https://raw.githubusercontent.com/justmarkham/DAT8/master/data/chipotle.tsv'
chipo = pd.read_csv(url, sep = '\t')
|
_____no_output_____
|
BSD-3-Clause
|
02_Filtering_&_Sorting/Chipotle/Exercises_with_solutions.ipynb
|
duongv/pandas_exercises
|
Step 4. How many products cost more than $10.00?
|
# clean the item_price column and transform it in a float
prices = [float(value[1 : -1]) for value in chipo.item_price]
# reassign the column with the cleaned prices
chipo.item_price = prices
# delete the duplicates in item_name and quantity
chipo_filtered = chipo.drop_duplicates(['item_name','quantity'])
# select only the products with quantity equals to 1
chipo_one_prod = chipo_filtered[chipo_filtered.quantity == 1]
chipo_one_prod[chipo_one_prod['item_price']>10].item_name.nunique()
|
_____no_output_____
|
BSD-3-Clause
|
02_Filtering_&_Sorting/Chipotle/Exercises_with_solutions.ipynb
|
duongv/pandas_exercises
|
Step 5. What is the price of each item? print a data frame with only two columns item_name and item_price
|
# delete the duplicates in item_name and quantity
# chipo_filtered = chipo.drop_duplicates(['item_name','quantity'])
chipo[(chipo['item_name'] == 'Chicken Bowl') & (chipo['quantity'] == 1)]
# select only the products with quantity equals to 1
# chipo_one_prod = chipo_filtered[chipo_filtered.quantity == 1]
# select only the item_name and item_price columns
# price_per_item = chipo_one_prod[['item_name', 'item_price']]
# sort the values from the most to less expensive
# price_per_item.sort_values(by = "item_price", ascending = False).head(20)
|
_____no_output_____
|
BSD-3-Clause
|
02_Filtering_&_Sorting/Chipotle/Exercises_with_solutions.ipynb
|
duongv/pandas_exercises
|
Step 6. Sort by the name of the item
|
chipo.item_name.sort_values()
# OR
chipo.sort_values(by = "item_name")
|
_____no_output_____
|
BSD-3-Clause
|
02_Filtering_&_Sorting/Chipotle/Exercises_with_solutions.ipynb
|
duongv/pandas_exercises
|
Step 7. What was the quantity of the most expensive item ordered?
|
chipo.sort_values(by = "item_price", ascending = False).head(1)
|
_____no_output_____
|
BSD-3-Clause
|
02_Filtering_&_Sorting/Chipotle/Exercises_with_solutions.ipynb
|
duongv/pandas_exercises
|
Step 8. How many times were a Veggie Salad Bowl ordered?
|
chipo_salad = chipo[chipo.item_name == "Veggie Salad Bowl"]
len(chipo_salad)
|
_____no_output_____
|
BSD-3-Clause
|
02_Filtering_&_Sorting/Chipotle/Exercises_with_solutions.ipynb
|
duongv/pandas_exercises
|
Step 9. How many times people orderd more than one Canned Soda?
|
chipo_drink_steak_bowl = chipo[(chipo.item_name == "Canned Soda") & (chipo.quantity > 1)]
len(chipo_drink_steak_bowl)
|
_____no_output_____
|
BSD-3-Clause
|
02_Filtering_&_Sorting/Chipotle/Exercises_with_solutions.ipynb
|
duongv/pandas_exercises
|
Start-to-Finish Example: Unit Testing `GiRaFFE_NRPy`: $A_k$ to $B^i$. Author: Patrick Nelson. This module validates the A-to-B routine for `GiRaFFE`. **Notebook Status:** Validated. **Validation Notes:** This module will validate the routines in [Tutorial-GiRaFFE_NRPy-A2B](Tutorial-GiRaFFE_NRPy-A2B.ipynb). NRPy+ Source Code for this module: * [GiRaFFE_NRPy/GiRaFFE_NRPy_A2B.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_A2B.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-A2B.ipynb) Generates the driver to compute the magnetic field from the vector potential in arbitrary spacetimes. Introduction: This notebook validates our A-to-B solver for use in `GiRaFFE_NRPy`. Because the original `GiRaFFE` used staggered grids and we do not, we cannot trivially do a direct comparison to the old code. Instead, we will compare the numerical results with the expected analytic results. It is, in general, good coding practice to unit test functions individually to verify that they produce the expected and intended output. Here, we expect our functions to produce the correct cross product in an arbitrary spacetime. To that end, we will choose functions that are easy to differentiate, but lack the symmetries that would trivialize the finite-difference algorithm. Higher-order polynomials are one such type of function. When this notebook is run, if `Use_Gaussian_Data` is `True`, the difference between the approximate and exact magnetic field will be output to text files that can be found in the same directory as this notebook. These will be read in [Step 3](convergence) and used there to confirm second-order convergence of the algorithm. Otherwise, if `Use_Gaussian_Data` is `False`, polynomial data will be used and the significant digits of agreement between the approximate and exact magnetic field will be printed to the screen right after the code is run [here](compile_run). Table of Contents $$\label{toc}$$ This notebook is organized as follows: 1. [Step 1](setup): Set up core functions and parameters for unit testing the A2B algorithm 1. [Step 1.a](polynomial) Polynomial vector potential 1. [Step 1.b](gaussian) Gaussian vector potential 1. [Step 1.c](magnetic) The magnetic field $B^i$ 1. [Step 1.d](vector_potential) The vector potential $A_k$ 1. [Step 1.e](free_parameters) Set free parameters in the code 1. [Step 2](mainc): `A2B_unit_test.c`: The Main C Code 1. [Step 2.a](compile_run): Compile and run the code 1. [Step 3](convergence): Code validation: Verify that relative error in numerical solution converges to zero at the expected order 1. [Step 4](latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file Step 1: Set up core functions and parameters for unit testing the A2B algorithm \[Back to [top](toc)\] $$\label{setup}$$ We'll start by appending the relevant paths to `sys.path` so that we can access sympy modules in other places. Then, we'll import NRPy+ core functionality and set up a directory in which to carry out our test. We must also set the desired finite-differencing order.
|
import shutil, os, sys # Standard Python modules for multiplatform OS-level functions
# First, we'll add the parent directory to the list of directories Python will check for modules.
nrpy_dir_path = os.path.join("..")
if nrpy_dir_path not in sys.path:
sys.path.append(nrpy_dir_path)
from outputC import * # NRPy+: Core C code output module
import finite_difference as fin # NRPy+: Finite difference C code generation module
import NRPy_param_funcs as par # NRPy+: Parameter interface
import grid as gri # NRPy+: Functions having to do with numerical grids
import loop as lp # NRPy+: Generate C code loops
import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
import reference_metric as rfm # NRPy+: Reference metric support
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
out_dir = "Validation/"
cmd.mkdir(out_dir)
thismodule = "Start_to_Finish_UnitTest-GiRaFFE_NRPy-A2B"
# Set the finite-differencing order to 2
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER", 2)
Use_Gaussian_Data = True
a,b,c,d,e,f,g,h,l,m,n,o,p,q,r,s,t,u = par.Cparameters("REAL",thismodule,["a","b","c","d","e","f","g","h","l","m","n","o","p","q","r","s","t","u"],1e300)
gammaDD = ixp.register_gridfunctions_for_single_rank2("AUXEVOL","gammaDD","sym01")
AD = ixp.register_gridfunctions_for_single_rank1("EVOL","AD")
BU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","BU")
|
_____no_output_____
|
BSD-2-Clause
|
in_progress/Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-A2B.ipynb
|
Steve-Hawk/nrpytutorial
|
Step 1.a: Polynomial vector potential \[Back to [top](toc)\]$$\label{polynomial}$$ We will start with the simplest case: testing the second-order solver. In second-order finite differencing, we use a three-point stencil that can exactly differentiate polynomials up to quadratic order. So, we will use cubic functions of three variables. For instance,\begin{align}A_x &= ax^3 + by^3 + cz^3 + dy^2 + ez^2 + f \\A_y &= gx^3 + hy^3 + lz^3 + mx^2 + nz^2 + o \\A_z &= px^3 + qy^3 + rz^3 + sx^2 + ty^2 + u. \\\end{align}It will be much simpler to let NRPy+ handle most of this work. So, we will import the core functionality of NRPy+, build the expressions, and then output them using `outputC()`.
|
if not Use_Gaussian_Data:
is_gaussian = par.Cparameters("int",thismodule,"is_gaussian",0)
par.set_parval_from_str("reference_metric::CoordSystem","Cartesian")
rfm.reference_metric()
x = rfm.xxCart[0]
y = rfm.xxCart[1]
z = rfm.xxCart[2]
AD[0] = a*x**3 + b*y**3 + c*z**3 + d*y**2 + e*z**2 + f
AD[1] = g*x**3 + h*y**3 + l*z**3 + m*x**2 + n*z**2 + o
AD[2] = p*x**3 + q*y**3 + r*z**3 + s*x**2 + t*y**2 + u
|
_____no_output_____
|
BSD-2-Clause
|
in_progress/Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-A2B.ipynb
|
Steve-Hawk/nrpytutorial
|
Step 1.b: Gaussian vector potential \[Back to [top](toc)\]$$\label{gaussian}$$ Alternatively, we might want to use different functions for the vector potential. Here, we'll give some 3D Gaussians:\begin{align}A_x &= a e^{-((x-b)^2+(y-c)^2+(z-d)^2)} \\A_y &= f e^{-((x-g)^2+(y-h)^2+(z-l)^2)} \\A_z &= m e^{-((x-n)^2+(y-o)^2+(z-p)^2)}, \\\end{align}where $e$ is the base of the natural logarithm.
|
if Use_Gaussian_Data:
is_gaussian = par.Cparameters("int",thismodule,"is_gaussian",1)
par.set_parval_from_str("reference_metric::CoordSystem","Cartesian")
rfm.reference_metric()
x = rfm.xxCart[0]
y = rfm.xxCart[1]
z = rfm.xxCart[2]
AD[0] = a * sp.exp(-((x-b)**2 + (y-c)**2 + (z-d)**2))
AD[1] = f * sp.exp(-((x-g)**2 + (y-h)**2 + (z-l)**2))
AD[2] = m * sp.exp(-((x-n)**2 + (y-o)**2 + (z-p)**2))
|
_____no_output_____
|
BSD-2-Clause
|
in_progress/Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-A2B.ipynb
|
Steve-Hawk/nrpytutorial
|
Step 1.c: The magnetic field $B^i$ \[Back to [top](toc)\]$$\label{magnetic}$$ Next, we'll let NRPy+ compute derivatives analytically according to $$B^i = \frac{[ijk]}{\sqrt{\gamma}} \partial_j A_k.$$ Then we can carry out two separate tests to verify the numerical derivatives. First, we will verify that when we let the cubic terms be zero, the two calculations of $B^i$ agree to roundoff error. Second, we will verify that when we set the cubic terms, our error is dominated by truncation error that converges to zero at the expected rate. We will need a sample metric $\gamma_{ij}$ for $\sqrt{\gamma}$. We will thus write a function with the following arbitrary equations. \begin{align}\gamma_{xx} &= ax^3 + by^3 + cz^3 + dy^2 + ez^2 + 1 \\\gamma_{yy} &= gx^3 + hy^3 + lz^3 + mx^2 + nz^2 + 1 \\\gamma_{zz} &= px^3 + qy^3 + rz^3 + sx^2 + ty^2 + 1. \\\gamma_{xy} &= \frac{1}{10} \exp\left(-\left((x-b)^2+(y-c)^2+(z-d)^2\right)\right) \\\gamma_{xz} &= \frac{1}{10} \exp\left(-\left((x-g)^2+(y-h)^2+(z-l)^2\right)\right) \\\gamma_{yz} &= \frac{1}{10} \exp\left(-\left((x-n)^2+(y-o)^2+(z-p)^2\right)\right), \\\end{align}
|
par.set_parval_from_str("reference_metric::CoordSystem","Cartesian")
rfm.reference_metric()
x = rfm.xxCart[0]
y = rfm.xxCart[1]
z = rfm.xxCart[2]
gammaDD[0][0] = a*x**3 + b*y**3 + c*z**3 + d*y**2 + e*z**2 + sp.sympify(1)
gammaDD[1][1] = g*x**3 + h*y**3 + l*z**3 + m*x**2 + n*z**2 + sp.sympify(1)
gammaDD[2][2] = p*x**3 + q*y**3 + r*z**3 + s*x**2 + t*y**2 + sp.sympify(1)
gammaDD[0][1] = sp.Rational(1,10) * sp.exp(-((x-b)**2 + (y-c)**2 + (z-d)**2))
gammaDD[0][2] = sp.Rational(1,10) * sp.exp(-((x-g)**2 + (y-h)**2 + (z-l)**2))
gammaDD[1][2] = sp.Rational(1,10) * sp.exp(-((x-n)**2 + (y-o)**2 + (z-p)**2))
import GRHD.equations as gh
gh.compute_sqrtgammaDET(gammaDD)
import WeylScal4NRPy.WeylScalars_Cartesian as weyl
LeviCivitaDDD = weyl.define_LeviCivitaSymbol_rank3()
LeviCivitaUUU = ixp.zerorank3()
for i in range(3):
for j in range(3):
for k in range(3):
LeviCivitaUUU[i][j][k] = LeviCivitaDDD[i][j][k] / gh.sqrtgammaDET
B_analyticU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","B_analyticU")
for i in range(3):
B_analyticU[i] = 0
for j in range(3):
for k in range(3):
B_analyticU[i] += LeviCivitaUUU[i][j][k] * sp.diff(AD[k],rfm.xxCart[j])
metric_gfs_to_print = [\
lhrh(lhs=gri.gfaccess("aux_gfs","gammaDD00"),rhs=gammaDD[0][0]),\
lhrh(lhs=gri.gfaccess("aux_gfs","gammaDD01"),rhs=gammaDD[0][1]),\
lhrh(lhs=gri.gfaccess("aux_gfs","gammaDD02"),rhs=gammaDD[0][2]),\
lhrh(lhs=gri.gfaccess("aux_gfs","gammaDD11"),rhs=gammaDD[1][1]),\
lhrh(lhs=gri.gfaccess("aux_gfs","gammaDD12"),rhs=gammaDD[1][2]),\
lhrh(lhs=gri.gfaccess("aux_gfs","gammaDD22"),rhs=gammaDD[2][2]),\
]
desc = "Calculate the metric gridfunctions"
name = "calculate_metric_gfs"
outCfunction(
outfile = os.path.join(out_dir,name+".h"), desc=desc, name=name,
params ="const paramstruct *restrict params,REAL *restrict xx[3],REAL *restrict auxevol_gfs",
body = fin.FD_outputC("returnstring",metric_gfs_to_print,params="outCverbose=False").replace("IDX4","IDX4S"),
loopopts="AllPoints,Read_xxs")
|
Output C function calculate_metric_gfs() to file Validation/calculate_metric_gfs.h
|
BSD-2-Clause
|
in_progress/Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-A2B.ipynb
|
Steve-Hawk/nrpytutorial
|
We also should write a function that will use the analytic formulae for $B^i$.
|
B_analyticU_to_print = [\
lhrh(lhs=gri.gfaccess("out_gfs","B_analyticU0"),rhs=B_analyticU[0]),\
lhrh(lhs=gri.gfaccess("out_gfs","B_analyticU1"),rhs=B_analyticU[1]),\
lhrh(lhs=gri.gfaccess("out_gfs","B_analyticU2"),rhs=B_analyticU[2]),\
]
desc = "Calculate the exact magnetic field"
name = "calculate_exact_BU"
outCfunction(
outfile = os.path.join(out_dir,name+".h"), desc=desc, name=name,
params ="const paramstruct *restrict params,REAL *restrict xx[3],REAL *restrict auxevol_gfs",
body = fin.FD_outputC("returnstring",B_analyticU_to_print,params="outCverbose=False").replace("IDX4","IDX4S"),
loopopts="AllPoints,Read_xxs")
|
Output C function calculate_exact_BU() to file Validation/calculate_exact_BU.h
|
BSD-2-Clause
|
in_progress/Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-A2B.ipynb
|
Steve-Hawk/nrpytutorial
|
Step 1.d: The vector potential $A_k$ \[Back to [top](toc)\]$$\label{vector_potential}$$We'll now write a function to set the vector potential $A_k$. This simply uses NRPy+ to generate most of the code from the expressions we wrote at the beginning. Then, we'll need to call the function from the module `GiRaFFE_NRPy_A2B` to generate the code we need. Also, we will declare the parameters for the vector potential functions.
|
AD_to_print = [\
lhrh(lhs=gri.gfaccess("out_gfs","AD0"),rhs=AD[0]),\
lhrh(lhs=gri.gfaccess("out_gfs","AD1"),rhs=AD[1]),\
lhrh(lhs=gri.gfaccess("out_gfs","AD2"),rhs=AD[2]),\
]
desc = "Calculate the vector potential"
name = "calculate_AD"
outCfunction(
outfile = os.path.join(out_dir,name+".h"), desc=desc, name=name,
params ="const paramstruct *restrict params,REAL *restrict xx[3],REAL *restrict out_gfs",
body = fin.FD_outputC("returnstring",AD_to_print,params="outCverbose=False").replace("IDX4","IDX4S"),
loopopts="AllPoints,Read_xxs")
# cmd.mkdir(os.path.join(out_dir))
import GiRaFFE_NRPy.GiRaFFE_NRPy_A2B as A2B
# We'll generate these into the A2B subdirectory since that's where the functions
# we're testing expect them to be.
AD = ixp.declarerank1("AD") # Make sure these aren't analytic expressions
gammaDD = ixp.declarerank2("gammaDD","sym01")
A2B.GiRaFFE_NRPy_A2B(os.path.join(out_dir,"A2B"),gammaDD,AD,BU)
|
Output C function calculate_AD() to file Validation/calculate_AD.h
|
BSD-2-Clause
|
in_progress/Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-A2B.ipynb
|
Steve-Hawk/nrpytutorial
|
Step 1.e: Set free parameters in the code \[Back to [top](toc)\]$$\label{free_parameters}$$We also need to create the files that interact with NRPy's C parameter interface.
|
# Step 3.d.i: Generate declare_Cparameters_struct.h, set_Cparameters_default.h, and set_Cparameters[-SIMD].h
# par.generate_Cparameters_Ccodes(os.path.join(out_dir))
# Step 3.d.ii: Set free_parameters.h
with open(os.path.join(out_dir,"free_parameters.h"),"w") as file:
file.write("""
// Override parameter defaults with values based on command line arguments and NGHOSTS.
// We'll use this grid. It has one point and one ghost zone.
params.Nxx0 = atoi(argv[1]);
params.Nxx1 = atoi(argv[2]);
params.Nxx2 = atoi(argv[3]);
params.Nxx_plus_2NGHOSTS0 = params.Nxx0 + 2*NGHOSTS;
params.Nxx_plus_2NGHOSTS1 = params.Nxx1 + 2*NGHOSTS;
params.Nxx_plus_2NGHOSTS2 = params.Nxx2 + 2*NGHOSTS;
// Step 0d: Set up space and time coordinates
// Step 0d.i: Declare \Delta x^i=dxx{0,1,2} and invdxx{0,1,2}, as well as xxmin[3] and xxmax[3]:
const REAL xxmin[3] = {-0.01,-0.01,-0.01};
const REAL xxmax[3] = { 0.01, 0.01, 0.01};
params.dxx0 = (xxmax[0] - xxmin[0]) / ((REAL)params.Nxx_plus_2NGHOSTS0-1.0);
params.dxx1 = (xxmax[1] - xxmin[1]) / ((REAL)params.Nxx_plus_2NGHOSTS1-1.0);
params.dxx2 = (xxmax[2] - xxmin[2]) / ((REAL)params.Nxx_plus_2NGHOSTS2-1.0);
printf("dxx0,dxx1,dxx2 = %.5e,%.5e,%.5e\\n",params.dxx0,params.dxx1,params.dxx2);
params.invdx0 = 1.0 / params.dxx0;
params.invdx1 = 1.0 / params.dxx1;
params.invdx2 = 1.0 / params.dxx2;
\n""")
# Generates declare_Cparameters_struct.h, set_Cparameters_default.h, and set_Cparameters[-SIMD].h
par.generate_Cparameters_Ccodes(os.path.join(out_dir))
|
_____no_output_____
|
BSD-2-Clause
|
in_progress/Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-A2B.ipynb
|
Steve-Hawk/nrpytutorial
|
Step 2: `A2B_unit_test.c`: The Main C Code \[Back to [top](toc)\]$$\label{mainc}$$Now that we have our vector potential and analytic magnetic field to compare against, we will start writing our unit test. We'll also import common C functionality, define `REAL`, the number of ghost zones, and the faces, and set the standard macros for NRPy+ style memory access.
|
%%writefile $out_dir/A2B_unit_test.c
// These are common packages that we are likely to need.
#include "stdio.h"
#include "stdlib.h"
#include "math.h"
#include "string.h" // Needed for strncmp, etc.
#include "stdint.h" // Needed for Windows GCC 6.x compatibility
#include <time.h> // Needed to set a random seed.
#define REAL double
#include "declare_Cparameters_struct.h"
const int MAXFACE = -1;
const int NUL = +0;
const int MINFACE = +1;
const int NGHOSTS = 3;
const int NGHOSTS_A2B = 3;
REAL a,b,c,d,e,f,g,h,l,m,n,o,p,q,r,s,t,u;
// Standard NRPy+ memory access:
#define IDX4S(g,i,j,k) \
( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) + Nxx_plus_2NGHOSTS2 * (g) ) ) )
|
Overwriting Validation//A2B_unit_test.c
|
BSD-2-Clause
|
in_progress/Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-A2B.ipynb
|
Steve-Hawk/nrpytutorial
|
We'll now define the gridfunction names.
|
%%writefile -a $out_dir/A2B_unit_test.c
// Let's also #define the NRPy+ gridfunctions
#define AD0GF 0
#define AD1GF 1
#define AD2GF 2
#define NUM_EVOL_GFS 3
#define GAMMADD00GF 0
#define GAMMADD01GF 1
#define GAMMADD02GF 2
#define GAMMADD11GF 3
#define GAMMADD12GF 4
#define GAMMADD22GF 5
#define B_ANALYTICU0GF 6
#define B_ANALYTICU1GF 7
#define B_ANALYTICU2GF 8
#define BU0GF 9
#define BU1GF 10
#define BU2GF 11
#define NUM_AUXEVOL_GFS 12
|
Appending to Validation//A2B_unit_test.c
|
BSD-2-Clause
|
in_progress/Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-A2B.ipynb
|
Steve-Hawk/nrpytutorial
|
Now, we'll handle the different A2B codes. There are several things to do here. First, we'll add `include`s to the C code so that we have access to the functions we want to test, as generated above. We will choose to do this in the subfolder `A2B` relative to this tutorial.
|
%%writefile -a $out_dir/A2B_unit_test.c
#include "A2B/driver_AtoB.h" // This file contains both functions we need.
#include "calculate_exact_BU.h"
#include "calculate_AD.h"
#include "calculate_metric_gfs.h"
|
Appending to Validation//A2B_unit_test.c
|
BSD-2-Clause
|
in_progress/Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-A2B.ipynb
|
Steve-Hawk/nrpytutorial
|
Now, we'll write the main method. First, we'll set up the grid. In this test, we cannot use only one point. As we are testing a three-point stencil, we can get away with a minimal $3 \times 3 \times 3$ grid. Then, we'll write the A fields. After that, we'll calculate the magnetic field two ways.
|
%%writefile -a $out_dir/A2B_unit_test.c
int main(int argc, const char *argv[]) {
paramstruct params;
#include "set_Cparameters_default.h"
// Let the last argument be the test we're doing. 1 = coarser grid, 0 = finer grid.
int do_quadratic_test = atoi(argv[4]);
// Step 0c: Set free parameters, overwriting Cparameters defaults
// by hand or with command-line input, as desired.
#include "free_parameters.h"
#include "set_Cparameters-nopointer.h"
// We'll define our grid slightly different from how we normally would. We let our outermost
// ghostzones coincide with xxmin and xxmax instead of the interior of the grid. This means
// that the ghostzone points will have identical positions so we can do convergence tests of them. // Step 0d.ii: Set up uniform coordinate grids
REAL *xx[3];
xx[0] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS0);
xx[1] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS1);
xx[2] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS2);
for(int j=0;j<Nxx_plus_2NGHOSTS0;j++) xx[0][j] = xxmin[0] + ((REAL)(j))*dxx0;
for(int j=0;j<Nxx_plus_2NGHOSTS1;j++) xx[1][j] = xxmin[1] + ((REAL)(j))*dxx1;
for(int j=0;j<Nxx_plus_2NGHOSTS2;j++) xx[2][j] = xxmin[2] + ((REAL)(j))*dxx2;
//for(int j=0;j<Nxx_plus_2NGHOSTS0;j++) printf("x[%d] = %.5e\n",j,xx[0][j]);
//for(int i=0;i<Nxx_plus_2NGHOSTS0;i++) printf("xx[0][%d] = %.15e\\n",i,xx[0][i]);
// This is the array to which we'll write the NRPy+ variables.
REAL *auxevol_gfs = (REAL *)malloc(sizeof(REAL) * NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS2 * Nxx_plus_2NGHOSTS1 * Nxx_plus_2NGHOSTS0);
REAL *evol_gfs = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * Nxx_plus_2NGHOSTS2 * Nxx_plus_2NGHOSTS1 * Nxx_plus_2NGHOSTS0);
for(int i=0;i<Nxx_plus_2NGHOSTS0;i++) for(int j=0;j<Nxx_plus_2NGHOSTS1;j++) for(int k=0;k<Nxx_plus_2NGHOSTS2;k++) { // initialize B^i to zero over the full grid, including ghost zones
auxevol_gfs[IDX4S(BU0GF,i,j,k)] = 0.0;
auxevol_gfs[IDX4S(BU1GF,i,j,k)] = 0.0;
auxevol_gfs[IDX4S(BU2GF,i,j,k)] = 0.0;
}
// We now want to set up the vector potential. First, we must set the coefficients.
if(is_gaussian) {
// Gaussian coefficients:
// Magnitudes:
a = (double)(rand()%20)/5.0;
f = (double)(rand()%20)/5.0;
m = (double)(rand()%20)/5.0;
// Offsets:
b = (double)(rand()%10-5)/1000.0;
c = (double)(rand()%10-5)/1000.0;
d = (double)(rand()%10-5)/1000.0;
g = (double)(rand()%10-5)/1000.0;
h = (double)(rand()%10-5)/1000.0;
l = (double)(rand()%10-5)/1000.0;
n = (double)(rand()%10-5)/1000.0;
o = (double)(rand()%10-5)/1000.0;
p = (double)(rand()%10-5)/1000.0;
/*printf("Offsets: b,c,d = %f,%f,%f\n",b,c,d);
printf("Offsets: g,h,l = %f,%f,%f\n",g,h,l);
printf("Offsets: n,o,p = %f,%f,%f\n",n,o,p);*/
// First, calculate the test data on our grid:
}
else {
// Polynomial coefficients
// We will use random integers between -10 and 10. For the first test, we let the
// Cubic coefficients remain zero. Those are a,b,c,g,h,l,p,q, and r.
d = (double)(rand()%20-10);
e = (double)(rand()%20-10);
f = (double)(rand()%20-10);
m = (double)(rand()%20-10);
n = (double)(rand()%20-10);
o = (double)(rand()%20-10);
s = (double)(rand()%20-10);
t = (double)(rand()%20-10);
u = (double)(rand()%20-10);
}
calculate_metric_gfs(¶ms,xx,auxevol_gfs);
if(do_quadratic_test && !is_gaussian) {
calculate_AD(¶ms,xx,evol_gfs);
// We'll also calculate the exact solution for B^i
calculate_exact_BU(¶ms,xx,auxevol_gfs);
// And now for the numerical derivatives:
driver_A_to_B(¶ms,evol_gfs,auxevol_gfs);
printf("This test uses quadratic vector potentials, so the magnetic fields should agree to roundoff error.\n");
printf("Below, each row represents one point. Each column represents a component of the magnetic field.\n");
printf("Shown is the number of Significant Digits of Agreement, at least 13 is good, higher is better:\n\n");
for(int i2=0;i2<Nxx_plus_2NGHOSTS2;i2++) for(int i1=0;i1<Nxx_plus_2NGHOSTS1;i1++) for(int i0=0;i0<Nxx_plus_2NGHOSTS0;i0++) {
printf("i0,i1,i2=%d,%d,%d; SDA: %.3f, %.3f, %.3f\n",i0,i1,i2,
1.0-log10(2.0*fabs(auxevol_gfs[IDX4S(B_ANALYTICU0GF,i0,i1,i2)]-auxevol_gfs[IDX4S(BU0GF,i0,i1,i2)])/(fabs(auxevol_gfs[IDX4S(B_ANALYTICU0GF,i0,i1,i2)])+fabs(auxevol_gfs[IDX4S(BU0GF,i0,i1,i2)])+1.e-15)),
1.0-log10(2.0*fabs(auxevol_gfs[IDX4S(B_ANALYTICU1GF,i0,i1,i2)]-auxevol_gfs[IDX4S(BU1GF,i0,i1,i2)])/(fabs(auxevol_gfs[IDX4S(B_ANALYTICU1GF,i0,i1,i2)])+fabs(auxevol_gfs[IDX4S(BU1GF,i0,i1,i2)])+1.e-15)),
1.0-log10(2.0*fabs(auxevol_gfs[IDX4S(B_ANALYTICU2GF,i0,i1,i2)]-auxevol_gfs[IDX4S(BU2GF,i0,i1,i2)])/(fabs(auxevol_gfs[IDX4S(B_ANALYTICU2GF,i0,i1,i2)])+fabs(auxevol_gfs[IDX4S(BU2GF,i0,i1,i2)])+1.e-15))
);
/*printf("%.3f, %.3f, %.3f\n",
auxevol_gfs[IDX4S(BU0GF,i0,i1,i2)],
auxevol_gfs[IDX4S(BU1GF,i0,i1,i2)],
auxevol_gfs[IDX4S(BU2GF,i0,i1,i2)]
);*/
}
}
if(!is_gaussian) {
// Now, we'll set the cubic coefficients:
a = (double)(rand()%20-10);
b = (double)(rand()%20-10);
c = (double)(rand()%20-10);
g = (double)(rand()%20-10);
h = (double)(rand()%20-10);
l = (double)(rand()%20-10);
p = (double)(rand()%20-10);
q = (double)(rand()%20-10);
r = (double)(rand()%20-10);
// First, calculate the test data on our grid:
calculate_metric_gfs(¶ms,xx,auxevol_gfs);
}
// And recalculate on our initial grid:
calculate_AD(¶ms,xx,evol_gfs);
// We'll also calculate the exact solution for B^i
calculate_exact_BU(¶ms,xx,auxevol_gfs);
// And now for the numerical derivatives:
driver_A_to_B(¶ms,evol_gfs,auxevol_gfs);
char filename[100];
sprintf(filename,"out%d-numer.txt",Nxx0);
FILE *out2D = fopen(filename, "w");
if(do_quadratic_test || is_gaussian) {
for(int i2=0;i2<Nxx_plus_2NGHOSTS2;i2++) for(int i1=0;i1<Nxx_plus_2NGHOSTS1;i1++) for(int i0=0;i0<Nxx_plus_2NGHOSTS0;i0++) {
// We print the difference between approximate and exact numbers.
fprintf(out2D,"%.16e\t%.16e\t%.16e %e %e %e\n",
auxevol_gfs[IDX4S(B_ANALYTICU0GF,i0,i1,i2)]-auxevol_gfs[IDX4S(BU0GF,i0,i1,i2)],
auxevol_gfs[IDX4S(B_ANALYTICU1GF,i0,i1,i2)]-auxevol_gfs[IDX4S(BU1GF,i0,i1,i2)],
auxevol_gfs[IDX4S(B_ANALYTICU2GF,i0,i1,i2)]-auxevol_gfs[IDX4S(BU2GF,i0,i1,i2)],
xx[0][i0],xx[1][i1],xx[2][i2]
);
}
}
else {
for(int i2=0;i2<Nxx_plus_2NGHOSTS2;i2++) for(int i1=0;i1<Nxx_plus_2NGHOSTS1;i1++) for(int i0=0;i0<Nxx_plus_2NGHOSTS0;i0++) {
if (i0%2==0 && i1%2==0 && i2%2==0) {
// We print the difference between approximate and exact numbers.
fprintf(out2D,"%.16e\t%.16e\t%.16e %e %e %e\n",
auxevol_gfs[IDX4S(B_ANALYTICU0GF,i0,i1,i2)]-auxevol_gfs[IDX4S(BU0GF,i0,i1,i2)],
auxevol_gfs[IDX4S(B_ANALYTICU1GF,i0,i1,i2)]-auxevol_gfs[IDX4S(BU1GF,i0,i1,i2)],
auxevol_gfs[IDX4S(B_ANALYTICU2GF,i0,i1,i2)]-auxevol_gfs[IDX4S(BU2GF,i0,i1,i2)],
xx[0][i0],xx[1][i1],xx[2][i2]
);
}
}
}
fclose(out2D);
}
|
Appending to Validation//A2B_unit_test.c
|
BSD-2-Clause
|
in_progress/Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-A2B.ipynb
|
Steve-Hawk/nrpytutorial
|
Step 2.a: Compile and run the code \[Back to [top](toc)\]$$\label{compile_run}$$Now that we have our file, we can compile it and run the executable.
|
import time
print("Now compiling, should take ~2 seconds...\n")
start = time.time()
cmd.C_compile(os.path.join(out_dir,"A2B_unit_test.c"), os.path.join(out_dir,"A2B_unit_test"))
end = time.time()
print("Finished in "+str(end-start)+" seconds.\n\n")
print("Now running...\n")
start = time.time()
!./Validation/A2B_unit_test 1 1 1 1
if Use_Gaussian_Data:
# To do a convergence test, we'll also need a second grid with twice the resolution.
!./Validation/A2B_unit_test 7 7 7 1
# !./Validation/A2B_unit_test 19 19 19 1
end = time.time()
print("Finished in "+str(end-start)+" seconds.\n\n")
|
Now compiling, should take ~2 seconds...
Compiling executable...
Executing `gcc -Ofast -fopenmp -march=native -funroll-loops Validation/A2B_unit_test.c -o Validation/A2B_unit_test -lm`...
Finished executing in 0.6135389804840088 seconds.
Finished compilation.
Finished in 0.6216833591461182 seconds.
Now running...
dxx0,dxx1,dxx2 = 3.33333e-03,3.33333e-03,3.33333e-03
dxx0,dxx1,dxx2 = 1.66667e-03,1.66667e-03,1.66667e-03
Finished in 0.25135135650634766 seconds.
|
BSD-2-Clause
|
in_progress/Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-A2B.ipynb
|
Steve-Hawk/nrpytutorial
|
Step 3: Code validation: Verify that relative error in numerical solution converges to zero at the expected order \[Back to [top](toc)\]$$\label{convergence}$$ Now that we have shown that when we use a quadratic vector potential, we get roundoff-level agreement (which is to be expected, since the finite-differencing used approximates the underlying function with a quadratic), we will do a convergence test to show that when we can't exactly model the function, the truncation error dominates and converges to zero at the expected rate. For this, we use cubic functions for the vector potential. In the code above, we output the difference between the numeric and exact magnetic fields at the overlapping, non-edge, non-vertex points of two separate grids. Here, we import that data and calculate the convergence in the usual way, $$k = \log_2 \left( \frac{F - F_1}{F - F_2} \right),$$where $k$ is the convergence order, $F$ is the exact solution, $F_1$ is the approximate solution on the coarser grid with resolution $\Delta x$, and $F_2$ is the approximate solution on the finer grid with resolution $\Delta x/2$. Here, we will calculate the convergence of the L2 Norm over the points in each region: $$| B^i_{\rm approx} - B^i_{\rm exact}| = \sqrt{\frac{1}{N} \sum_{ijk} \left( B^i_{\rm approx} - B^i_{\rm exact} \right)^2}$$
|
import numpy as np
import matplotlib.pyplot as plt
Data1 = np.loadtxt("out1-numer.txt")
Data2 = np.loadtxt("out7-numer.txt")
# print("Convergence test: All should be approximately 2\n")
# convergence = np.log(np.divide(np.abs(Data1),np.abs(Data2)))/np.log(2)
# for i in range(len(convergence[:,0])):
# print(convergence[i,:])
def IDX4(i,j,k,Nxx_plus_2NGHOSTS0,Nxx_plus_2NGHOSTS1,Nxx_plus_2NGHOSTS2):
return (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) + Nxx_plus_2NGHOSTS2 * (0) ) )
comp = 0 # 0->Bx, 1->By, 2->Bz
# First, let's do this over the interior
N = 7 # This is the number of total gridpoints
nface = 0 # This is the number of points we are taking the norm of.
nint = 0 # This is the number of points we are taking the norm of.
L2_1 = 0
L2_1_xm = 0 # We declare one L2 norm for each face.
L2_1_xp = 0
L2_1_ym = 0
L2_1_yp = 0
L2_1_zm = 0
L2_1_zp = 0
for k in range(N):
for j in range(N):
for i in range(N):
if i==0:
L2_1_xm += Data1[IDX4(i,j,k,N,N,N),comp]**2
nface += 1
if i==N-1:
L2_1_xp += Data1[IDX4(i,j,k,N,N,N),comp]**2
if j==0:
L2_1_ym += Data1[IDX4(i,j,k,N,N,N),comp]**2
if j==N-1:
L2_1_yp += Data1[IDX4(i,j,k,N,N,N),comp]**2
if k==0:
L2_1_zm += Data1[IDX4(i,j,k,N,N,N),comp]**2
if k==N-1:
L2_1_zp += Data1[IDX4(i,j,k,N,N,N),comp]**2
if not (i%(N-1)==0 or j%(N-1)==0 or k%(N-1)==0):
L2_1 += Data1[IDX4(i,j,k,N,N,N),comp]**2
nint += 1
L2_1 = np.sqrt(L2_1/(nint))
L2_1_xm = np.sqrt(L2_1_xm/(nface))
L2_1_xp = np.sqrt(L2_1_xp/(nface))
L2_1_ym = np.sqrt(L2_1_ym/(nface))
L2_1_yp = np.sqrt(L2_1_yp/(nface))
L2_1_zm = np.sqrt(L2_1_zm/(nface))
L2_1_zp = np.sqrt(L2_1_zp/(nface))
N = 13 # This is the number of total gridpoints
nface = 0 # This is the number of points we are taking the norm of.
nint = 0 # This is the number of points we are taking the norm of.
L2_2 = 0
L2_2_xm = 0
L2_2_xp = 0
L2_2_ym = 0
L2_2_yp = 0
L2_2_zm = 0
L2_2_zp = 0
for k in range(N):
for j in range(N):
for i in range(N):
if i==0:
L2_2_xm += Data2[IDX4(i,j,k,N,N,N),comp]**2
nface += 1
if i==N-1:
L2_2_xp += Data2[IDX4(i,j,k,N,N,N),comp]**2
if j==0:
L2_2_ym += Data2[IDX4(i,j,k,N,N,N),comp]**2
if j==N-1:
L2_2_yp += Data2[IDX4(i,j,k,N,N,N),comp]**2
if k==0:
L2_2_zm += Data2[IDX4(i,j,k,N,N,N),comp]**2
if k==N-1:
L2_2_zp += Data2[IDX4(i,j,k,N,N,N),comp]**2
if not (i%(N-1)==0 or j%(N-1)==0 or k%(N-1)==0):
L2_2 += Data2[IDX4(i,j,k,N,N,N),comp]**2
nint += 1
L2_2 = np.sqrt(L2_2/(nint))
L2_2_xm = np.sqrt(L2_2_xm/(nface))
L2_2_xp = np.sqrt(L2_2_xp/(nface))
L2_2_ym = np.sqrt(L2_2_ym/(nface))
L2_2_yp = np.sqrt(L2_2_yp/(nface))
L2_2_zm = np.sqrt(L2_2_zm/(nface))
L2_2_zp = np.sqrt(L2_2_zp/(nface))
print("Face | Res | L2 norm | Conv. Order")
print(" Int | Dx | " + "{:.7f}".format(L2_1) + " | -- ")
print(" -- | Dx/2 | " + "{:.7f}".format(L2_2) + " | " + "{:.5f}".format(np.log2(L2_1/L2_2)))
print(" -x | Dx | " + "{:.7f}".format(L2_1_xm) + " | -- ")
print(" -- | Dx/2 | " + "{:.7f}".format(L2_2_xm) + " | " + "{:.5f}".format(np.log2(L2_1_xm/L2_2_xm)))
print(" +x | Dx | " + "{:.7f}".format(L2_1_xp) + " | -- ")
print(" -- | Dx/2 | " + "{:.7f}".format(L2_2_xp) + " | " + "{:.5f}".format(np.log2(L2_1_xp/L2_2_xp)))
print(" -y | Dx | " + "{:.7f}".format(L2_1_ym) + " | -- ")
print(" -- | Dx/2 | " + "{:.7f}".format(L2_2_ym) + " | " + "{:.5f}".format(np.log2(L2_1_ym/L2_2_ym)))
print(" +y | Dx | " + "{:.7f}".format(L2_1_yp) + " | -- ")
print(" -- | Dx/2 | " + "{:.7f}".format(L2_2_yp) + " | " + "{:.5f}".format(np.log2(L2_1_yp/L2_2_yp)))
print(" -z | Dx | " + "{:.7f}".format(L2_1_zm) + " | -- ")
print(" -- | Dx/2 | " + "{:.7f}".format(L2_2_zm) + " | " + "{:.5f}".format(np.log2(L2_1_zm/L2_2_zm)))
print(" +z | Dx | " + "{:.7f}".format(L2_1_zp) + " | -- ")
print(" -- | Dx/2 | " + "{:.7f}".format(L2_2_zp) + " | " + "{:.5f}".format(np.log2(L2_1_zp/L2_2_zp)))
|
Face | Res | L2 norm | Conv. Order
Int | Dx | 0.0000005 | --
-- | Dx/2 | 0.0000001 | 2.03057
-x | Dx | 0.0000008 | --
-- | Dx/2 | 0.0000002 | 2.08857
+x | Dx | 0.0000008 | --
-- | Dx/2 | 0.0000002 | 2.08857
-y | Dx | 0.0000008 | --
-- | Dx/2 | 0.0000002 | 1.64224
+y | Dx | 0.0000016 | --
-- | Dx/2 | 0.0000004 | 1.87830
-z | Dx | 0.0000010 | --
-- | Dx/2 | 0.0000002 | 2.09971
+z | Dx | 0.0000008 | --
-- | Dx/2 | 0.0000002 | 1.99483
|
BSD-2-Clause
|
in_progress/Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-A2B.ipynb
|
Steve-Hawk/nrpytutorial
|
Step 4: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](toc)\]$$\label{latex_pdf_output}$$The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename[Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-A2B.pdf](Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-A2B.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
|
!jupyter nbconvert --to latex --template latex_nrpy_style.tplx --log-level='WARN' Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-A2B.ipynb
!pdflatex -interaction=batchmode Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-A2B.tex
!pdflatex -interaction=batchmode Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-A2B.tex
!pdflatex -interaction=batchmode Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-A2B.tex
!rm -f Tut*.out Tut*.aux Tut*.log
|
This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)
restricted \write18 enabled.
entering extended mode
This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)
restricted \write18 enabled.
entering extended mode
This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)
restricted \write18 enabled.
entering extended mode
|
BSD-2-Clause
|
in_progress/Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-A2B.ipynb
|
Steve-Hawk/nrpytutorial
|
http://www.yr.no/place/Norway/Telemark/Vinje/Haukeliseter/climate.month12.html
|
import matplotlib.pyplot as plt
import matplotlib.dates as dates
import numpy as np
import csv
import pandas as pd
import datetime
from datetime import date
import calendar
%matplotlib inline
year = np.arange(2000,2017, 1)
T_av = [-4.1,\
-8.2,\
-10.7,\
-4.3,\
-4.1,\
-5.5,\
-0.5,\
-6.4,\
-6.6,\
-9.4,\
-14.8,\
-4.4,\
-10.7,\
-2.1,\
-6.0,\
-2.4,\
-2.3]
T_av = [float(i) for i in T_av]
Prec = [131.9,\
91.0,\
57.7,\
120.8,\
70.9,\
79.2,\
140.2,\
143.6,\
72.2,\
104.4,\
50.9,\
145.2,\
112.5,\
196.9,\
73.6,\
132.5,\
73.2]
T_ano = -7.5 +4.4
T_ano
prec_tick = np.arange(0,300,50)
t_tick = np.arange(-16,2,2)
fig1 = plt.figure(figsize=(11,7))
ax1 = fig1.add_subplot(1,1,1)
bar2 = ax1.bar(year,Prec, label='precipitation',color='lightblue')
ax1.axhline(y=100,c="gray",linewidth=2,zorder=1, linestyle = '--')
plt.grid(b=None, which='major', axis='y')
# add some text for labels, title and axes ticks
ax1.set_ylabel('Precipitation (%)', fontsize = '16')
ax1.set_yticklabels(prec_tick, fontsize='14')
ax1.set_title('30-yr Climate statistics December (2000 - 2016)', fontsize = '16')
ax1.set_xticks(year)
ax1.set_xticklabels(year, rotation=45,fontsize = '14') # rotate x label
ax1.set_ylim([0, 250])
def autolabel(rects):
"""
Attach a text label above each bar displaying its height
"""
for rect in rects:
height = rect.get_height()
ax1.text(rect.get_x() + rect.get_width()/2., 1.05*height,
'%.1f' % height,  # label each bar with its value to one decimal place
ha='center', va='bottom',fontsize =14)
autolabel(bar2)
plt.savefig('../Observations/clim_precDec_Haukeli.png')
#plt.close(fig)
plt.show(fig1)
fig2 = plt.figure(figsize=(11,7))
ax2 = fig2.add_subplot(1,1,1)
line1 = ax2.plot(year,T_av, 'og', label = 'T_avg', markersize = 16)
ax2.axhline(y = -7.5, c ='darkgreen', linewidth = 2, zorder = 0, linestyle = '--')
plt.grid(b=None, which='major', axis='both')
ax2.set_title('30-yr Climate statistics December (2000 - 2016)', fontsize = '16')
ax2.set_xticks(year)
ax2.set_yticklabels(t_tick, fontsize='14')
ax2.set_xticklabels(year, rotation=45,fontsize = '14') # rotate x label
#ax1.legend((bar1[0], bar2[0]), ('Men', 'Women'))
# add some text for labels, title and axes ticks
ax2.set_ylabel('Temperature C$^o$', fontsize = '16')
ax2.set_ylim([-15.5, 0])
plt.savefig('../Observations/clim_tempDec_Haukeli.png')
#plt.close(fig)
plt.show(fig2)
t08 = 100/204.7 * 15.9
t09 = 100/204.7 * 6.7
t10 = 100/204.7 * 5.7
t11 = 100/204.7 * 5.9
t22 = 100/204.7 * 21.4
t23 = 100/204.7 * 23.6
t24 = 100/204.7 * 24.9
t25 = 100/204.7 * 20.8
t26 = 100/204.7 * 13.7
t27 = 100/204.7 * 20.9
t31 = 100/204.7 * 37.8
print(t08,t09,t10,t11)
print(t22,t23,t24,t25,t26,t27)
print(t31)
t22+t23+t24+t25+t26+t27
|
_____no_output_____
|
MIT
|
yr_Dec_clim_2000_2016.ipynb
|
franzihe/Python_Masterthesis
|
DB2 Jupyter Notebook ExtensionsVersion: 2021-08-23 This code is imported as a Jupyter notebook extension in any notebooks you create with DB2 code in it. Place the following line of code in any notebook that you want to use these commands with:&37;run db2.ipynbThis code defines a Jupyter/Python magic command called `%sql` which allows you to execute DB2 specific calls to the database. There are other packages available for manipulating databases, but this one has been specificallydesigned for demonstrating a number of the SQL features available in DB2.There are two ways of executing the `%sql` command. A single line SQL statement would use theline format of the magic command:%sql SELECT * FROM EMPLOYEEIf you have a large block of sql then you would place the %%sql command at the beginning of the block and thenplace the SQL statements into the remainder of the block. Using this form of the `%%sql` statement means that thenotebook cell can only contain SQL and no other statements.%%sqlSELECT * FROM EMPLOYEEORDER BY LASTNAMEYou can have multiple lines in the SQL block (`%%sql`). The default SQL delimiter is the semi-column (`;`).If you have scripts (triggers, procedures, functions) that use the semi-colon as part of the script, you will need to use the `-d` option to change the delimiter to an at "`@`" sign. %%sql -dSELECT * FROM EMPLOYEE@CREATE PROCEDURE ...@The `%sql` command allows most DB2 commands to execute and has a special version of the CONNECT statement. A CONNECT by itself will attempt to reconnect to the database using previously used settings. If it cannot connect, it will prompt the user for additional information. The CONNECT command has the following format:%sql CONNECT TO <database> USER <userid> USING <password | ?> HOST <ip address> PORT <port number>If you use a "`?`" for the password field, the system will prompt you for a password. This avoids typing the password as clear text on the screen. If a connection is not successful, the system will print the errormessage associated with the connect request.If the connection is successful, the parameters are saved on your system and will be used the next time yourun a SQL statement, or when you issue the %sql CONNECT command with no parameters. In addition to the -d option, there are a number different options that you can specify at the beginning of the SQL: - `-d, -delim` - Change SQL delimiter to "`@`" from "`;`" - `-q, -quiet` - Quiet results - no messages returned from the function - `-r, -array` - Return the result set as an array of values instead of a dataframe - `-t, -time` - Time the following SQL statement and return the number of times it executes in 1 second - `-j` - Format the first character column of the result set as a JSON record - `-json` - Return result set as an array of json records - `-a, -all` - Return all rows in answer set and do not limit display - `-grid` - Display the results in a scrollable grid - `-pb, -bar` - Plot the results as a bar chart - `-pl, -line` - Plot the results as a line chart - `-pp, -pie` - Plot the results as a pie chart - `-e, -echo` - Any macro expansions are displayed in an output box - `-sampledata` - Create and load the EMPLOYEE and DEPARTMENT tablesYou can pass python variables to the `%sql` command by using the `{}` braces with the name of thevariable inbetween. Note that you will need to place proper punctuation around the variable in the event theSQL command requires it. 
For instance, the following example will find employee '000010' in the EMPLOYEE table: empno = '000010' %sql SELECT LASTNAME FROM EMPLOYEE WHERE EMPNO='{empno}' The other option is to use parameter markers. What you would need to do is use the name of the variable with a colon in front of it, and the program will prepare the statement and then pass the variable to Db2 when the statement is executed. This allows you to create complex strings that might contain quote characters and other special characters and not have to worry about enclosing the string with the correct quotes. Note that you do not place the quotes around the variable even though it is a string: empno = '000020' %sql SELECT LASTNAME FROM EMPLOYEE WHERE EMPNO=:empno Development SQL. The previous set of `%sql` and `%%sql` commands deals with SQL statements and commands that are run in an interactive manner. There is a class of SQL commands that are more suited to a development environment where code is iterated or requires changing input. The commands that are associated with this form of SQL are:
- AUTOCOMMIT
- COMMIT/ROLLBACK
- PREPARE
- EXECUTE
Autocommit is the default manner in which SQL statements are executed. At the end of the successful completion of a statement, the results are committed to the database. There is no concept of a transaction where multiple DML/DDL statements are considered one transaction. The `AUTOCOMMIT` command allows you to turn autocommit `OFF` or `ON`. This means that the SQL commands run after the `AUTOCOMMIT OFF` command are not committed to the database until a `COMMIT` or `ROLLBACK` command is issued. `COMMIT` (`WORK`) will finalize all of the transactions (`COMMIT`) to the database and `ROLLBACK` will undo all of the changes. If you issue a `SELECT` statement during the execution of your block, the results will reflect all of your changes. If you `ROLLBACK` the transaction, the changes will be lost. A small end-to-end sketch of this autocommit workflow is given at the end of this cell. `PREPARE` is typically used in a situation where you want to repeatedly execute a SQL statement with different variables without incurring the SQL compilation overhead. For instance:
```
x = %sql PREPARE SELECT LASTNAME FROM EMPLOYEE WHERE EMPNO=?
for y in ['000010','000020','000030']:
    %sql execute :x using :y
```
`EXECUTE` is used to execute a previously compiled statement. To retrieve the error codes that might be associated with any SQL call, the following variables are updated after every call:
* SQLCODE
* SQLSTATE
* SQLERROR - Full error message retrieved from Db2
Install Db2 Python Driver. If the ibm_db driver is not installed on your system, the subsequent Db2 commands will fail. In order to install the Db2 driver, issue the following command from a Jupyter notebook cell:
```
!pip install --user ibm_db
```
Db2 Jupyter Extensions. This section of code has the import statements and global variables defined for the remainder of the functions.
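As promised above, here is a small sketch of the autocommit workflow. It uses only the `AUTOCOMMIT`, `COMMIT` and `ROLLBACK` commands described in this section; the `STAFF_TEMP` table and its rows are purely hypothetical placeholders.
```
%sql AUTOCOMMIT OFF
%sql INSERT INTO STAFF_TEMP VALUES (1,'KATE')
%sql INSERT INTO STAFF_TEMP VALUES (2,'ALEX')
# A SELECT in the same session already sees the uncommitted rows.
%sql SELECT * FROM STAFF_TEMP
# Make the changes permanent; issuing ROLLBACK instead would discard both inserts.
%sql COMMIT WORK
%sql AUTOCOMMIT ON
```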
|
#
# Set up Jupyter MAGIC commands "sql".
# %sql will return results from a DB2 select statement or execute a DB2 command
#
# IBM 2021: George Baklarz
# Version 2021-07-13
#
from __future__ import print_function
from IPython.display import HTML as pHTML, Image as pImage, display as pdisplay, Javascript as Javascript
from IPython.core.magic import (Magics, magics_class, line_magic,
cell_magic, line_cell_magic, needs_local_scope)
import ibm_db
import pandas
import ibm_db_dbi
import json
import matplotlib
import matplotlib.pyplot as plt
import getpass
import os
import pickle
import time
import sys
import re
import warnings
warnings.filterwarnings("ignore")
# Python Hack for Input between 2 and 3
try:
input = raw_input
except NameError:
pass
_settings = {
"maxrows" : 10,
"maxgrid" : 5,
"runtime" : 1,
"display" : "PANDAS",
"database" : "",
"hostname" : "localhost",
"port" : "50000",
"protocol" : "TCPIP",
"uid" : "DB2INST1",
"pwd" : "password",
"ssl" : ""
}
_environment = {
"jupyter" : True,
"qgrid" : True
}
_display = {
'fullWidthRows': True,
'syncColumnCellResize': True,
'forceFitColumns': False,
'defaultColumnWidth': 150,
'rowHeight': 28,
'enableColumnReorder': False,
'enableTextSelectionOnCells': True,
'editable': False,
'autoEdit': False,
'explicitInitialization': True,
'maxVisibleRows': 5,
'minVisibleRows': 5,
'sortable': True,
'filterable': False,
'highlightSelectedCell': False,
'highlightSelectedRow': True
}
# Connection settings for statements
_connected = False
_hdbc = None
_hdbi = None
_stmt = []
_stmtID = []
_stmtSQL = []
_vars = {}
_macros = {}
_flags = []
_debug = False
# Db2 Error Messages and Codes
sqlcode = 0
sqlstate = "0"
sqlerror = ""
sqlelapsed = 0
# Check to see if QGrid is installed
try:
import qgrid
qgrid.set_defaults(grid_options=_display)
except:
_environment['qgrid'] = False
# Check if we are running in iPython or Jupyter
try:
if (get_ipython().config == {}):
_environment['jupyter'] = False
_environment['qgrid'] = False
else:
_environment['jupyter'] = True
except:
_environment['jupyter'] = False
_environment['qgrid'] = False
|
_____no_output_____
|
Apache-2.0
|
db2.ipynb
|
Db2-DTE-POC/Db2-Openshift-11.5.4
|
## Options

There are four options that can be set with the **`%sql`** command. These options are shown below with the default value shown in parentheses.

- **`MAXROWS n (10)`** - The maximum number of rows that will be displayed before summary information is shown. If the answer set is less than this number of rows, it will be completely shown on the screen. If the answer set is larger than this amount, only the first 5 rows and last 5 rows of the answer set will be displayed. If you want to display a very large answer set, you may want to consider using the grid option `-g` to display the results in a scrollable table. If you really want to show all results then setting MAXROWS to -1 will return all output.
- **`MAXGRID n (5)`** - The maximum size of a grid display. When displaying a result set in a grid (`-g`), the default size of the display window is 5 rows. You can set this to a larger size so that more rows are shown on the screen. Note that the minimum size always remains at 5, which means that if the system is unable to display your maximum row size it will reduce the table display until it fits.
- **`DISPLAY PANDAS | GRID (PANDAS)`** - Display the results as a PANDAS dataframe (default) or as a scrollable GRID.
- **`RUNTIME n (1)`** - When using the timer option on a SQL statement, the statement will execute for **`n`** seconds. The result that is returned is the number of times the SQL statement executed rather than the execution time of the statement. The default value for runtime is one second, so if the SQL is very complex you will need to increase the run time.
- **`LIST`** - Display the current settings.

To set an option use the following syntax:

```
%sql option option_name value option_name value ....
```

The following example sets all options:

```
%sql option maxrows 100 runtime 2 display grid maxgrid 10
```

The values will **not** be saved between Jupyter notebook sessions. If you need to retrieve the current option values, use the LIST command as the only argument:

```
%sql option list
```
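As a hypothetical example of combining these settings with the flags described earlier, the cell below widens the grid window and makes GRID the default display before browsing the sample EMPLOYEE table (created with `-sampledata`); it assumes the qgrid package is installed so that grid output is available.

```
# Keep up to 20 rows visible in the scrollable grid and use GRID by default
%sql option maxgrid 20 display grid

# -a returns the complete answer set instead of only the first/last rows
%sql -a SELECT EMPNO, LASTNAME, SALARY FROM EMPLOYEE ORDER BY LASTNAME

# Confirm the stored values
%sql option list
```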
|
def setOptions(inSQL):
global _settings, _display
cParms = inSQL.split()
cnt = 0
while cnt < len(cParms):
if cParms[cnt].upper() == 'MAXROWS':
if cnt+1 < len(cParms):
try:
_settings["maxrows"] = int(cParms[cnt+1])
except Exception as err:
errormsg("Invalid MAXROWS value provided.")
pass
cnt = cnt + 1
else:
errormsg("No maximum rows specified for the MAXROWS option.")
return
elif cParms[cnt].upper() == 'MAXGRID':
if cnt+1 < len(cParms):
try:
maxgrid = int(cParms[cnt+1])
if (maxgrid <= 5): # Minimum window size is 5
maxgrid = 5
_display["maxVisibleRows"] = int(cParms[cnt+1])
try:
import qgrid
qgrid.set_defaults(grid_options=_display)
except:
_environment['qgrid'] = False
except Exception as err:
errormsg("Invalid MAXGRID value provided.")
pass
cnt = cnt + 1
else:
errormsg("No maximum rows specified for the MAXROWS option.")
return
elif cParms[cnt].upper() == 'RUNTIME':
if cnt+1 < len(cParms):
try:
_settings["runtime"] = int(cParms[cnt+1])
except Exception as err:
errormsg("Invalid RUNTIME value provided.")
pass
cnt = cnt + 1
else:
errormsg("No value provided for the RUNTIME option.")
return
elif cParms[cnt].upper() == 'DISPLAY':
if cnt+1 < len(cParms):
if (cParms[cnt+1].upper() == 'GRID'):
_settings["display"] = 'GRID'
elif (cParms[cnt+1].upper() == 'PANDAS'):
_settings["display"] = 'PANDAS'
else:
errormsg("Invalid DISPLAY value provided.")
cnt = cnt + 1
else:
errormsg("No value provided for the DISPLAY option.")
return
elif (cParms[cnt].upper() == 'LIST'):
print("(MAXROWS) Maximum number of rows displayed: " + str(_settings["maxrows"]))
print("(MAXGRID) Maximum grid display size: " + str(_settings["maxgrid"]))
print("(RUNTIME) How many seconds to a run a statement for performance testing: " + str(_settings["runtime"]))
print("(DISPLAY) Use PANDAS or GRID display format for output: " + _settings["display"])
return
else:
cnt = cnt + 1
save_settings()
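Although `setOptions` is normally reached through the `%sql option` syntax, it can also be called directly with the same text that would follow `%sql option`, which is handy when testing changes to this notebook. The sketch below assumes the rest of db2.ipynb (in particular `errormsg` and `save_settings`) has already been loaded.

```
# Equivalent to: %sql option maxrows 50 runtime 2 display pandas
setOptions("MAXROWS 50 RUNTIME 2 DISPLAY PANDAS")

# Print the values that were stored in _settings
setOptions("LIST")
```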
|
_____no_output_____
|
Apache-2.0
|
db2.ipynb
|
Db2-DTE-POC/Db2-Openshift-11.5.4
|
## SQL Help

The calling format of this routine is:

```
sqlhelp()
```

This code displays help related to the %sql magic command. This help is displayed when you issue a %sql or %%sql command by itself, or use the %sql -h flag.
|
def sqlhelp():
global _environment
if (_environment["jupyter"] == True):
sd = '<td style="text-align:left;">'
ed1 = '</td>'
ed2 = '</td>'
sh = '<th style="text-align:left;">'
eh1 = '</th>'
eh2 = '</th>'
sr = '<tr>'
er = '</tr>'
helpSQL = """
<h3>SQL Options</h3>
<p>The following options are available as part of a SQL statement. The options are always preceded with a
minus sign (i.e. -q).
<table>
{sr}
{sh}Option{eh1}{sh}Description{eh2}
{er}
{sr}
{sd}a, all{ed1}{sd}Return all rows in answer set and do not limit display{ed2}
{er}
{sr}
{sd}d{ed1}{sd}Change SQL delimiter to "@" from ";"{ed2}
{er}
{sr}
        {sd}e, echo{ed1}{sd}Echo the SQL command that was generated after macro and variable substitution.{ed2}
{er}
{sr}
{sd}h, help{ed1}{sd}Display %sql help information.{ed2}
{er}
{sr}
{sd}j{ed1}{sd}Create a pretty JSON representation. Only the first column is formatted{ed2}
{er}
{sr}
{sd}json{ed1}{sd}Retrieve the result set as a JSON record{ed2}
{er}
{sr}
{sd}pb, bar{ed1}{sd}Plot the results as a bar chart{ed2}
{er}
{sr}
{sd}pl, line{ed1}{sd}Plot the results as a line chart{ed2}
{er}
{sr}
{sd}pp, pie{ed1}{sd}Plot Pie: Plot the results as a pie chart{ed2}
{er}
{sr}
{sd}q, quiet{ed1}{sd}Quiet results - no answer set or messages returned from the function{ed2}
{er}
{sr}
{sd}r, array{ed1}{sd}Return the result set as an array of values{ed2}
{er}
{sr}
{sd}sampledata{ed1}{sd}Create and load the EMPLOYEE and DEPARTMENT tables{ed2}
{er}
{sr}
{sd}t,time{ed1}{sd}Time the following SQL statement and return the number of times it executes in 1 second{ed2}
{er}
{sr}
{sd}grid{ed1}{sd}Display the results in a scrollable grid{ed2}
{er}
</table>
"""
else:
helpSQL = """
SQL Options
The following options are available as part of a SQL statement. Options are always
preceded with a minus sign (i.e. -q).
Option Description
a, all Return all rows in answer set and do not limit display
d Change SQL delimiter to "@" from ";"
e, echo Echo the SQL command that was generated after substitution
h, help Display %sql help information
j Create a pretty JSON representation. Only the first column is formatted
json Retrieve the result set as a JSON record
pb, bar Plot the results as a bar chart
pl, line Plot the results as a line chart
pp, pie Plot Pie: Plot the results as a pie chart
q, quiet Quiet results - no answer set or messages returned from the function
r, array Return the result set as an array of values
sampledata Create and load the EMPLOYEE and DEPARTMENT tables
t,time Time the SQL statement and return the execution count per second
grid Display the results in a scrollable grid
"""
helpSQL = helpSQL.format(**locals())
if (_environment["jupyter"] == True):
pdisplay(pHTML(helpSQL))
else:
print(helpSQL)
|
_____no_output_____
|
Apache-2.0
|
db2.ipynb
|
Db2-DTE-POC/Db2-Openshift-11.5.4
|
## Connection Help

The calling format of this routine is:

```
connected_help()
```

This code displays help related to the CONNECT command. The help is displayed when you issue a %sql CONNECT command with no arguments, or when you run a SQL statement and there isn't a connection to a database yet.
|
def connected_help():
sd = '<td style="text-align:left;">'
ed = '</td>'
sh = '<th style="text-align:left;">'
eh = '</th>'
sr = '<tr>'
er = '</tr>'
if (_environment['jupyter'] == True):
helpConnect = """
<h3>Connecting to Db2</h3>
<p>The CONNECT command has the following format:
<p>
<pre>
%sql CONNECT TO <database> USER <userid> USING <password|?> HOST <ip address> PORT <port number> <SSL>
%sql CONNECT CREDENTIALS <varname>
%sql CONNECT CLOSE
%sql CONNECT RESET
%sql CONNECT PROMPT - use this to be prompted for values
</pre>
<p>
If you use a "?" for the password field, the system will prompt you for a password. This avoids typing the
password as clear text on the screen. If a connection is not successful, the system will print the error
message associated with the connect request.
<p>
The <b>CREDENTIALS</b> option allows you to use credentials that are supplied by Db2 on Cloud instances.
The credentials can be supplied as a variable and if successful, the variable will be saved to disk
for future use. If you create another notebook and use the identical syntax, if the variable
is not defined, the contents on disk will be used as the credentials. You should assign the
credentials to a variable that represents the database (or schema) that you are communicating with.
Using familiar names makes it easier to remember the credentials when connecting.
<p>
<b>CONNECT CLOSE</b> will close the current connection, but will not reset the database parameters. This means that
if you issue the CONNECT command again, the system should be able to reconnect you to the database.
<p>
<b>CONNECT RESET</b> will close the current connection and remove any information on the connection. You will need
to issue a new CONNECT statement with all of the connection information.
<p>
If the connection is successful, the parameters are saved on your system and will be used the next time you
run an SQL statement, or when you issue the %sql CONNECT command with no parameters.
<p>If you issue CONNECT RESET, all of the current values will be deleted and you will need to
issue a new CONNECT statement.
<p>A CONNECT command without any parameters will attempt to re-connect to the previous database you
        were using. If the connection could not be established, the program will prompt you for
the values. To cancel the connection attempt, enter a blank value for any of the values. The connection
panel will request the following values in order to connect to Db2:
<table>
{sr}
{sh}Setting{eh}
{sh}Description{eh}
{er}
{sr}
{sd}Database{ed}{sd}Database name you want to connect to.{ed}
{er}
{sr}
{sd}Hostname{ed}
        {sd}Use localhost if Db2 is running on your own machine, but this can be an IP address or host name.{ed}
{er}
{sr}
{sd}PORT{ed}
{sd}The port to use for connecting to Db2. This is usually 50000.{ed}
{er}
{sr}
{sd}SSL{ed}
        {sd}If you are connecting to a secure port (50001) with SSL then you must include this keyword in the connect string.{ed}
        {er}
        {sr}
{sd}Userid{ed}
{sd}The userid to use when connecting (usually DB2INST1){ed}
{er}
{sr}
{sd}Password{ed}
{sd}No password is provided so you have to enter a value{ed}
{er}
</table>
"""
else:
helpConnect = """\
Connecting to Db2
The CONNECT command has the following format:
%sql CONNECT TO database USER userid USING password | ?
HOST ip address PORT port number SSL
%sql CONNECT CREDENTIALS varname
%sql CONNECT CLOSE
%sql CONNECT RESET
If you use a "?" for the password field, the system will prompt you for a password.
This avoids typing the password as clear text on the screen. If a connection is
not successful, the system will print the error message associated with the connect
request.
The CREDENTIALS option allows you to use credentials that are supplied by Db2 on
Cloud instances. The credentials can be supplied as a variable and if successful,
the variable will be saved to disk for future use. If you create another notebook
and use the identical syntax, if the variable is not defined, the contents on disk
will be used as the credentials. You should assign the credentials to a variable
that represents the database (or schema) that you are communicating with. Using
familiar names makes it easier to remember the credentials when connecting.
CONNECT CLOSE will close the current connection, but will not reset the database
parameters. This means that if you issue the CONNECT command again, the system
should be able to reconnect you to the database.
CONNECT RESET will close the current connection and remove any information on the
connection. You will need to issue a new CONNECT statement with all of the connection
information.
If the connection is successful, the parameters are saved on your system and will be
used the next time you run an SQL statement, or when you issue the %sql CONNECT
command with no parameters. If you issue CONNECT RESET, all of the current values
will be deleted and you will need to issue a new CONNECT statement.
A CONNECT command without any parameters will attempt to re-connect to the previous
    database you were using. If the connection could not be established, the program will
prompt you for the values. To cancel the connection attempt, enter a blank value for
any of the values. The connection panel will request the following values in order
to connect to Db2:
Setting Description
Database Database name you want to connect to
Hostname Use localhost if Db2 is running on your own machine, but this can
be an IP address or host name.
PORT The port to use for connecting to Db2. This is usually 50000.
Userid The userid to use when connecting (usually DB2INST1)
Password No password is provided so you have to enter a value
SSL Include this keyword to indicate you are connecting via SSL (usually port 50001)
"""
helpConnect = helpConnect.format(**locals())
if (_environment['jupyter'] == True):
pdisplay(pHTML(helpConnect))
else:
print(helpConnect)
|
_____no_output_____
|
Apache-2.0
|
db2.ipynb
|
Db2-DTE-POC/Db2-Openshift-11.5.4
|
## Prompt for Connection Information

If you are running an SQL statement and have not yet connected to a database, the %sql command will prompt you for connection information. In order to connect to a database, you must supply:

- Database name
- Host name (IP address or name)
- Port number
- Userid
- Password
- Secure socket

The routine is called without any parameters:

```
connected_prompt()
```
|
# Prompt for Connection information
def connected_prompt():
global _settings
_database = ''
_hostname = ''
_port = ''
_uid = ''
_pwd = ''
_ssl = ''
print("Enter the database connection details (Any empty value will cancel the connection)")
_database = input("Enter the database name: ");
if (_database.strip() == ""): return False
_hostname = input("Enter the HOST IP address or symbolic name: ");
if (_hostname.strip() == ""): return False
_port = input("Enter the PORT number: ");
if (_port.strip() == ""): return False
_ssl = input("Is this a secure (SSL) port (y or n)");
if (_ssl.strip() == ""): return False
if (_ssl == "n"):
_ssl = ""
else:
_ssl = "Security=SSL;"
_uid = input("Enter Userid on the DB2 system: ").upper();
if (_uid.strip() == ""): return False
_pwd = getpass.getpass("Password [password]: ");
if (_pwd.strip() == ""): return False
_settings["database"] = _database.strip()
_settings["hostname"] = _hostname.strip()
_settings["port"] = _port.strip()
_settings["uid"] = _uid.strip()
_settings["pwd"] = _pwd.strip()
_settings["ssl"] = _ssl.strip()
_settings["maxrows"] = 10
_settings["maxgrid"] = 5
_settings["runtime"] = 1
return True
# Split port and IP addresses
def split_string(in_port,splitter=":"):
# Split input into an IP address and Port number
global _settings
checkports = in_port.split(splitter)
ip = checkports[0]
if (len(checkports) > 1):
port = checkports[1]
else:
port = None
return ip, port
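The `split_string` helper is what allows the CONNECT parser to accept a combined `host:port` value for the HOST keyword. A quick sketch of its behaviour, using a made-up host name:

```
# Host and port supplied together
ip, port = split_string("db2server.example.com:50001")
print(ip, port)    # -> db2server.example.com 50001

# No port supplied - the caller falls back to the default of 50000
ip, port = split_string("localhost")
print(ip, port)    # -> localhost None
```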
|
_____no_output_____
|
Apache-2.0
|
db2.ipynb
|
Db2-DTE-POC/Db2-Openshift-11.5.4
|
## Connect Syntax Parser

The parseConnect routine is used to parse the CONNECT command that the user issued within the %sql command. The format of the call is:

```
parseConnect(inSQL, local_ns)
```

The inSQL string contains the CONNECT keyword with some additional parameters. The format of the CONNECT command is one of:

```
CONNECT RESET
CONNECT CLOSE
CONNECT CREDENTIALS <varname>
CONNECT TO database USER userid USING password HOST hostname PORT portnumber
```

If you have credentials available from Db2 on Cloud, place the contents of the credentials into a variable and then use the `CONNECT CREDENTIALS <varname>` syntax to connect to the database.

In addition, supplying a question mark (?) for the password will result in the program prompting you for the password rather than having it as clear text in your scripts.

When all of the information in the command has been checked, the db2_doConnect function is called to actually do the connection to the database.
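Based on the keys that parseConnect reads from the credentials object (`db`, `hostname`, `port`, `username` and `password`), a minimal credentials variable could look like the sketch below. All of the values are placeholders; a real Db2 on Cloud service supplies its own JSON document, which you would assign to the variable instead.

```
# Hypothetical credentials - replace every value with the ones for your instance
sampledb = {
    "db"       : "BLUDB",
    "hostname" : "db2.example.com",
    "port"     : "50000",
    "username" : "user1",
    "password" : "secret"
}

# Connect using the credentials; on success they are saved to sampledb.pickle for reuse
%sql CONNECT CREDENTIALS sampledb
```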
|
# Parse the CONNECT statement and execute if possible
def parseConnect(inSQL,local_ns):
global _settings, _connected
_connected = False
cParms = inSQL.split()
cnt = 0
_settings["ssl"] = ""
while cnt < len(cParms):
if cParms[cnt].upper() == 'TO':
if cnt+1 < len(cParms):
_settings["database"] = cParms[cnt+1].upper()
cnt = cnt + 1
else:
errormsg("No database specified in the CONNECT statement")
return
elif cParms[cnt].upper() == "SSL":
_settings["ssl"] = "Security=SSL;"
cnt = cnt + 1
elif cParms[cnt].upper() == 'CREDENTIALS':
if cnt+1 < len(cParms):
credentials = cParms[cnt+1]
                # Fall back to credentials previously saved on disk when the
                # variable is not defined in the notebook namespace
                try:
                    tempid = eval(credentials,local_ns)
                except:
                    tempid = None
                if (tempid == None):
                    fname = credentials + ".pickle"
                    try:
                        with open(fname,'rb') as f:
                            _id = pickle.load(f)
                    except:
                        errormsg("Unable to find credential variable or file.")
                        return
                elif (isinstance(tempid,dict) == False):
                    errormsg("The CREDENTIALS variable (" + credentials + ") does not contain a valid Python dictionary (JSON object)")
                    return
                else:
                    _id = tempid
try:
_settings["database"] = _id["db"]
_settings["hostname"] = _id["hostname"]
_settings["port"] = _id["port"]
_settings["uid"] = _id["username"]
_settings["pwd"] = _id["password"]
try:
fname = credentials + ".pickle"
with open(fname,'wb') as f:
pickle.dump(_id,f)
except:
errormsg("Failed trying to write Db2 Credentials.")
return
except:
errormsg("Credentials file is missing information. db/hostname/port/username/password required.")
return
else:
errormsg("No Credentials name supplied")
return
cnt = cnt + 1
elif cParms[cnt].upper() == 'USER':
if cnt+1 < len(cParms):
_settings["uid"] = cParms[cnt+1].upper()
cnt = cnt + 1
else:
errormsg("No userid specified in the CONNECT statement")
return
elif cParms[cnt].upper() == 'USING':
if cnt+1 < len(cParms):
_settings["pwd"] = cParms[cnt+1]
if (_settings["pwd"] == '?'):
_settings["pwd"] = getpass.getpass("Password [password]: ") or "password"
cnt = cnt + 1
else:
errormsg("No password specified in the CONNECT statement")
return
elif cParms[cnt].upper() == 'HOST':
if cnt+1 < len(cParms):
hostport = cParms[cnt+1].upper()
                ip, port = split_string(hostport)
                if (port == None):
                    _settings["port"] = "50000"
                else:
                    _settings["port"] = port
                _settings["hostname"] = ip
cnt = cnt + 1
else:
errormsg("No hostname specified in the CONNECT statement")
return
elif cParms[cnt].upper() == 'PORT':
if cnt+1 < len(cParms):
_settings["port"] = cParms[cnt+1].upper()
cnt = cnt + 1
else:
errormsg("No port specified in the CONNECT statement")
return
elif cParms[cnt].upper() == 'PROMPT':
if (connected_prompt() == False):
print("Connection canceled.")
return
else:
cnt = cnt + 1
elif cParms[cnt].upper() in ('CLOSE','RESET') :
try:
result = ibm_db.close(_hdbc)
_hdbi.close()
except:
pass
success("Connection closed.")
if cParms[cnt].upper() == 'RESET':
_settings["database"] = ''
return
else:
cnt = cnt + 1
_ = db2_doConnect()
|
_____no_output_____
|
Apache-2.0
|
db2.ipynb
|
Db2-DTE-POC/Db2-Openshift-11.5.4
|