Dataset columns (string lengths): markdown (0-1.02M), code (0-832k), output (0-1.02M), license (3-36), path (6-265), repo_name (6-127).
(3) Hospitalizations per day in each municipality
from datetime import date

datas = pd.date_range(date(2018, 7, 1), periods=365).tolist()
lst_mun_ba = list(mun_ba['GEOCODIGO'].apply(lambda x: x[:-1]).values)
datas[0]
datas[-1]

# Admitted on some date up to 30/06/2019 and discharged between 01/07/2018 and 30/06/2019
df2[(df2['DT_Inter'] <= datas[-1]) & (df2['DT_Saida'] >= datas[0]) &
    (df2['DT_Saida'] <= datas[-1]) & (df2['Cod_Municipio'] == '292740')]

# Active hospitalizations per day in Salvador (geocode 292740)
ssa = []
for dt in datas:
    ssa.append(len(df2[(df2['DT_Inter'] <= dt) & (df2['DT_Saida'] >= dt) &
                       (df2['Cod_Municipio'] == '292740')]))

pd_ssa = pd.DataFrame(zip(ssa, datas), columns=['intern', 'data'])
pd_ssa['datas'] = pd.to_datetime(pd_ssa['data'])
pd_ssa['intern'].plot(figsize=(20, 10), style='o--', markersize=5)
plt.ylim(0, max(pd_ssa['intern']) + 1000)
plt.xlim(-1, 365)
plt.show()

max(ssa)
min(ssa)
_____no_output_____
MIT
NT02-Bahia (NRS Sul).ipynb
pedreirajr/GeoCombatCOVID19
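The per-day loop above re-scans `df2` once for every date. A minimal vectorized sketch of the same daily census, assuming `DT_Inter` and `DT_Saida` are normalized datetime columns and every discharge falls on or after the window start (`daily_census` is a hypothetical helper, not part of the original notebook):

```python
import pandas as pd

def daily_census(df2, datas, mun):
    """Active hospitalizations per day for one municipality geocode."""
    sub = df2[df2['Cod_Municipio'] == mun]
    idx = pd.DatetimeIndex(datas)
    # +1 on the (clamped) admission day, -1 the day after discharge;
    # the cumulative sum is the number of active stays on each day.
    starts = sub['DT_Inter'].clip(lower=idx[0]).value_counts().reindex(idx, fill_value=0)
    ends = (sub['DT_Saida'] + pd.Timedelta(days=1)).value_counts().reindex(idx, fill_value=0)
    return (starts - ends).cumsum()

# e.g. daily_census(df2, datas, '292740') reproduces the Salvador series above
```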
* **Time series for all municipalities:**
ba_int = pd.DataFrame(index=datas, columns=mun_ba['GEOCODIGO'].apply(lambda x: x[:-1]).values)
list_mun = list(mun_ba['GEOCODIGO'].apply(lambda x: x[:-1]).values)
for i, row in ba_int.iterrows():
    for mun in list_mun:
        row[mun] = len(df2[(df2['DT_Inter'] <= i) & (df2['DT_Saida'] >= i) &
                           (df2['Cod_Municipio'] == mun)])
ba_int
ba_int.to_excel('NT02 - Bahia/ba_int_dia.xlsx')
_____no_output_____
MIT
NT02-Bahia (NRS Sul).ipynb
pedreirajr/GeoCombatCOVID19
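With a helper like the hypothetical `daily_census` sketched earlier, the state-wide table can be assembled without the nested `iterrows` loop:

```python
ba_int = pd.DataFrame({mun: daily_census(df2, datas, mun) for mun in list_mun})
```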
(4) Origin-Destination Pattern of Hospitalizations
df.info()
per = pd.date_range(date(2018, 7, 1), periods=365).tolist()
per[0]
per[-1]

# Admitted on some date up to 30/06/2019 and discharged between 01/07/2018 and 30/06/2019
df_BA = df2[(df2['DT_Inter'] <= per[-1]) & (df2['DT_Saida'] >= per[0]) &
            (df2['DT_Saida'] <= per[-1]) & (df2['Cod_Municipio_Res'].str.startswith('29'))]
#df_BA = df2[(df2['Cod_Municipio'].str.startswith('29')) & (df2['Cod_Municipio_Res'].str.startswith('29'))].copy()
df_BA['Quantidade'] = 1
df_BA.groupby(['Cod_Municipio_Res','Cod_Municipio']).sum()
df_BA['Quantidade'].sum()

tab = df_BA.groupby(['Cod_Municipio_Res','Cod_Municipio']).sum()
tab_OD = pd.DataFrame(columns=['ORI','DES','Qtd','Dia','Qtd_Dia'])
tab_OD
tab.index[0][1]
for i in np.arange(len(tab)):
    ORI = tab.index[i][0]
    DES = tab.index[i][1]
    Qtd = tab.loc[tab.index[i], 'Quantidade']
    Dia = tab.loc[tab.index[i], 'Quantidade Diarias']
    Qtd_Dia = tab.loc[tab.index[i], 'Quantidade'] * tab.loc[tab.index[i], 'Quantidade Diarias']
    tab_OD.loc[i] = [ORI, DES, Qtd, Dia, Qtd_Dia]
tab_OD

tab_OD['ORI_GC'] = 0
tab_OD['DES_GC'] = 0
for i, el in enumerate(zip(tab_OD['ORI'], tab_OD['DES'])):
    tab_OD.loc[i, 'ORI_GC'] = mun_ba[mun_ba['GEOCODIGO'].str.startswith(str(el[0]))]['GEOCODIGO'].values[0]
    tab_OD.loc[i, 'DES_GC'] = mun_ba[mun_ba['GEOCODIGO'].str.startswith(str(el[1]))]['GEOCODIGO'].values[0]
tab_OD['Qtd'] = pd.to_numeric(tab_OD['Qtd'])
tab_OD['Dia'] = pd.to_numeric(tab_OD['Dia'])
tab_OD['Qtd_Dia'] = pd.to_numeric(tab_OD['Qtd_Dia'])
tab_OD.head()
tab_OD.info()

tab_OD.to_excel('NT02 - Bahia/tab_OD.xlsx', index=False)
tab_OD = pd.read_excel('NT02 - Bahia/tab_OD.xlsx')
tab_OD_dif = tab_OD[tab_OD['ORI'] != tab_OD['DES']].copy()
tab_OD_dif.to_excel('NT02 - Bahia/tab_OD_dif.xlsx', index=False)
tab_OD_dif.sort_values(by='Qtd', ascending=False).head(20)[['ORI_GC','DES_GC','Qtd','Dia','Qtd_Dia']]
_____no_output_____
MIT
NT02-Bahia (NRS Sul).ipynb
pedreirajr/GeoCombatCOVID19
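The row-by-row construction of `tab_OD` in the cell above can be collapsed into a single grouped aggregation. A sketch, assuming the `Quantidade` and `Quantidade Diarias` columns exist as above:

```python
tab_OD = (df_BA.groupby(['Cod_Municipio_Res', 'Cod_Municipio'])
               [['Quantidade', 'Quantidade Diarias']].sum()
               .reset_index()
               .rename(columns={'Cod_Municipio_Res': 'ORI', 'Cod_Municipio': 'DES',
                                'Quantidade': 'Qtd', 'Quantidade Diarias': 'Dia'}))
# Same product of per-pair sums as in the loop above
tab_OD['Qtd_Dia'] = tab_OD['Qtd'] * tab_OD['Dia']
```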
(4.1) Main hospitalization centers (most demanded destinations)
tab_OD.groupby(['DES_GC']).sum().sort_values(by='Qtd', ascending=False)['Qtd'].sum()
tab_OD.groupby(['DES_GC']).sum().sort_values(by='Qtd', ascending=False)[:20]
_____no_output_____
MIT
NT02-Bahia (NRS Sul).ipynb
pedreirajr/GeoCombatCOVID19
Proportion:
tab_OD.groupby(['DES_GC']).sum().sort_values(by='Qtd', ascending=False)[:50]['Qtd'] \
    / tab_OD.groupby(['DES_GC']).sum().sort_values(by='Qtd', ascending=False)['Qtd'].sum()
(tab_OD.groupby(['DES_GC']).sum().sort_values(by='Qtd', ascending=False)[:50]['Qtd']
    / tab_OD.groupby(['DES_GC']).sum().sort_values(by='Qtd', ascending=False)['Qtd'].sum()).sum()
_____no_output_____
MIT
NT02-Bahia (NRS Sul).ipynb
pedreirajr/GeoCombatCOVID19
(4.2) Municipalities most served by the main hospitalization centers
mun_ba.loc[mun_ba['GEOCODIGO'].isin(tab_OD['DES_GC'].astype(str))][['NOME','NOMEABREV','geometry']]
idx = list(tab_OD.groupby(['DES_GC']).sum().sort_values(by='Qtd', ascending=False)[:10]['Qtd'].index)
_____no_output_____
MIT
NT02-Bahia (NRS Sul).ipynb
pedreirajr/GeoCombatCOVID19
The 20 most-served municipalities of each of the 10 largest care centers
for k in np.arange(len(idx)):
    mun_ba[mun_ba['GEOCODIGO'] == idx[k]]['NOME'].values[0]  # Name
    tab_OD[tab_OD['DES_GC'] == idx[k]].sort_values(by='Qtd', ascending=False)['Qtd'].sum()  # Number of hospitalizations
    tab_OD[tab_OD['DES_GC'] == idx[k]].sort_values(by='Qtd', ascending=False)['Qtd'][:20].sum() \
        / tab_OD[tab_OD['DES_GC'] == idx[k]].sort_values(by='Qtd', ascending=False)['Qtd'].sum()  # Share these 20 municipalities represent

mun_ba[mun_ba['GEOCODIGO'] == idx[0]]['NOME']
tab_OD[tab_OD['DES_GC'] == idx[0]].sort_values(by='Qtd', ascending=False)['ORI_GC'][:20].values

atend = []
for k in np.arange(len(idx)):
    idx_mun = tab_OD[tab_OD['DES_GC'] == idx[k]].sort_values(by='Qtd', ascending=False)['ORI_GC'][:20].values
    int_mun = tab_OD[tab_OD['DES_GC'] == idx[k]].sort_values(by='Qtd', ascending=False)['Qtd'][:20].values
    nome_mun = list(map(lambda x: mun_ba[mun_ba['GEOCODIGO'] == x]['NOME'].values[0], idx_mun))
    #pd.DataFrame(zip(idx_mun, nome_mun, int_mun), columns=['Geocódigo','Município','Internações'])
    for i in idx_mun:
        atend.append(i)

len(atend)
len(list(set(atend)))
atend = list(set(atend))
mun_ba[mun_ba['GEOCODIGO'].isin(atend)]['Pop'].sum()
mun_ba[mun_ba['GEOCODIGO'].isin(atend)]['Pop'].sum() / mun_ba['Pop'].sum()
_____no_output_____
MIT
NT02-Bahia (NRS Sul).ipynb
pedreirajr/GeoCombatCOVID19
(4.3) Analysis of the Pandemic in the NRS Sul: **Regional Health Centers (Núcleos Regionais de Saúde):**
nrs = gpd.read_file('NT02 - Bahia/Oferta Hospitalar/SESAB - NUCLEO REG SAUDE - 20190514 - SIRGAS2000.shp')
nrs = nrs.to_crs(CRS("WGS84"))
nrs.crs
mun_ba.crs == nrs.crs
nrs
mun_ba['NRS'] = 0
for i in list(nrs.index):
    mun_ba.loc[mun_ba['geometry'].apply(lambda x: x.centroid.within(nrs.loc[i, 'geometry'])), 'NRS'] = nrs.loc[i, 'NM_NRS']
mun_ba.plot(column='NRS')
plt.show()
_____no_output_____
MIT
NT02-Bahia (NRS Sul).ipynb
pedreirajr/GeoCombatCOVID19
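The centroid-in-polygon loop above can also be written as a spatial join. A sketch, assuming geopandas >= 0.10 (which uses the `predicate` keyword) and that each centroid falls in at most one NRS polygon:

```python
import geopandas as gpd

# Join each municipality centroid to the NRS polygon that contains it
centroids = mun_ba.copy()
centroids['geometry'] = centroids.geometry.centroid
joined = gpd.sjoin(centroids, nrs[['NM_NRS', 'geometry']], how='left', predicate='within')
mun_ba['NRS'] = joined['NM_NRS'].values
```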
Population
for i in nrs['NM_NRS'].values:
    print(i, mun_ba[mun_ba['NRS'] == i]['Pop'].sum())
mun_ba['Qtd_Tot'].sum()
nrs.to_file('NT02 - Bahia/nrs.shp')
_____no_output_____
MIT
NT02-Bahia (NRS Sul).ipynb
pedreirajr/GeoCombatCOVID19
**Municipalities with the highest prevalence:**
fig, ax = plt.subplots(figsize=(10, 10))
mun_ba.plot(ax=ax, column='prev')
plt.show()
# 20 highest in the state:
mun_ba.sort_values(by='prev', ascending=False)[['GEOCODIGO','NOME','Pop','prev','NRS']][:20]
# Number of municipalities in the NRS Sul with confirmed cases as of 24/04/2020
len(mun_ba[(mun_ba['NRS'] == 'Sul') & (mun_ba['c20200424'] > 0)])
# Highest-prevalence municipalities in the South region:
mun_ba[mun_ba['NRS'] == 'Sul'].sort_values(by='prev', ascending=False)[['GEOCODIGO','NOME','prev']][:14]
_____no_output_____
MIT
NT02-Bahia (NRS Sul).ipynb
pedreirajr/GeoCombatCOVID19
(4.4) Hospital Supply in the NRS Sul **Conventional beds:**
leitos = pd.read_excel('NT02 - Bahia/Oferta Hospitalar/leitos.xlsx')
leitos.info()
leitos.head(2)
_____no_output_____
MIT
NT02-Bahia (NRS Sul).ipynb
pedreirajr/GeoCombatCOVID19
**Complementary beds:**
leitos_c = pd.read_excel('NT02 - Bahia/Oferta Hospitalar/leitos_comp.xlsx')
leitos_c.info()
leitos_c.head(2)
_____no_output_____
MIT
NT02-Bahia (NRS Sul).ipynb
pedreirajr/GeoCombatCOVID19
**Beds added post-COVID:**
leitos_add = pd.read_excel('NT02 - Bahia/Oferta Hospitalar/leitos_add.xlsx')
leitos_add.info()
leitos_add.head(2)
_____no_output_____
MIT
NT02-Bahia (NRS Sul).ipynb
pedreirajr/GeoCombatCOVID19
**Ventilators:**
resp = pd.read_excel('NT02 - Bahia/Oferta Hospitalar/respiradores.xlsx')
resp.info()
resp.head(2)
_____no_output_____
MIT
NT02-Bahia (NRS Sul).ipynb
pedreirajr/GeoCombatCOVID19
**Health professionals:**
prof = pd.read_excel('NT02 - Bahia/Oferta Hospitalar/profissionais.xlsx')
prof.info()
prof.head(2)
_____no_output_____
MIT
NT02-Bahia (NRS Sul).ipynb
pedreirajr/GeoCombatCOVID19
**Adding everything to `mun_ba`:**
mun_ba['L_Clin'] = 0
mun_ba['L_UTI_Adu'] = 0
mun_ba['L_UTI_Ped'] = 0
mun_ba['L_CInt_Adu'] = 0
mun_ba['L_CInt_Ped'] = 0
mun_ba['LA_Clin'] = 0
mun_ba['LA_UTI_Adu'] = 0
mun_ba['Resp'] = 0
mun_ba['M_Pneumo'] = 0
mun_ba['M_Familia'] = 0
mun_ba['M_Intens'] = 0
mun_ba['Enferm'] = 0
mun_ba['Fisiot'] = 0
mun_ba['Nutric'] = 0

# The bare except/pass blocks leave a municipality at 0 when it has no row
# in the corresponding source table.
for i, row in mun_ba.iterrows():
    gc = int(row['GEOCODIGO'][:-1])  # 6-digit geocode without the check digit
    try:
        mun_ba.loc[i, 'L_Clin'] = leitos[leitos['GEOCODIGO'] == gc]['Clínicos'].values[0]
    except: pass
    try:
        mun_ba.loc[i, 'L_UTI_Adu'] = (leitos_c[leitos_c['GEOCODIGO'] == gc]['UTI adulto I'].values[0]
                                      + leitos_c[leitos_c['GEOCODIGO'] == gc]['UTI adulto II'].values[0]
                                      + leitos_c[leitos_c['GEOCODIGO'] == gc]['UTI adulto III'].values[0])
    except: pass
    try:
        mun_ba.loc[i, 'L_UTI_Ped'] = (leitos_c[leitos_c['GEOCODIGO'] == gc]['UTI pediátrica I'].values[0]
                                      + leitos_c[leitos_c['GEOCODIGO'] == gc]['UTI pediátrica II'].values[0]
                                      + leitos_c[leitos_c['GEOCODIGO'] == gc]['UTI pediátrica III'].values[0])
    except: pass
    try:
        mun_ba.loc[i, 'L_CInt_Adu'] = leitos_c[leitos_c['GEOCODIGO'] == gc]['Unidade de cuidados intermed adulto'].values[0]
    except: pass
    try:
        mun_ba.loc[i, 'L_CInt_Ped'] = leitos_c[leitos_c['GEOCODIGO'] == gc]['Unidade de cuidados intermed pediatrico'].values[0]
    except: pass
    try:
        mun_ba.loc[i, 'LA_Clin'] = leitos_add[leitos_add['MUNICIPIO'] == row['NOME']]['L_Clin'].values[0]
    except: pass
    try:
        mun_ba.loc[i, 'LA_UTI_Adu'] = leitos_add[leitos_add['MUNICIPIO'] == row['NOME']]['L_UTI_Adu'].values[0]
    except: pass
    try:
        mun_ba.loc[i, 'Resp'] = resp[resp['GEOCODIGO'] == gc]['Equipamentos_Existentes'].values[0]
    except: pass
    try:
        mun_ba.loc[i, 'M_Pneumo'] = prof[prof['GEOCODIGO'] == gc]['Médico pneumologista'].values[0]
    except: pass
    try:
        mun_ba.loc[i, 'M_Familia'] = prof[prof['GEOCODIGO'] == gc]['Médico da Família'].values[0]
    except: pass
    try:
        mun_ba.loc[i, 'M_Intens'] = prof[prof['GEOCODIGO'] == gc]['Médico em Medicina Intensiva'].values[0]
    except: pass
    try:
        mun_ba.loc[i, 'Enferm'] = prof[prof['GEOCODIGO'] == gc]['Enfermeiro'].values[0]
    except: pass
    try:
        mun_ba.loc[i, 'Fisiot'] = prof[prof['GEOCODIGO'] == gc]['Fisioterapeuta'].values[0]
    except: pass
    try:
        mun_ba.loc[i, 'Nutric'] = prof[prof['GEOCODIGO'] == gc]['Nutricionista'].values[0]
    except: pass

mun_ba[mun_ba['NRS'] == 'Sul'].sort_values(by='prev', ascending=False)[['NOME','Pop','prev','L_Clin','LA_Clin','L_UTI_Adu','LA_UTI_Adu','Resp','M_Pneumo','M_Intens','Fisiot','Enferm']][:14]
mun_ba.to_file('NT02 - Bahia/saude_mun_ba.shp')
_____no_output_____
MIT
NT02-Bahia (NRS Sul).ipynb
pedreirajr/GeoCombatCOVID19
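The row-wise try/except lookups above can also be expressed as left merges. A sketch for two of the columns, assuming each source table has at most one row per geocode (otherwise the merge would duplicate rows):

```python
mun_ba['GC6'] = mun_ba['GEOCODIGO'].str[:-1].astype(int)  # numeric 6-digit geocode

m = mun_ba.merge(leitos[['GEOCODIGO', 'Clínicos']],
                 left_on='GC6', right_on='GEOCODIGO', how='left', suffixes=('', '_x'))
mun_ba['L_Clin'] = m['Clínicos'].fillna(0).values

uti = ['UTI adulto I', 'UTI adulto II', 'UTI adulto III']
m = mun_ba.merge(leitos_c[['GEOCODIGO'] + uti],
                 left_on='GC6', right_on='GEOCODIGO', how='left', suffixes=('', '_x'))
mun_ba['L_UTI_Adu'] = m[uti].sum(axis=1).values  # all-NaN rows sum to 0
```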
(4.5) Dynamics of Hospitalization Flows in the NRS Sul **(a) Resources:**
#.isin(mun_ba[mun_ba['NRS']=='Sul']['NOME'].values)
nrs_rec = mun_ba[['NRS','Pop','L_Clin','L_UTI_Adu','L_UTI_Ped','L_CInt_Adu','L_CInt_Ped','LA_Clin','LA_UTI_Adu','Resp','M_Pneumo','M_Familia','M_Intens','Enferm','Fisiot','Nutric']].groupby(['NRS']).sum()

# Resources per 10,000 inhabitants in each NRS
pd.DataFrame(zip(10000*nrs_rec['L_Clin']/nrs_rec['Pop'], 10000*nrs_rec['L_UTI_Adu']/nrs_rec['Pop'],
                 10000*nrs_rec['L_UTI_Ped']/nrs_rec['Pop'], 10000*nrs_rec['Resp']/nrs_rec['Pop'],
                 10000*nrs_rec['M_Pneumo']/nrs_rec['Pop'], 10000*nrs_rec['M_Intens']/nrs_rec['Pop'],
                 10000*nrs_rec['Fisiot']/nrs_rec['Pop'], 10000*nrs_rec['Enferm']/nrs_rec['Pop']),
             index=(10000*nrs_rec['Enferm']/nrs_rec['Pop']).index,
             columns=['L_Clin','L_UTI_Adu','L_UTI_Ped','Resp','M_Pneumo','M_Intens','Fisiot','Enferm'])

# Absolute critical-care resources per NRS
pd.DataFrame(zip(nrs_rec['L_UTI_Adu'], nrs_rec['Resp'], nrs_rec['M_Intens'], nrs_rec['Fisiot']),
             index=(100000*nrs_rec['Enferm']/nrs_rec['Pop']).index,
             columns=['L_UTI_Adu','Resp','M_Intens','Fisiot'])
_____no_output_____
MIT
NT02-Bahia (NRS Sul).ipynb
pedreirajr/GeoCombatCOVID19
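The per-10,000-inhabitant table above can be computed in one broadcast division over the same columns; a short sketch:

```python
cols = ['L_Clin', 'L_UTI_Adu', 'L_UTI_Ped', 'Resp', 'M_Pneumo', 'M_Intens', 'Fisiot', 'Enferm']
# Divide every resource column by the NRS population, row-aligned, then scale
rates = nrs_rec[cols].div(nrs_rec['Pop'], axis=0) * 10000
```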
**(b) Hospitalizations:** **Interdependence between NRSs (OD matrix):**
nrs_names = list(nrs['NM_NRS'].values)
nrs_OD = np.zeros([len(nrs_names), len(nrs_names)])
for i, nrs_o in enumerate(nrs_names):
    muns_o = list(mun_ba[mun_ba['NRS'] == nrs_o]['GEOCODIGO'].values)
    for j, nrs_d in enumerate(nrs_names):
        muns_d = list(mun_ba[mun_ba['NRS'] == nrs_d]['GEOCODIGO'].values)
        nrs_OD[i, j] = tab_OD[tab_OD['ORI_GC'].isin(muns_o) & tab_OD['DES_GC'].isin(muns_d)]['Qtd'].sum()
nrs_od_df = pd.DataFrame(nrs_OD, columns=nrs_names, index=nrs_names).astype(int)
nrs_od_df

from itertools import product
nrs_tab_od = pd.DataFrame(list(product(nrs_names, nrs_names)))
nrs_tab_od['flux'] = 0
nrs_tab_od.rename(columns={0: 'ORI', 1: 'DES'}, inplace=True)
nrs_tab_od
for i, row in nrs_od_df.iterrows():
    nrs_tab_od.loc[(nrs_tab_od['ORI'] == i), 'flux'] = list(row.values)
nrs_tab_od
nrs_tab_od.to_csv('NT02 - Bahia/nrs_tab_od.csv')
_____no_output_____
MIT
NT02-Bahia (NRS Sul).ipynb
pedreirajr/GeoCombatCOVID19
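The double loop above makes one filter pass over `tab_OD` for every NRS pair; the same matrix can come from a single mapped groupby. A sketch, assuming `ORI_GC`/`DES_GC` hold the same geocode strings as `mun_ba['GEOCODIGO']`:

```python
gc_to_nrs = mun_ba.set_index('GEOCODIGO')['NRS']
flows = tab_OD.assign(ORI_NRS=tab_OD['ORI_GC'].map(gc_to_nrs),
                      DES_NRS=tab_OD['DES_GC'].map(gc_to_nrs))
nrs_od_df = (flows.groupby(['ORI_NRS', 'DES_NRS'])['Qtd'].sum()
                  .unstack(fill_value=0).astype(int))
```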
**For each NRS:**
# Municipalities of each NRS
for i in list(nrs['NM_NRS'].values):
    muns = list(mun_ba[mun_ba['NRS'] == i]['NOME'].values)
    muns_gc = list(mun_ba[mun_ba['NRS'] == i]['GEOCODIGO'].values)
    print("NRS " + i + ":")
    print("Total hospitalizations: {}".format(tab_OD[tab_OD['DES_GC'].isin(muns_gc)]['Qtd'].sum()))
    print("Share of the state's total hospitalizations: {:.3f}".format(
        tab_OD[tab_OD['DES_GC'].isin(muns_gc)]['Qtd'].sum() / tab_OD['Qtd'].sum()))
    print("Hospitalizations of NRS residents treated within their own NRS: {}".format(
        tab_OD[tab_OD['ORI_GC'].isin(muns_gc) & tab_OD['DES_GC'].isin(muns_gc)]['Qtd'].sum()))
    print("Ratio of residents' hospitalizations treated within the NRS to all hospitalizations of its residents statewide: {:.3f}".format(
        tab_OD[tab_OD['ORI_GC'].isin(muns_gc) & tab_OD['DES_GC'].isin(muns_gc)]['Qtd'].sum()
        / tab_OD[tab_OD['ORI_GC'].isin(muns_gc)]['Qtd'].sum()))
    print("Hospitalizations in the NRS of residents from outside it: {}".format(
        tab_OD[~tab_OD['ORI_GC'].isin(muns_gc) & tab_OD['DES_GC'].isin(muns_gc)]['Qtd'].sum()))
    print("Share of the NRS's hospitalizations that came from outside residents: {:.3f}".format(
        tab_OD[~tab_OD['ORI_GC'].isin(muns_gc) & tab_OD['DES_GC'].isin(muns_gc)]['Qtd'].sum()
        / tab_OD[tab_OD['DES_GC'].isin(muns_gc)]['Qtd'].sum()))
_____no_output_____
MIT
NT02-Bahia (NRS Sul).ipynb
pedreirajr/GeoCombatCOVID19
**Dependence on the NRS Leste:**
muns = [i for i in list(nrs['NM_NRS'].values) if i != 'Leste']
for i in muns:
    muns_gc = list(mun_ba[mun_ba['NRS'] == i]['GEOCODIGO'].values)
    muns_le = list(mun_ba[mun_ba['NRS'] == 'Leste']['GEOCODIGO'].values)
    print("Hospitalizations of residents of NRS {} treated in the Leste = {}".format(
        i, tab_OD[tab_OD['ORI_GC'].isin(muns_gc) & tab_OD['DES_GC'].isin(muns_le)]['Qtd'].sum()))
    print("Proportion relative to NRS {}'s own internal hospitalizations = {}".format(
        i, tab_OD[tab_OD['ORI_GC'].isin(muns_gc) & tab_OD['DES_GC'].isin(muns_le)]['Qtd'].sum()
           / tab_OD[tab_OD['ORI_GC'].isin(muns_gc) & tab_OD['DES_GC'].isin(muns_gc)]['Qtd'].sum()))
_____no_output_____
MIT
NT02-Bahia (NRS Sul).ipynb
pedreirajr/GeoCombatCOVID19
**Analysis of the NRS Sul (largest number of accumulated cases):**
# Municipalities of the NRS Sul
mun_sul = list(mun_ba[mun_ba['NRS'] == 'Sul']['NOME'].values)
mun_sul_gc = list(mun_ba[mun_ba['NRS'] == 'Sul']['GEOCODIGO'].values)

# All hospitalizations demanded by NRS Sul municipalities
tab_OD[tab_OD['ORI_GC'].isin(mun_sul_gc)].sort_values(by='Qtd', ascending=False)
# Hospitalizations demanded by NRS Sul municipalities that were treated within the NRS Sul
tab_OD[tab_OD['ORI_GC'].isin(mun_sul_gc) & tab_OD['DES_GC'].isin(mun_sul_gc)].sort_values(by='Qtd', ascending=False)
# Hospitalizations treated in the NRS Sul for municipalities outside it
tab_OD[~tab_OD['ORI_GC'].isin(mun_sul_gc) & tab_OD['DES_GC'].isin(mun_sul_gc)].sort_values(by='Qtd', ascending=False)

# Total hospitalizations in Bahia:
tab_OD['Qtd'].sum()
# Total hospitalizations in the NRS Sul:
tab_OD[tab_OD['DES_GC'].isin(mun_sul_gc)]['Qtd'].sum()
# Share of the state's hospitalizations treated in the NRS Sul:
tab_OD[tab_OD['DES_GC'].isin(mun_sul_gc)]['Qtd'].sum() / tab_OD['Qtd'].sum()
# Hospitalizations in the NRS Sul from municipalities within it:
tab_OD[tab_OD['ORI_GC'].isin(mun_sul_gc) & tab_OD['DES_GC'].isin(mun_sul_gc)]['Qtd'].sum()
# Ratio of hospitalizations treated within the NRS Sul to the total demanded by it
tab_OD[tab_OD['ORI_GC'].isin(mun_sul_gc) & tab_OD['DES_GC'].isin(mun_sul_gc)]['Qtd'].sum() \
    / tab_OD[tab_OD['ORI_GC'].isin(mun_sul_gc)]['Qtd'].sum()
# Hospitalizations in the NRS Sul from municipalities outside it:
tab_OD[~tab_OD['ORI_GC'].isin(mun_sul_gc) & tab_OD['DES_GC'].isin(mun_sul_gc)]['Qtd'].sum()
# Hospitalizations of NRS Sul residents treated outside the NRS Sul:
tab_OD[tab_OD['ORI_GC'].isin(mun_sul_gc) & ~tab_OD['DES_GC'].isin(mun_sul_gc)]['Qtd'].sum()

# Municipalities that treated the most hospitalizations in the NRS Sul:
tab_OD[tab_OD['DES_GC'].isin(mun_sul_gc)].sort_values(by='Qtd', ascending=False)
# Share of hospitalizations in the top 10:
tab_OD[tab_OD['DES_GC'].isin(mun_sul_gc)].sort_values(by='Qtd', ascending=False)[:10]['Qtd'].sum() \
    / tab_OD[tab_OD['DES_GC'].isin(mun_sul_gc)]['Qtd'].sum()
muns_10sul = list(map(str, tab_OD[tab_OD['DES_GC'].isin(mun_sul_gc)].sort_values(by='Qtd', ascending=False)[:10]['DES_GC'].values))

# Material resources
mun_ba[mun_ba['GEOCODIGO'].isin(muns_10sul)][['NOME','Pop','Qtd_Tot','L_Clin','LA_Clin','L_UTI_Adu','LA_UTI_Adu','Resp']].sort_values(by='Qtd_Tot', ascending=False)
mun_ba[mun_ba['NOME'].isin(['Ilhéus','Itabuna','Jequié'])]['Pop'].sum() \
    / mun_ba[mun_ba['NRS'] == 'Sul']['Pop'].sum()
mun_ba[mun_ba['NRS'] == 'Sul']['Pop'].sum()
# Material resources of Itabuna, Ilhéus and Jequié relative to the whole NRS
mun_ba[mun_ba['GEOCODIGO'].isin(muns_10sul)][['Qtd_Tot','L_Clin','LA_Clin','L_UTI_Adu','LA_UTI_Adu','Resp']].sort_values(by='Qtd_Tot', ascending=False)[:3].sum() \
    / mun_ba[mun_ba['NRS'] == 'Sul'][['Qtd_Tot','L_Clin','LA_Clin','L_UTI_Adu','LA_UTI_Adu','Resp']].sum()

# Human resources
mun_ba[mun_ba['GEOCODIGO'].isin(muns_10sul)][['NOME','Qtd_Tot','M_Pneumo','M_Intens','Fisiot','Enferm']].sort_values(by='Qtd_Tot', ascending=False)
# Human resources of Itabuna, Ilhéus and Jequié relative to the whole NRS
mun_ba[mun_ba['GEOCODIGO'].isin(muns_10sul)][['Qtd_Tot','M_Pneumo','M_Intens','Fisiot','Enferm']].sort_values(by='Qtd_Tot', ascending=False)[:3].sum() \
    / mun_ba[mun_ba['NRS'] == 'Sul'][['Qtd_Tot','M_Pneumo','M_Intens','Fisiot','Enferm']].sum()
_____no_output_____
MIT
NT02-Bahia (NRS Sul).ipynb
pedreirajr/GeoCombatCOVID19
(4.6) Hospitalization flows of the 10 highest-prevalence municipalities in the NRS Sul:
mun_sul = list(mun_ba[mun_ba['NRS'] == 'Sul'].sort_values(by='prev', ascending=False)['GEOCODIGO'].values)

for i in mun_sul[:10]:
    orig = []
    lst_orig = tab_OD[tab_OD['DES_GC'] == i].sort_values(by='Qtd', ascending=False)['ORI_GC'].values
    if len(lst_orig) == 0:
        "{} received no patients".format(mun_ba[mun_ba['GEOCODIGO'] == i]['NOME'].values[0])
        continue
    for j in lst_orig:  # both branches of the original if/else appended the same value
        orig.append(mun_ba[mun_ba['GEOCODIGO'] == j]['NOME'].values[0])
    print('Hospitalizations with destination ' + mun_ba[mun_ba['GEOCODIGO'] == i]['NOME'].values[0] + ':')
    qtd = tab_OD[tab_OD['DES_GC'] == i].sort_values(by='Qtd', ascending=False)['Qtd']
    perc = qtd / tab_OD[tab_OD['DES_GC'] == i].sort_values(by='Qtd', ascending=False)['Qtd'].sum()
    pd.DataFrame(zip(orig, qtd, perc), columns=['Mun_orig','Qtd','Distr_perc'])

for i in mun_sul[:10]:
    dest = []
    lst_dest = tab_OD[tab_OD['ORI_GC'] == i].sort_values(by='Qtd', ascending=False)['DES_GC'].values
    if len(lst_dest) == 0:
        continue
    for j in lst_dest:
        dest.append(mun_ba[mun_ba['GEOCODIGO'] == j]['NOME'].values[0])
    print('Hospitalizations with origin in ' + mun_ba[mun_ba['GEOCODIGO'] == i]['NOME'].values[0] + ':')
    qtd = tab_OD[tab_OD['ORI_GC'] == i].sort_values(by='Qtd', ascending=False)['Qtd']
    perc = qtd / tab_OD[tab_OD['ORI_GC'] == i].sort_values(by='Qtd', ascending=False)['Qtd'].sum()
    pd.DataFrame(zip(dest, qtd, perc), columns=['Mun_dest','Qtd','Distr_perc'])
_____no_output_____
MIT
NT02-Bahia (NRS Sul).ipynb
pedreirajr/GeoCombatCOVID19
Assignment 2 Set 5: Image Captioning

Deep Learning (S1-21_DSECLZG524) - DL Group 037 - SEC-3
* Arindam Dey - 2020FC04251
* Kaushik Dubey - 2020FC04245
* Mohammad Attaullah - 2020FC04274

1. Import Libraries/Dataset (0 mark)
1. Import the required libraries
2. Check the GPU available (recommended: use the free GPU provided by Google Colab)
import os

# COLAB_GPU is set in the Colab environment; OS is set on Windows
# print(os.environ)
isCollab = os.getenv('COLAB_GPU', False) and os.getenv('OS', True)
print('Colab' if isCollab else 'Local')

# libraries
import numpy as np
import pandas as pd
import random

# Imports packages to view data
# pip install opencv-python
# pip install opencv-contrib-python
import cv2
# pip install glob2
from glob2 import glob
# pip install matplotlib
import matplotlib.pyplot as plt
from PIL import Image

# cv2.imshow() is not supported directly in Google Colab; use the patched helper
if isCollab:
    from google.colab.patches import cv2_imshow

# pip install prettytable
from prettytable import PrettyTable

# visualization
%matplotlib inline
# pip install seaborn
import seaborn as sns
plt.rc('image', cmap='gray')

# sklearn
# pip install scikit-learn
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split

# tensorflow and keras
# pip install tensorflow
# pip install keras
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, GRU, Embedding
from tensorflow.keras.applications import VGG16
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

# Google Drive (Colab only)
import pickle
if isCollab:
    from google.colab import drive
    drive.mount('/content/drive')

import sklearn.metrics as metrics
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.metrics import classification_report

print(tf.__version__)
isCollab
_____no_output_____
Apache-2.0
Group_037_SEC_3_Assignment_2_Image_Captioning.ipynb
arindamdeyofficial/Amazon_Review_Sentiment_Analysys
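Among the imports above is Keras's pretrained VGG16, used later in Model Building for image feature extraction. A minimal sketch of the usual wiring: `'fc2'` is the standard layer name in Keras's VGG16, but picking it as the transfer layer is an assumption here, not something fixed by the notebook:

```python
from tensorflow.keras.applications import VGG16
from tensorflow.keras.models import Model

# Load VGG16 with ImageNet weights, keeping the fully connected top
vgg = VGG16(include_top=True, weights='imagenet')
# Reuse the 4096-d output of the second FC layer as the image embedding
transfer_layer = vgg.get_layer('fc2')
image_model = Model(inputs=vgg.input, outputs=transfer_layer.output)
image_model.summary()
```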
2. Data Processing (1 mark) Read the pickle file
if isCollab:
    drivemasterpath = '/content/drive/My Drive/Colab Notebooks/AutoImageCaptioning'
else:
    drivemasterpath = 'D:/OneDrive/Certification/Bits Pilani Data Science/3rd Sem/Deep Learning (S1-21_DSECLZG524)/Assignment 2'
imgDatasetPath = drivemasterpath + "/Flicker8k_Dataset"
pklFilePath = drivemasterpath + '/set_0.pkl'
print(imgDatasetPath, pklFilePath)

with open(pklFilePath, 'rb') as infile:
    best_model = pickle.load(infile)

# To write an object back to a file:
# pickle.dump(obj, open(file_path, "wb"))
# To serialize an object to bytes:
# data = pickle.dumps(obj)
_____no_output_____
Apache-2.0
Group_037_SEC_3_Assignment_2_Image_Captioning.ipynb
arindamdeyofficial/Amazon_Review_Sentiment_Analysys
Plot at least two samples and their captions (use matplotlib/seaborn/any other library).
pics = os.listdir(imgDatasetPath)[25:30]  # show 5 images
pic_address = [imgDatasetPath + '/' + pic for pic in pics]
pic_address

for i in range(0, 5):
    # Load the image
    norm_img = Image.open(pic_address[i])
    # Plot it
    f = plt.figure(figsize=(10, 6))
    a1 = f.add_subplot(1, 2, 1)
    img_plot = plt.imshow(norm_img)
    a1.set_title(f'Normal {pics[i]}')

def load_image(path, size=None):
    """
    Load the image from the given file-path and resize it
    to the given size if not None.
    """
    # Load the image using PIL.
    img = Image.open(path)
    # Resize image if desired.
    if not size is None:
        img = img.resize(size=size, resample=Image.LANCZOS)
    # Convert image to numpy array.
    img = np.array(img)
    # Scale image-pixels so they fall between 0.0 and 1.0
    img = img / 255.0
    # Convert 2-dim gray-scale array to 3-dim RGB array.
    if (len(img.shape) == 2):
        img = np.repeat(img[:, :, np.newaxis], 3, axis=2)
    return img

def show_image(idx, train):
    """
    Load and plot an image from the training- or validation-set
    with the given index.
    """
    if train:
        # Use an image from the training-set.
        dir = coco.train_dir
        filename = filenames_train[idx]
        captions = captions_train[idx]
    else:
        # Use an image from the validation-set.
        dir = coco.val_dir
        filename = filenames_val[idx]
        captions = captions_val[idx]
    # Path for the image-file.
    path = os.path.join(dir, filename)
    # Print the captions for this image.
    for caption in captions:
        print(caption)
    # Load the image and plot it.
    img = load_image(path)
    plt.imshow(img)
    plt.show()
_____no_output_____
Apache-2.0
Group_037_SEC_3_Assignment_2_Image_Captioning.ipynb
arindamdeyofficial/Amazon_Review_Sentiment_Analysys
3. Model Building (4 mark) 1. Use Pretrained VGG-16 model trained on ImageNet dataset (available publicly on google) for image feature extraction.2. Create 3 layered LSTM layer model and other relevant layers for image caption generation.3. Add L2 regularization to all the LSTM layers. 4. Add one layer of dropout at the appropriate position and give reasons. 5. Choose the appropriate activation function for all the layers. 6. Print the model summary. Use Pretrained VGG-16 model trained on ImageNet dataset (available publicly on google) for image feature extraction. VGG16 is a convolution neural net (CNN ) architecture which was used to win ILSVR(Imagenet) competition in 2014. It is considered to be one of the excellent vision model architecture till date. Most unique thing about VGG16 is that instead of having a large number of hyper-parameter they focused on having convolution layers of 3x3 filter with a stride 1 and always used same padding and maxpool layer of 2x2 filter of stride 2. It follows this arrangement of convolution and max pool layers consistently throughout the whole architecture. In the end it has 2 FC(fully connected layers) followed by a softmax for output. The 16 in VGG16 refers to it has 16 layers that have weights. This network is a pretty large network and it has about 138 million (approx) parameters.![vgg16 Architecture.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAdYAAAEUCAIAAAAZWICyAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsMAAA7DAcdvqGQAALpfSURBVHhe7L11QBxJo/X9/fPee9eiuLu7u7u7u2twgrsmBEggRiBK3F0JceLu7u4ODJnvzNTsPLPEWDbJJnn6bC+prq6pqq6u/vXpavv/3r17R6dEiRIlSv+GKARTokSJ0r8mCsGUKFGi9K+JQjAlSpQo/WuiEEyJEiVK/5ooBFOiRInSvyYKwZQoUaL0r4lCMCVKlCj9a6IQTIkSJUr/mr4YgpFPd3d3T08Pa/5DMURv3rxhhT6ngedJo9F6e3tZM58Tfg4NZMWRDMWxU5LS3y8IyVAB1sznhJ9z5gn1K4Xob+X5wYohcuBNTYkSpX9FXwbByGT58uUzZ84sKipqbW0FC0CQpUuXzp49Oy8vr729nV3Kq1evQkJCXr58SWY/IWSyevXqWbNmFRcXT5kypY+pJUuWIKagoABlsQmF+PHjx3d0dJDZTwv1hNra2iZMmABssWI/pF27diHZ2LFjKyoqUGEknjdvHtYoKysLObDXCNXAOp44cYLMflp79uxBnnV1dVip169fI2b79u2Iqa2trampQeOQZMiztLR03759ZPbTAmfnzJmDlmlubr58+TIrlk7v6urC5mDNUKJE6bvUl0Hw2bNnASZk9ezZM11d3ePHj4NfgAhiHj58qKmpeenSJSTD7IYNG9TV1V+8eEF+CNYcO3aMXQcwiKSEzpw5k5OTgwRAlbGxMTIE5gA7JH78+LGWltbp06dJylOnTjk7O69Zs4bMQhcuXGAbQOSA+uAvwoC1jY3NzZs3nz59qqOjwy7rfT158iQoKAjYxQ9xzADjtm7d2tDQgEVXrlxRVFTEmpKUIB2qd/jwYTILoWIoiIRxIMEKkjDyjI+Px7ojMjAwEHmicRAgRyxfX9/FixeTlMjNwcFhx44dZBarfPLkSfYBA1VCg7MbrbGxccWKFZjFwWn37t0k8vnz54WFhSiOzFKiROn71JdBMFACK4cAMAEe7d+//+rVq4sWLULmACigDJ5iKVAIYFlaWrIRTMABUwlmAYslJSUkJXT//n0sAm6Qxs7ObtOmTffu3ZsxYwYWAa8GBgaEesDxypUrgRtOBB86dKi6uhpF47cg3YIFC9irid++fft2586dIOwnzDiSTZo0CWREeNSoUbDhFy9ehK/HLFZWXl7+0aNHCN++fXvjxo0RERGcCN62bRtcOUrBz5EJak7ikee0adOI1U1KSpo4cSJqiARkHaOiorC+WITVBFIzMjLYCIYQBmqRIXiNyuD8gMSjwfX09GCl0bDANEE//s6dO7ezszMhIYEko0SJ0vepLzYWTNTR0REWFsY+oYaWLVsWGxsLyoBZxKxxIhjCopkzZ+IEH/w9cuQIK5ZDsJ+wimwPCK1fvz46OhpEw2+BRTg+nHFzIhjCYQAUBq3AX7YnhVABGOrs7GygGThjxX5ccMqurq43btwgs/g5DgOw58gTQFy4cCEqFhkZyYlgCNgFheGa161b934LI0/YdhxjWPPM0wgPD487d+4A3FgjoDkzM5MTwRDIXldX19TUhEMOK4pOhxnn5+dHKTic4BQBLEYk/uI8AC1AIZgSpe9cXxLBAGhlZSXMLBt5sGZjxowBJsCpqVOn4gwdZ/GGhoanTp0Ca0gaCHR2c3MDMd+vDIxzRUUFrC5oi1kkOHjwIFIiT5QCOgPHyBNGdfr06ezBAQjpi4uLvby8gDNWFNMeklkAy8rKigDrE7p+/Tr8NfjLLh0lgq34OcIw76A5Svfx8cEBgPPAg/WFzwUBOY8cRHfv3kXFLl++zD4AIAccQm7duoVScJSCQ0cMjjE4jcDRhaSBsBSriSMcZ56oCSw58kS4vb0dJeK3OJ4hf5A6ODgYWCcpKVGi9B3qiyEYZGxpaQFM9+zZc/78ecQgAMMIIu/btw/whYlby5SEhASM3pMnT8gPwU34X6AWiRHP6ViPHTuGPMHfEydO4LQaMXv37m1tbUUMcoPRQ7YkTwcHB3CNPbaLTHBSD4t64MCBqqoqNhxxSADrEYCBtbe3h0Em8R8U+DthwgSQ8erVqygODbVlyxZgDhV
evXo1jPzRo0dJ6Xp6ekjJhh3YitmNGzdu3ry530U/5NnY2Aim37t3jwwmgJgwtrdv30blAXQ0I8kTR4ja2loUTX6ICk+cOBHxaAcyIkHiwWUnJyeSrK2tLT8/H9UgOdTU1Nja2mKVv9QmpkSJ0hfXl0EwOCInJ6egoKCkpKShoXHt2jUgFTGKioqIQeDhw4dIBhQCSdLS0jC2YBBigBLwl9xOAHLNnDkT5GLkSKcjEzU1NZKDrKzsxYsXwVwpKSlSira2NuBIUoJlZmZmkZGRYCKJgX8Ef8mq4QAAJ04sJ44HANOmTZtQEGwmUM5M/gHBflpaWsrIyJDScaYPlomLi5P6gLkEgmA9ykJlUlNT0QjktzhsAL4IoAIIAJ0kHnna2dlh9UmeHR0dOA4ZGxsjjBi0Eiw2STl37lwDA4P4+Hg0I2aRDw487JEWUHjKlCnsDYfZ8vJy5JaRkQHEk8jDhw/HxcUZGRktXrz4i2xiSpQofQ19GQQjEzCOrQ/GEMG1kRhSLv5yjgtjln2J7NN5kpEBInaebAdNLsSRMPTo8dO+PtYsEsNysscWPiGSJxFyQ+asmY+Uzi6Rc0QC4hweISmJkB5izTDFzuH9NUIrsZdC/YrAoQUHALY1hti1/exqUqJE6V/UFxuI+G7VS3u35diz7p7/jG98A7158wYmnQjOlJhZ6NGjR6zYW7dOnz7NPt5QokTpv1M/P4J3nnk+cd2d7t5vt5pwoCUlJVlZWYWFhQUFBXp6epWVlYg/ceJEeHg4IqFRo0YpKSmRy2iUKFH6r9VPjuDbj3uSZ95o63jQQ/t2q7l48WL2WG1XV5eTk1NjY+Pz58+TkpJgfhHZ3d2dkpLi7OxMhsgpUaL0X6ufGcFve9/Vrn4QPe1G69Yn3wzBZ86cSUtLI8Oy5BG7zs7OhoaGiooKcp8vGnzmzJmTJk3Kzs6mEEyJ0n+5floEY7VWHHoePPl2TNvtsWseve35Fqv59OnT5ORkcndaX19fYmLigQMHAGU7O7vm5mbS1IcOHYIFfvPmDYVgSpQo/bQIvnC3J27GvZCpd8JbbuUvvPvmmyC4qqqK3L8MweoCuwgcPHjQy8uL3MMAXxweHn7z5k2EKQRTokTp50Twy7d9pcsfh027HzLtbti0O9nzvzqC0Yxr165tbGzktLpv37598eJFfHw8GQLu7e0FdtmP5FEIpkSJ0k+I4L539PldLyLbHoa23g9tvRvW+i0QfOHChVGjRpGnrp89eware/36dRqNVltby36L5rx58xoaGtgNTiGYEiVKPyGCj1x+EjvjfkTrg9BpD0Lb7jFc8IL7XxXB3d3dcXFx5Om4vr6+3Nxc8pzb5s2bx40bRx6OOHfuXFRUFOcLKygEU6JE6WdD8KNn3WHl6yLbHkS0PgzD1PYgbNq97AUPvh6CQdjq6moyBIzGnD9/fn19PcI3btxISEggzH38+HFoaGi/u4ApBFOiROmnQnAPra9184OgsUci2h5GtD4Ka3sYNv1BWOv97IUPvx6CN23aVFdXR5rxzJkzsbGxL1++BHmTk5PJNyzA6NLS0nXr1jGT/0cUgilRovRTIXj7yTvRU84GTzgWDgS3AcGPgODQ1vujFz76Sgi+fv36qFGjyCsl8TciIuLatWto0sbGxlWrVpE0CFRUVLDf9sAWhWBKlCj9PAi+fP26e+60wMbjPuOOhLc9+hPBD0PbHoxe9PhrIPjt27cpKSkXLlxAuLe3t6ysjFjdnTt3lpeXE+ZiaXh4OOfLkdmiEEyJEqWfBME48Y9OTbBJrPepOxA4+TrgCwqHTX8UNuNh+PRHoxc/+eIIRruNHz+e/QELWN3q6moajUaGgMldwC9fvoyKiiKMfl8UgilRovQzIBh+s7llgqmLaUBBo1v52pDWe0BwxPTHYdMfh898FD79ce7ip18cwZ2dnVVVVcTqXr16NSws7MWLFzgSAKynmd8VRcNWVlauWLGCmfwDohBMiRKlnwHBx08f1jJVsfQysIxMtc9aGtr6ANjFFDaDgeDIGU/yvzSC7927FxcXRz78AcMbGhpK7kibPn06+1Oh69evLy8vJ3ekfVAUgilRovTDI/jlyxcxGR5SagKqpvIWEZmOuWvCpz8hCA6f8Thy5uOomU/ylzz7ggh+8+ZNWlraxYsXEYYLhtUlwxF79+7Nz88nvvjSpUvR0dGcb6N/XxSCKVGi9GMjmNZHG9NYoWsrpWYioWqiYhIU4z2mE+Y3YgZj/CFixpPIGUDw4/ylX8wFo7na2toWLlxIZjdt2lRcXNzb23v37t34+PinT58i8u3btwgfO3aMpPmYKARTokTpx0bw1h2bVI2kFY2ENCwkNMxVDH2D3Mo3hM18FDHzESxwxMwnkYzpcd6yL+aCDx06lJeXR95Fef36dTIETKPRsrKyCHPRnvX19fPnz2cm/5QoBFOiROkHRvCDh/cs3Q1ldIXk9IWktHlkdcWVLI2DGnZHzHwcOYtpgWc9ZiL4Sf7yZ1/kZZX379+PiYkhdwG/fPkyMTHx5MmTaMAZM2bMmzePpNm8eTMYPZBWpRBMiRKlHxXBPb09eSWZUtrC4lq8kloCCoZiSkZSKlaGPrWro1jwZYJ41tPIWU8KVjz/5wiG883NzT1y5AiZbWpqmjVrFlrv8OHDYC657Hbr1q2IiIgBgpVCMCVKlH5IBKPOS5YvklOVFFPjBXzFVPjk9MQV9aXVrWzME+ujpj+LIP53FqanUUDw8i8wEDGbKRLesWNHeno6qvHo0aOEhAS4Y0S+ffs2KSnps0PAbFEIpkSJ0g+J4KvXrplaGqrqyYqq8EhqCogp88toiioZqWo7xbjkrmHwF+Z35tPI2bDADAQX/mMXTKwuGQK+efNmWFgYufIGX3zgwAEE0IzNzc1sRg9EFIIpUaL04yH49ZvX2cWZ6gaKGuayYkoCkhqC0uqiEsrCKtZmRmFjwprPAcFRMx8z+Utc8FMg+J+4YIAyOTn5wYMHCJMvbx48eBDtNnfu3La2NpKmq6uL/cm4AYpCMCVKlH4wBPe965u3ZIaUprSEmqKMppCitrSUupi4krCQLL+Oq4dDzrLQ5jNRMx5FzXoUNftp9Own8MLRs/8Rgvv6+oqLi3fv3o0w2mrq1KnTp09H4NixY6NHjwaREf/o0aPAwEDC6IGLQjAlSpR+MARfvHLaLdhc08JU1cxFRklcRE5AUIZHTEFYQklcz9UtaGyHZ/mO2JlnomfdBnkxRc16Fj37WeGKF4MeiFiyZMmkSZNIKx04cAAWuLe399WrV3Fxcbdv30YknC88MnwxM/nfEIVgSpQo/UgIfvX6RVCik7mjvom7raVPtLi0tLCsmJCMAI8ol7yWrLqlrlt+s+voeXax+e5l60DeKEyznjMQvPzl4Fzw8ePHMzMzyQNv9+7dCw0NhdUFgktLS7u6uhCJ1pvGFDP53xOFYEqUKP0wCAYHJ82s07dXcPS213MxNHbxFJOWk1ZTVtRREZUTkVQVFdcSMw1PsgjLFZRVt46fHD0b8CXTs4IVg0Hws2fP4HnJ145R+q
hRo/bs2YPwypUrJ0yYQNrt6NGjiH/z5g3jB39TFIIpUaL0wyB4+55NenbyNj6O3lExJk76+rZOSlo66ibaqoaqglL88tqS8gbi2s7utoGjbB18fXIXRs1h8DeG8fdZ4YpXfxfBNBqtqqpq27ZtCBOr29raisClS5fAXPI5okePHoWFhZHhiEGIQjAlSpR+DAQ/ff44ONFR3ULA0CvEKSpWy1RP395Xy9hCSV9Ww1RBWEZIUU9S0VDRwM3Hxj0kJjErsHwVEBwz50Vs+/PoOc8L/j6CYXWbmppI+MCBA8BuT0/PixcvEhIS2EPAOTk5hNGDE4VgSpQo/QAI7uvrm7d0alSas76juLa9g4WHvZ6VmYGjn56ZmZapvKm9lqyKtLKhpJy+nJZDQGxSXmRcVlDF6ug5L4DguPbnMe1/G8GXL19OTk4mVhfYDQ0NvXbtGnzxmDFjtmzZQtLMnz+/rq6ODBMPThSCKVGi9L0j+B393ZLli33CbLLL3QztJSWVxHQtdWzcnAxt7VV11FS0Zczt9bRNVNTMxCXVJUyDixcuXhUWOSqocn0swwK/jJv7Iqb9xd9C8Js3b2JiYsgQcG9vb1ZW1s6dOxFet27d+PHjCXNPnjyJNOSOtEGLQjAlSpS+dwRfvHTB1sXG2dcsfrSdV7Chmq6MuKKMir6Jso66koaClqGSq4etjrmyoqGQgoGyXWxp24x5MQnZYWM6CILJVLjy1dveAa0mCFtdXd3R0UFmFyxYMG7cOARu3LgBX0ze//vkyZPw8HDEMJMMXhSCKVGi9F0j+O3bt+FxgSY2xtau+vGjfaxcVV2DddUNDDUsI5TV1dQNFEwdlK1sLVT0ZWS0+EVVBJyii8fUT45JHh0+rjO2/UXc3FcEwUVA8MBc8Pr16xsbG0mbnDp1Ki4u7vXr169evUpKSrp69SoiaTRaUVHRpk2bmMn/kSgEU6JE6ftFMCrWvnC6nrmyub2pmoGslYeusaOynoOClKamW3KzmIyktomSjpWUoZm5pKKwjJagtJaoa0Jl85S50aPywuq2grxAcPzcV3HtL4tXDQjBMLajRo0iVvf58+ehoaHXr19HNQDl1atXkzQrV66ETf4nQ8BsUQimRInS94vgW7evx6T6yakJy6rKSSvLKGpLa1vp69gaiaso24cVSatoahoo6loqmdpYyCiLyumIymhJ6XslzZq31tY1KLBqI+DLnkqA4M8NRJAv0pPPEZEv0m/YsAHhnTt3IkzeRUm+SD+4u4DfF4VgSpQofacIfvX61ZiGSlc/SyUtaUUNNTFZGVl1GW0rMyUDLSExUW1rRyN7O30zTQ0DFSsne1UdGRktIVl9OYuwokmzVilpGHqXbYqb+/JPBL/8LILhamF116xZQ2ZXrFhBvo587dq1+Ph4ctkN7hj8JZ/p/CKiEEyJEqXvEcGo0qwFM+PSItT05ZS1ZbSMtGQ11GQ1ZKS0lFT0NcSlxGS0ZKXVpLT0FdW0lTQNtMTlBBV0xdTMNexSmuMKJkrKqQSOO0BGITAlzH1ZvPrN215W5h/U9u3by8vLYX4Rvnr1KlD7/Plz+GJQ8tSpUyTNp79IPwhRCKZEidL3iOBDx08m5yZaOhuq6Mqo6MnKq0moGetJqUiqmOor6qpIyUvxiHJLKYtp6ipKyAjLqkhLK4lLqQvJ6iq6lyxzi60Sl1WIbr0aN+81pvh5r/C3hIHgj67m7du3ExISXr58iTAZAob5RbO0tLQsWbKEpFm7dm1ZWdmXbSsKwZQoUfruEPzk2ZPRldXho8LN7PU0jZRk1MTFZIX0bQw0TdRVTJRMnJ3kNHREZEVF5YSUNaUFxHgkleQUNKWEFbgVTYzc81uNPFOEZdQS2h/FM/gLCr/E39KPu+DXr19nZmaePXsWYXJH2vLlyxHet29fUVERuex28eLFmJiYZ8+eMX7w5UQhmBIlSt8Xgvv63k2e2+oaGuAa6OLoaa2mpySpIsYjyqVhqhIY561lpqBtZaFiYikkIygkw6+iLcctzCOvoaxuJC+mIqjr4qjhHGTmlyaiYpy04CnIG9v+NHbuk8T5r8rWfNgFs60uaYT169eXlpb2Mr9ID+aS72L09PTExsaePHmS+YsvKQrBlChR+r4QvG3PCcdgP5dAh5BYPwMLTQk5MQ0jRQklUWMHbVtvAws3XU1jAx0bay5BbhEpAXU9NVFpKXl1SX1rdUkNAXU7Q12XQLfYLFk9m+QFj4Hg0Emno2Y/SJz/unTVh1/Zvn//flhdMgR8+fLlyMjIF8wv0sMXnzhxApFwwXV1dQsWLGAm/8KiEEyJEqXvCMG3bt9JLJ7iGeHpF+FaNibbwknPzEZfy0hFQknY3EXT0lPN1Eldy0RTVEWZR4hfWELAwEJfUk5cS1/R3kdfzUxG39nG3D9Gw8ZA0dA2ef7DhLkv3cs6E+a/SJj3snTBpe7e/nfyAn/R0dHE6r5584b9Rfq2trb58+eTZuno6Bg9evRXaiIKwZQoUfpeEEzr6y2qm+KbMSki1c87xDY9P9TSUTciyUPbUENVT9rYXsM7xsTSSd/S0UpaVVFIQlBYgl9WVUJAnEdTT8HMRcXMVVfH0UzH0UrBSMrEKShm1q3YmQ8DG44lLHidMOPW0rUHev/6Vbfu7u6srCxiddEC9fX18+bNQxi+uLCwkAwBX79+PSIi4smTJ4wffAVRCKZEidL3guB9RzqCRjd7JNYk5kVEJrlllgTbupr4RTg6eFjauOm7BVv6xNrqWVtqG+rKKsvyi3JjklYSldcQl1ISlNcW17JQEtcQEVMTVTRR1rLyj2y9Ej3jVtTMe0kLXhbMPNY5d2Zv91tWSUzNmjWrvb2drPv27duBYxqN9vjx4+Tk5Lt37yLy7du3o0aNOnLkCDP5VxGFYEqUKH0XCL52915ObVpszSr/1Oqo9ICswsjoDFc9K10LF309c3VrVwNdKwXPKA91EwsReV1BCRlBMQF+UR5+US5ZdRFhWT5pFQkVA2lJFSEJFREtR3tF0/CwKWdCJl1IWPgytvXSjLYlnZPGciIYYM3NzSVDwDdu3AgKCnr58iUQnJeXR75IDxc8ceLEv/VF+kGIQjAlSpT+fQS/7u6rbd8QnhEdWTXbMyEtszgmpyI6LNlT3VhXy1TR1EFdXV9aRlPE1tdKVcfAISRFWd+cX5SPV4SLV5hLVEZEXEFUXl1WSZfxETlJNTGv9BpN1zy/cbuB4OT5LzIaVy2sLlrf0tzz54slHzx4kJiYyP4iPazuoUOHEIYpnjlzJjMJfceOHeDj3/oi/SBEIZgSJUr/PoLX7LmY0bworjDVOiTBLzZu6uy6nIqYwGhPY0dHNT0FHXMZcXlBaVUJeT0pRQ1NQydnTRMjXiFufjHeETzDuAW5BSUFxWVFlHTExBQlJLUVrWJG63mX+VRtiJt5N2Ha5bKC/JU1hQtnzyQ8xcoWFxeTT8BBU6ZMmTFjBiJPnDgBIL59y3DKz5498/X1/btfpB+EKARTokTpX0bwuVsvyhZeKWqZFpYWZOIelF1WUtWQk1USH54Q7hQS5
OBpFZHqJSYnKConIqWirqSpaWhvqaqvyifMIyDGy8U3lFeQR0hSQElTWs1AWslARc3axiKuziKi0W309OQF9zPqlk7NT5hdVzl//nyC4IULF7a0tJCid+/enZ6e3tfX9/z587i4uHv37iESFI6Pjz9+/DhJ81VFIZgSJUr/JoJfvn5Vv+xIwZIH6Q3jfSMdgpKzaprLG6eWhib4hMaFBMUEh8S4u4YZSykKqRko61o6qRuqaxhrCEnz8wiOFBDnE5bm4+bnFhTn1TFSVNWTkdGS1nPx9q/ZZRszVctzVGjDppri4ql5cVObmhYvWQwEA6wZGRnknWdgX2ho6N27dzHb74v0bEZ/bVEIpkSJ0r+GYJS7fveWnFknsuddyG9bbGCrHZ+dlF0ePn5qqY27oZmzSVZRXFiyvYqJJI8Ql4q+tr6ltZSauKSyiJi8CLfgCCFxfn5JLj5hXiEJHhUtWVEFfmktGbvosMwF5x0SplqGFzrEFEzNS5pQltsyfcaKFasePHiQnJxMvrzZ29ubkpJCsLt8+fJJkyaRRjh06FBqaioZjvgGohBMiRKlfw3B565dG103Jq3tUPbMfcVtbQ6+FuUNhXlVCWObSlQN5PUs9RIyPV2DdMSVBbkFueXVldQMtGS1ZHjFuHhERvIIDReXExSU4hGW4heWFFDTkxdR4BdTFpEzMQytnGQakucYW+ISkbVobElrW8us9vlLly8rKysjn4DD+k6dOpVY3YsXL44aNYq8//fRo0chISFkOOLbiEIwJUqU/h0EP3zZN3ndodTaWdmzzoyesnJ0XZGVl354UnB+ZWJsWoCetYZPqEdSZrCBlZKipgyX8Eh+cV55TTkhOUFxBTEJRVF+cS4hSV5uoeESCkJS8hJyaqKKOjLC8uKaNk42QSFiWoaGzn7xBfULpjXNaZ8/Z+68tLRU9ueIDh48mJaWBqv78uXLmJgYti/Ozc39J1+kH4QoBFOiROlfQHBvH31O19sxK89lTdud0X6tZPr8gAR3CzeT8rri5NwgzzAHW3fT+PTwoChXVV1ZQQk+PjEBETkhUTkhEVlBVV1VCQVxXpGR3AIjRvINFZTiVlaTlVIWkNEQUTDQMQtI0HIIEJJXU9C3tvYLntc+Z968hbVjx0RHRxOr++TJk7CwsJs3b9JotJqaGjZz29vbGxoaBtIUYHe/5+WAUc5PKd+/f//06dMDea0aG8Go2+PHj0kkEWbJbctEDx48QJ7kWeoBqqenhzM9Mjx16hRnKTgCIU94f9Y8JUqU/g39CwjuutxXtvrR2NXHsmcdKlx6NyI/2yPUPjQhoGRMpp2XoUuAja2bWXiir4mtpqyq+Aj+ETKqckIyggjwCnJxCY4YyvOHgAgvF99IAVFeWXUJNU05aTVBcWUBNSNt18RUr4xqSTUDRUMHSzf7BYsXzpnb7uLicu3aNZQL7I4ePXr79u0Ib9iwAcwll+bOnDkTFRX12c8RgYlbtmyBWZ46dSqJgYNeunQp8j937hyJOXTo0KJFi1atWgXQ7927l0R+TEAw2LpmzRrUavz48STy+fPny5cvd3BwuHXrFonp6upauHAh8gwNDf1snlBfXx8OLXl5eex6Ar7z589fu3ZtZGTk5s2bEXPjxo1ly5bt37+/tbV19+7dJBklSpS+vb41gm88fle1oa946eWCmVsKZh8oW3g0MisuJCksJT8qOS/U3ssiLt3fP9xDy1DF0EZDWFaYS4BLWIqfT5RXSEKYh3/EcP5h3Pwj+AV5eAR5hvEO5RfjUVCWkFETkFIVlVSTNvM01/UIE1FQkVDRcfJynr9oQWBQIGEuVhNWt7GxEeErV66kpKS8fv0aYfhQ4PLOnTuMyn1OsJbgWlNTE5kF7F69euXh4QGIkxhkRR70APL8/PzIuybgN1esWMF2tSeYQoC4YDho5Dl27FiyFPVEKTY2NuQj+ZhNSkoiz+ytX7/e2tqaHDZQLpBK8ofgZ48dO0bCEHKYNGnShAkTyGxWVtbGjRsR2LVrl76+PpY2Nzd3dnYi80uXLrGLpkSJ0rfXN0Xw6x566653hav7Cpc+zJu+LXfeqaLW+WllaXF5cRHpLsaOWiYOujYe+tomKvrm6pomKiLyooJiggKiPIoa8oLiAlz8I0bwDRnGNVRAmHcYz7BhPENG8g8TFuOTURMUUxCU0ZI2dNBSNreRUFESVVDKKczJycsBX8gKwgnGx8eDXAAi+Hv58mVEAmdFRUUETwPUtGnT2AiGgDNPT082gsFBcrKPZImJiaRo/F2yZAl5BuTw4cPl5eXkI83sgYiWlpZ+HLS1tSUIho4ePUqGC+CF/f392dsLWEeeWAUAvbS0lOTJ1pQpU9gIRvXIMQbUdnZ2Brjhf1VUVPCriRMnsr/MRIkSpW+vb4dgFLPlzLui1X35QPCqnrz2I7mLrmdXl+fVZoSkO0Zl24Umett7W4C8mkbK4Yku9t7m6gbKw7mHwwLLqCryivBwCwLBQ/mEeCWkhPmEecDfYSP/EBDiFZPjl1QWktOVMXO3k1DXElOUUtHVG5WaEhEZgfN6FE0+R0S+SI9TfvYX6UGxmpoa4isHqE8jmOjBgwfe3t4XLlxgzTP9Mk78R48eXVJSwh4mHiCCie7evYs8yZGDCOuyfPnysrIyHEXeH3rmRDAR0gQEBJCnTlBDVMbd3V1ZWZn9rCAlSpS+vb4dgi/df1e2tg8WGAguWPW6YNnF8iXnkvPSozO9PaJ0Ewvcg2JcTBy0rF2sNI0UwhI89SxUBKX4eQR5h/ENE1PSFBDn4wGCeYcKiPErqcnIKksKSfCO4B7GzcOFRULSfDIaMqpmBkbukcKyYg7uvvb2DlevXkW5vb29BQUFW7duRRh/q6qqyCqTIWDOK2kD0WcRDL9ZUVFx+/btfliEmXVxcZk0aRKb+ANH8L179yorK69duwaLzbm9kGdgYODUqVM5r90R9UMwfjhmzJjz588/efIEOWRmZuKAhJoA4vDFrESUKFH65vpGCH7xlj5xO8MCFzARnL/icXb7qZoF+7xiolzDLfwTTIvHxSXnRlm66ocnBGoZq7oH2Vs7m4rKSvKLCvCJ82iYWgzh/n0Ez5ARvMMEJHgV1CRFpQT4REeO4Bo2kothkwUk+dRMFOQNdbTs3PglhSXllFauXEmKRgDYBW4AtYSEhFevXiHy5cuXkZGRnEZ1gAIuOdEGBHOOBcOrFhcX79q1C3CcM2cOiYQwS/zvwoULZ8+eTdqcE8HgIzMhSzY2NkAkCcOxIs/du3efOHECydjbC4UWFhY+ffoU/pqMSJB4osmTJ7Mv8aFclN7Z2YkcEAlLHh4eTgZMUP+cnBySjBIlSt9e3wLBfe/oyw73Fa95V7Tm3Z8Ifjp68b38qSscI+JcQny8Y52S86Ns3Cx1zDUcPS2MrY2c/K3k1cXF5OVH8A4XkRXgFuIdzjN0yPDfhnEPEZbiV9aSFpUS5BfjEhLh5+Hh4hYayS3GpWgsK6NjIKIowy/OW/Dna9cvXboUFhYG4JLPdMIGIhKrDKitX7+eWbuBCk4TsBs1
alRycvLSpUuRCQwvzCZ8KCzq4cOHkQasBz3t7OxgY9vb28kPX7x4UVtbS9CPXy1ZsmT//v0IEwQvWLAgLS0tKSlp8eLFwOjz58+Rp7+/P/Ls6upC+vr6enae7JsckFtdXR25iwNpVqxYQfIks2vWrElJSUlMTEQd0A5APDsHckHy9OnTDQ0NGzduBLupsWBKlP5FfQsEn7z1rnRNX/Ha/yA4bw0tb8Wj8palUfmlMbnJYSnRUXk1Zi4ugrLCyjryprYmxvZ6onIi/JLiXMx70YaN/GMEz7Chw38HggXF+WRVJMSlRUSk+EZwDYER5hfjEZLlFVUS1LAwFZIWtrGzJtemgLOIiIgrV64gDHixP9O5evXqiooKwuhvIBTaz6KSWbYLHoT65fl+EZ8V0oPj749gUKJE6VvqqyP40Uv6+I53JWveEQQXrunLB3/X9FasuJA9tiF7fKtPYkh6cUpy/QpbHy8ZDQVVHUWfEEcXP3teEV4BSZGRfCNH8Awfxv3HMK6hw0cOYSKYX0iSn1+Ql0tgKDcvA9DCUgIyGmKCMvySqiLyynJkeAGEhZGEPUR4z549sL2EuRcvXoyOju53/8CX1+vX9GPH6EePMiYY5I0bgUlEX79+/ShTR44cCQ8P37Zt2/bt24kP7e7uPnnyJFlKhDSbNm2iKEmJ0k+sr4vg3j76/AP/scAEwQVraAWrXxfOXBWekZxW1xRVXJlSGO9XNFtJT1nfStfIRtMjxN4jyIlPlFtMVkJMWnQI1++wwEOG/zF0xB/DuIeKSArzCPLwCfKMFPiDi284nygXnwiPgAwvjyi3kAQ/+2vH69evLykpwdrdvXs3NjaW3Brx9u3bmJiYr37qjSatrqbn59Pr6hiTmRnd3x+R58+fR03qOFRWVjZixIjNmzejni0tLVlZWawFTAUGBurp6X3tYyQlSpT+RX1dBO+70le2Dhb4LwguXEMrWnolqXaKV0JceHFGwpi5yaX5ceNWyGkpmTgY6lmp23rbKerISyqKSCpKSMmLD+EaMmTYkD+G/gYKj+QbwS0wkl+EX1ZRiltk2Ei+YdyCw3iFePgl+EbwDUtNTe1hvhf4ypUrERGMO9KwdmlpaezHFsaMGbN48WIS/oravBklwYczwmfP0j086ElJMLNJSUnw4MwUDKFuNTU1/v7+u3btOnjwYH5+Pqk80b1790JDQ+Pi4ljzlChR+hn1FRF86yl9zKZ3pWvflaztI/wtWgsEvytYSxs9a1dx66aYotLQ7JyyWWvjqmdH1y7WMFFz8LHUt9Cy87JQ1JZR1pFV01fjERg5nGvo70N/+2PYr38M+w2cHck/ApFCkoK8YiN5RUbyC3PxCPCM5BtpbGr88uVLlIu/CQkJp0+fxqrBWrKZu3Hjxry8PBL+irpwgZ6cTCcwffKEHhpKP3asLzl57Nix5OFgtjZt2lRQUNDe3r506dL4+Hji04m6mR943rNnD/6yoihRovQz6msh+E0PffZeBn8JguGCMYG/hWv7Cta9TmtaUjZ9SWzFFJ/sCVVzto2ecyG6bKJHuFFIspOBtbqRo5KagYKavoKKrgavIC+34IhhXENHcA/7Y8RvwpL8PELcMMKMG9EkeHhEhvMJcXHxcomKi7K/dtzY2Dh37lys1759+4qKishY6s2bN+GLv/pbaV69omdkMCgMoWHz8+kbNtAfPOjQ06uuruZs6rt37wYFBT158gRVdXJyIo81swUuNzU19fX1UQimROnn1ldBMHLcdv5d+fp3peuYCF4HBDOmQlB4XV/R6kdZkzsSxi0oXnAqt/3c2HWPyte/DkrPjkgz8422NLRVUzORllGTkFWV0jTSNbE04hXm4uLnGsY1BBaYB8DlH8klMIJHmJtfjEdEhnfIiN+HDR82a9YssiLbtm3Lzs6mMb9IjxP/+/fvI/Lt27fwxV/9c0SowMSJ9D8fvaPDfdfX0/v6bl28mODuzmlyX79+HR0dffbsWYTr6+th1Tm3wtGjR1FbMihBIZgSpZ9bXwXBVx6+q9r4rmx9X+n6PvC3ZB0sMAPBReAvYta/Llpxq2zJgdwZHbmLbhRvfFuz7kZQanhUmltUqrejj6mRvaakkoi5nbmehY60kugIviEj+YaP4B0+jHfIMJ6hw3mHD+cdyivCzSvKzSc+fOiIPwAsYnWvX78eFhYG2GE2Pz//4MGDiISXnDhxIueDEl9LnZ30ykoGiKHTp+nx8TDFb968SUlJuXTpEjMFQ6jP+PHj582bhzBom5mZyXk/GXw6VoH9mjQKwZQo/dz68gh+8ZY+ZVdf+QYGf5kIhgtmGGGEizFtAIV7y9ZcrZi3JbR6dd7yJyUbe4vbVwQmO4wuTw6IcXEPtHTwMRWTF7J1sdU0UeIV5+IV5uYWHPnHiN/hgrn4uIbzDBvJPwz85RIczi82QktH6wHza8ewjYmJiYAawjiRb2trY1aHcUdaRkYG55WuryJAMy6OTu51e/OGHhVFP3sWtJ0yZcry5cuZKVjau3cvqQ98ekxMDOetwUgPC98JlP8pCsGUKP3c+sIIpr2jrz7B8L+cFhgTwoxIcHnDu9KNtPJlJ4rmdpWsfoGYsk29tfPmBibaB0Q7BsW6BsU5mjlricoJyKqIqxkocAmNGML1u6K6HLfASG6+EcO4ho3kG8EjxMUrxs0tPEJcWmTfvn0oF2sxadKkWbNmIQwK5+TkEOaCzoGBgYN+AmKgQllJSXRidfv66CUl9LVrEdy5c2d5eTmnyb19+3ZoaCisLmhbWFhI3kJJhFVYsGBBQ0MDa54pCsGUKP3c+sIIPn2HXrEBFvgdg7Z/Urh0HQO+ZCrfhKm3sL2zfNm5so20sg20ik1v6+Y3+0Sbh8Z5RY8KCk60N3ZQE5MTFBDjNrTSGcl4O9owNR1lXgFuLt4RQ4YPGcY9FKYYFhi+mP3A7q5du8gZ/bNnzxISEsjniLq7u+GL2ZfpvpbQgBMn0tlWd80aBoLfvQP94+LiOD+xgfqkpaWRJ4mXLFmCynM2/okTJ1Bz8hZjtigEU6L0c+tLIvjxK3rjVuJ2YXsZzpeBYObfso0s/lYwpu6s5kWj511CuGxjb/XGJ5UtZdbuBjbOJmGxfuau6hrGcmIyYkqa8nLqUsN4h/GJ8PAKcQ0DfkcOGTqCMS48UmD4SP5hQcFB8JIo9/79+2FhYffu3evt7S0pKSGflsB6gXHs4YivqJ076eXlDPMLwQhHR9OfPXv79m16enq/l1i2tLQQ7J46dSojI4OMXxO9fPkyIiKCc8iYiEIwJUo/t74Ygnto9AWH3pUzgcuYGEb4Hf6WbESYmN++is1MBG98kztrb+n67spN7yo29xbNXplTHe0VYm/tbJA2OsnUSUNVR0FKTkrbRENYWnAoz1Au/uH8IryMp5O5hgznHsa4NVhgmLauFrG6oHBycjIZjli6dOmUKVPIGuEcHxD87OeI/qnu36cnJtLJvW4wsHFxdCZ2W1tb2c/pER08eBD17OnpefHiRVJS0s2bN1kLmKtQVFS0YcMG1jyHKARTovRz64sheO+VdxX
rmaPAzAGHPxHcV7KBVsbi7zv8rdxEq9zUXbLmccVmWtXmvuotr0LTwxPznOOzfUISXLOLop38TARE+fhF+JR05PlEeXiEuHmFuPmEBbh4R4zgHTGUa8hQ7iF8grzsL9JPmzYN2EX4ypUro0aNIify5L6Cu3fvMmr29USjMe4CZn6FiOGCx47FQQDBQ4cO5eXlcV4AJI+6wa2jwuXl5f0+1bxixQpEsmb+KgrBlCj93PoyCL7z7N2Yzf/hL2PMgTHywBgRZo72grxMC8yYaBVbeis391ZsoVV30Bq2PozJDUwudPWNNHf0NQyOtbPxMJKWl+EV5pFTk+YV4eXmHykkxs8jyBwIHjlkyMjfh4z4o6GR9bXjvXv3koeSnz17Fh0dTe4CJo+WffWvUqIC06bR2Va3o4Oemwsoow5xcXGcdwGjeuz6wKdPnDiRxBOdP38+KiqKPNf3vigEU6L0c+sLIPhtL33W3r4yYoEZQxAsBLOmTX1wuwzzi7+YttDKtyBMq+qgjdlGq99wOjjVOTzNPizewzPM3MpV087TVFFNSVRKREpBHCDmERguIiUgKMY/gnvoMO6hI3iGenl7EqsLzMFaki/SV1dXs+/lmj9/fkNDAxkm/oo6cICel8d6EPnBA3pAAP3xY9C2sLCQ3I/MFupDXpQO2mZmZpIXBxNhRcBf8hbjD4pCMCVKP7f+KYLx880nnpeDufC85Mobk7/MMV+m/2WMNjAozADxFtZU1dFX20lr3Nk9ZsHS6Gy34nFxhVWxvlH2kYn+StpS6roqCiryMkrSgmJ8I3hH8IkKDB05BAjm4huhqKRIvugDq5udnb1r1y6E161b19jYSJh74sQJQI1zEOCr6PFjxpMXzA9PMIaAo6MZr+Oh0+fNm9fW1sbZpEeOHImPj0d9QNtRo0aRlxcT4chRW1vb767hfqIQTInSz61/iuBDx46UzDlVziAvY/CXyV/mmC+TuWQCgqu3vKva/I7JX/x9V7OVVreNNr7jYd20+uK69JqmrJB4GzNnA0cvazkNKSV1OSV1BT0TbX5RPj4RPgl5+RE8I0ZwDxvJM4I9ijp37lzyAYjLly+zv0hPhoAH+EX6wau7m15QQCdvdUDrNTXRmY/egf4wuZz3OTx9+jQkJAQ+HYeHsWPH9vtU84YNG4qKijjvGn5fFIIpUfq59Y8Q/OzlS/+EyOwZZxkIXk8Q/A4IrthEYzjfjr5qTOBvxzsSqCJTB21sZ2/r7lcLd10uryuqbykpG5fl5G/kGuCuY6olJC0sIMYvJCFgaW/ML84tKiMio6I8gnv4H8N+r6ysJLU9depUXFwczuhfvnyZmppKrCVYBqJt2rSJWbWvqQUL6G1tDPhCe/YAk4Dy48ePY2Nj+z3qlpOTQz4bCvhyfvYNwnEiODiY867hD4pCMCVKP7cGj2CcXE+aM9vc26dk6ZNyxvgvi7/lm97BAtds7avGxKRwzVbYXkaginkJrnZb75Tdb47efHXw2PGKupzssoTUwmhrd8PQ+GATez1RWWlJeXFuAW51PSVhaT5eIV4BCeGhI/7w8PAgn7oAtmB1YS1R83Hjxq1ZswaRCC9ZsgSY++pDwKdO0TMzGUYYgt0OD6c/fIhCi4uLyf3IbC1cuLCurg6B69evJyUlcV5ww8EjKipqIB8PpRBMidLPrcEjeMv2LRZeti7hcWXkOeP/WGCG4QVzQV6C4GoguOMdyxR39rTsernhxLOLV27u2tM1c35rRWOxV6S9uZOWqYOesa2ejaubjIrUSH4uDX1VGWUJbn4uGRW5//nf/7GwsAB5IVdXV3JG39HRISYmFhoaikic7wcGBn71u4CfPaPHxDDuBYZ6eujp6XQmdjnvRyaCT4+PjwdqUSXwl/OZCyRraGggr+n5rCgEU6L0c2uQCL55+6aVq4WmlbKOvUf+0vuljEeN+0phgTcyLDBBcC2DwkwvzMRx5dZ3VVv7mne9vXC/++GrnoOHD128dLlj57al6xe4+tmbOBjoWRtpGKrrW5oamOspaaiY2xsoacuO5BshICpoYGjY/afgH8l7eeB/p0+fTiL3799Phoa/onp76cXFhLkMtbTQp0wBUN9/1A3kjYiIuHz5MtwxatXvU82dnZ39PpDxCVEIpkTp59YgETxr/ixZbRkNSzVjj/CCpU9LN/YyHkFmPO3GuOZGRh7GgMKdTCO8lQYKV3XCAtM6zr/t7ul9+erNidOnXr15tWPPttmLpkUmxrqEhpk42GibaIjICcooS2obaXgEWgvLig0dOYSbjxsIZhVMhw2NIQheu3bt7NmzSeSRI0fGjx9Pwl9LS5fSp05lPYh86BDj0xjd3S9evEhOTr5+/TozBUPAbllZGeqG8M6dO6uqqjjpfOfOHXj2x+RWigGIQjAlSj+3BonghonjZTSk5fTUlcw9qzf2lG98Uw4jzHz+omoLg7y1nX1jmX/B35pOWs22vuptffU7eq4/6Xn9tufFy7cPHj56+erFus2rS2vzPIPczN0dTO2tFbVk1AwVRKSF9cw0Tay11Iwshgz/feiIIQaGBqyC/y0EX7xIT0tjvIUSun+fHhJChoArKio43y0JrVy5sry8HK16//792NjYZ8+esRYw3xw/atQo9gcykObNmzf4S6PRYORJJIQwyE4GtT+LYOSJHJAYAVbUnzl8+l4LTpELm6RECLVCGOKsFWz78+fPB54nJUqUBqJBInhiy2Q5LSVRFVl957CKdU+KFh4r3/C2YhOtYjOtuoNBXjaCAd9a5oTA9AO9z9/2Pn3Z/ep1T093z+s3r9Z1rCyuzvAIdLTzstM20ZVVlZJWlhEQE3Zws/EOcdI2t2N8uHP4H4b/LoKB0cRExhuBoZ4e+ujRjFfzMB8sbm5uZkT+qQsXLpAv5INiqamp586dYy1gatKkSZyvDYIXdnR09PLyCg8P37JlC4ncu3fv1KlTly9fPnnyZEDw0wgGdgMDAz08POCsly1bRiKPHj2K365evXrChAkDISYOITNmzKirqyssLCTgdmcqNDQUK0jSnDhxYuLEiatWrRo7duzgOgwlSpQ+qMEORCxaoGFuLSgnKWNo7VkwLaS0pXTdG/C3cjONWOAx295xIngMELy9b9bBnlfdtEfPerq7+7CrP3vxdP6qFv9wd0sXXWtPKykVSWUdJTlVRS5BHg8/B1NHbWkVlaEjho7gHWZo9O8hGO1TW0tn3+u2cCG9oYHe13fx4sX09HT4R1Y881E31I18jmjatGn9XtNz+PDhxMREziHghw8fNjQ07Ny58/r166AtYm7cuAHwAd/37t2rqalBExEEHzx4kL2ZsJT9lmGEwXTkQMadEfPo0aOgoCCQFK4WDp1tjdE+JAGEbMlbjSAUFBAQgEWQr68v6oyl+CHJE2GkgZ0H6JHn06dPS0pK2PlQokTpn2uQCJ63cqORZ4qkhqmgnI6cZWDKlMPlTAtctYUBX0zAbh2DvIxA7XbGs8hjt/e2H+599Yb2+FkvjUHgnvNXTkanByWmZ6qbyurZGqvqq8hpKMqoyAlJCukYq9h5G4
jKiPGL8nILjTAy/vfGgtevpzPvLWPo6FF6bCy9t/djj7oRK7p///78/HzOhr1z5w5I1+9FEEBwU1MTbC94RxYVFxfDje7Zs6ejo4OMYBAEr1mzBtaYDAWAj+TLIBAQPHPmTOSwfft2kr6lpSUlJQU5wFZz3qS8Y8eOcePGgcio+ZgxY9gIJjmQMOg/Z84crEh1dTUSIE9y2zIoD0ePPLdu3UrewkGJEqUvpUEieOGa7d4Zk9wTxmhYunimjK/c+LYcFpjxFAbD8BLyjiWB7TQmgvvqd9IWH337/FXP0+fdcFJve7qXrpufkJuTV1nuGuGkZqRq5migqKUC7EorSVk6mpg4qMmqSXEJjeAWGvmvueCLF8llN0b4+XPGF+lv3ECLAZT9HnXDLADa29t79+7dhIQEGEbWgj9f0wPOsub/FLAL1wmqbtiwgaAWEIyPj4ffhGWOi4sjP0Q8SiQDC6WlpZwf2kACQB+gBDGRHvREHby9veGFz507Fx0dzQl9ABTVxnECOGZFcejUqVM+Pj4gLDbN3LlzkQMKio2NBbUzMjLc3NywRjD+SMN5dZESJUr/UINE8JJ12x2iSwJzm8TVtV2Tmio29TKeSGa8+YGF4DHbaUwE943ZjomB4Gn7e7dd7Hn6sufFq+537/pu3LqZV52TWlYXMiouqihX11xB20JPRVdTRVtVQILPysnExF5NVEGAR5SPi3+EkdG/4YLBr4yM/3yOKC+Pvnkzgps3b+73qBv5Ij0g1d3dXVhYCICyFjDpiUpOnDjx/Xa+ffv21atXEYCBlZWVBXnBbjJYDHSqqqpev36dIBgCqeGj338HJnm+A2ZWSUkJGYKwoDBi4HbNzMxOnz7NTMgQYIqmy8zMZI9OsHXp0iXAHVYdeH38+DH5FfI0NDQ8e/ZsWVkZVoqkRCknyMs5KVGi9CU0SATPX7nKNW60kXeApJqGQ3xd8dpXlcxHMBjmF8xl/mUEAGJGgDEKseJU94GrDAQ/f8UYiDh+9mR0ZlJkbl1welpcZbG+lZqGmZWRrZWMspyonKiTt5WNm5GgJK+ytjIX/0gTExNWwd8MwWgWZMi8t4yhhQsZrwOm02/evJmYmAhckmgIsIuKiiJX3trb23Fez9mkZAj4g84RKUn9b9y4oaurC7bCFI9llgJjq6+v//DhQ4JgELm8vBxZwQtPmTKFDNFCSE9efQluamtr4xjQ0dGRmpqKGPzEysrq2rVrzISMSuKwsWvXLnjhxsZGTo6Dv/X19TgYwOTCqq9fv37cuHGIRx3Q7Ldu3YJrRp5YKfxKTU2NvCmfEiVKX0SDRPDMRfPcY5OcQ8NkdQxdRk0u39jNePgNFphJXkx1O94xELyDNhaBHYjp3XOlt+tyz/3nPdef0Hp6aeu3b4/MSMxvXOibUpBYGqqqK69maqNqqKOkrSypKOngaWFmZygoISCjJMUnzGtq9s0RjLP1igrWXcBnzjDei/byJfzj+4+6gWggL8LHjx/v94wGmBgaGkpe7fa+sBYtLS1gYlFRERnWQP75+fmrVq0CBJcuXYrMCYLhbbGCCPT19WGtyVdKIVBy2rRpwG5lZSXSIwalV1RULFmypLm5GY3DvnQ2depU8BcZQtu2bcMsiQe7bW1tFRUVYbqBVyAYRxfUasuWLVVVVUA80sPaI4z8yR0diCG/pUSJ0j/XYAci1m9wjAjzTUjTcYrMbjsJC8xEMI0gGPwdt5NB3rodfeN2MAYlGnfTuq70rj/bc/Qm7fCN7icvuheu22Dr6x9dNa98+tTIUbYyyrJuMXk6puqK2go8QtyWTqbm9mZSitLiCmIScqImJkasgr8NgmEewVxyt8PLl/TISHhF4GzChAkolJmCpe3bt+fm5gJ8qFJsbCy5fkWE9Djr/+DAK1v4IU7/YVFZ88xf3bt3j+2yCYI578+F+qVHDpxjvtig9+/ff87xznio3+ADZw7vCy777t27nE6f5Ml5jzMlSpS+iAaJ4NlLVjnHhgaPrrSMbciYfalyK8MC1zBHfpmel0Fe+N9xOxFggHjy3p7Vp3uWnuhecbJ39+WeEzdfVU2dEZE3oWzZ0aTiUV6h1hrmliH5zTqmKnJqsiN5hxtZGemYaitqKqsbKyuoSlpYmbMK/gYIxml+cjJ5/y/DBcMLr1yJYFdXV1lZGafJBSvJ287QhgUFBezbDIjmzZvX1NTE9qEDFUo/fJi+ezeZJgYGMgLz59OZtzegrD179uz+kGByYZ9J9S5fvsyKfU+dnZ1YEaRBnQ8fPsyKfU/Lly8n49SUKFH6qhokgucsXWXu55UxYWnClGMVm7urtjLv/2WMPLCGIOCCmV6Ywd+6HbS2g7QZh3pnHuqZdqBn07neJUceZNW3pE7ZWbHsiFuolY2HUfjoioDcSlN7XXV9FUFxASkFKQExfhltY2U9FWUNaTPzbzUQAWJOmMD4ED0R4AsEv3sH2sbFxXHawDfMhy8OHDiABly8eHG/M/Tjx48nJCQgDWt+4Fq9mp6fT587l0w0rGBZGV1IiP74MbwwSpw2bdrcDyk0NNTT0xMGFuhMTk5ub29nLeDQnDlztLS0WltbUc6mTZtg0lkL/iqYfV5eXgrBlCh9Aw0SwYvXb9N1DQsqml+15S3z/We0asYjGDRigQFfJnkZCGbE7Oibup82ZR9t0j5a817awuO0pg1HM8a31255Urtsl4OvmXOAqWt0slVogL6luoSCiLC0sJyKHI8Qn0tcua6pjogEn4mpMavgr41gMgRM2uTSf75ID1px3l0ATZ06taWlBa134sSJrKwsTneMU/ioqKj3v0j/eV28SE9JoXOOEjx/jhWm29u/e/Jk8uTJH/vExvnz5318fMg9ZGlpaf2qytaSJUsCAgIAYuC13/sz2QLE8/PzPTw8OL/xTIkSpa+kQSK4fenqxLrF0Y07K7b0sN8CwR4IZvnfne/G7eobu7NvzM4+kHfS3t4JXX0TumgwwuWz1yRXT67b+jCnucUpyMHczdjQM8g7eZSSrqKYvJCQrKC0irSkkkJYTo2WiTqvIJfxtxkLRrZJSeSUn8HB2Fg6k2VwuPPmzeNsqEOHDsHk9vT0gGKJiYmcF9yQrLCwsN9dwwPSixeM0jmv3aHE2lqAk56YuHv9+tLS0g8Oazx//jwiIgK+GzVBI7CfKu4ncBlNt2fPnhkzZoC/H3tbMTA9duxY5EMhmBKlb6BBInj8pEmRRePTJ6+p2Nxdsw387WU8hbydeTmOeS2OieC+cbto4C8o3NTV19xFm7AHAXjh3tKZ650ik6tWX0yprrEOjLTyd48sn+MU7q9hpKWqpyGpIiWmKCgoJapjbmxgpS0qJWT8DVxwD/P9v6dOMcI0GoN9HI+6cbLvzp07ISEhjx8/RtOVlJTs/uunmoGwmpoa1szAhfzHjCH3Hf9H4Hh+Pv69FxGREBGBEkk0p1CHioqKVatWwXobGBiUlZWxFvxVT548gUeGMcfBw8zMrN/7M9k6efIk+fJeU1MTheCvJGypurq6pUuXYqtBaHPWgo8LXR3nLitXrkR6HN0/f
TUVunXr1uLFi/fu3Yv05CZCdOCnT5+uW7eOPTiGaiCrBQsWdHV1sW9zpPTtNUgET5kx29jNLbG8tHbTy+rtjCEI1uMYrPEH1lS/i2GBx+1619TFmJgI7m3e97Zq/vaMlo66tSdCUhOjq2ZHldenT1mrbqqpaqChbWKoZaKppC0jKiuqpKujoqMkISv+LRA8bRrjqhfRli30oiJ6b++9e/fgdjnvc0BnzfrzUbfly5e//5oeIIzzXoKBatMmBoI5Te7ly/SICOwo2EBpPj4wuaz4vwqNAASjVtjrHB0dP4hpiGAaAezGo0eP/qCbhqMPDw+/jHLpdArBX084HfHz80thysLCgv2prX57Iudseno6TlyQHsdR9H9yfwsScKbhnN23b5+LiwtKmTVrFmEutia6q6mpKbuHTJ069dy5c9evX3d1dUWvIJGUvr0Gi+A5c408PBzCAirXPWSOAr9jPgjH8LxkFILJ33cNu97BAiMwYc+7CV3vxu8BiGnjdz1PaVg8buuThsVrA1Lji+fvKGnfEF42UddWV8tE39jGxMBKV1VHWVxW3NDJV15DXlBSwMzCjFXwV0Lwvn2EuYzwvXt0f38681G3goKCfo+6wYwAuwicPXs2IyOD84Lbq1evgLDBDAHjJykpdM77xmBzEhJgyVEQdpWFCxey4v+qixcvgvhAJ6qKvZS8Ieh9rVmzBm4dWd2/f7/fw9NsAcpYWfb4CYXgryf0EHJdFwF4VfYOCERu27aNzGJTYhOQeJyUkG9iIdDW1sa+JowNVFtbS3CMfghnzX4S5yhT5HFNEgNh1s7Ojo1gd3f3M2fOINDS0hIaGkoiKX17DR7BZp5e3ilpFesfVG+j1TAehGPcDsG8I4Ix8gAENzD+9tXt6mvc/R8ET+jqbVx3dvS0juat11OrqmPLqzMmL01qWh1dNlXLWtfQysDGxUjLRMXAWsvMxtTJN1xeXYZXjNvc0oJV8NdAMDplbCxrCBhIjY5mPIvBvKtsxowZnO2DUsgQMM4Ek5OTOe8ZgA+trq4ejJt49YqelUU/f541C6FErAss+bt3+/fvLywsRImsRRzCXhcXF4fTWNRw+vTpH8P0+fPngWkcHrA3pqWlfQzTOC/m/PIeQTDs/IoVKxobG6dNm0as/aNHj+Cm0Sw7duwYyNkrKokDADbN5MmTyc7f29u7e/duuLMlS5Zwnl58QqgMjnw4ZcY6olDkcOPGDdQBq0YSYHOgVigFh6sPHmC+Q6FlJk6cyHkVF1sZq7B161ZsIxwy3z+hWbZsGXoga4YpdI+amhqs8tixYznvQIdvmDBhwsGDB7GUfaGiH4IJu1GBwMBAHAlIJKVvr0EieHnnQUPvCGMf2+yZe2s6uxkv4sHEgC/jWhxjFHhnX8MuMhzcB/KO3w3+9gHE43c8G7dwW/WqS0Uz18SVT8hp21wwZ0f5iosFk2br2hoGxfrYeenqWWoY2qtpGWuISksLSQsqaMibWXy1+4JBt7w8+rFjjDCaYsIE+p+PumVmZnKyD7yAycWJGyiAHr/5r+O28I9g5UCo9BehxEmT6P0uoG3fTs/OBtThdwBZsrL9hK2GOhDsHjp06GNfQsJ+Hh0dDbOD9J9w02AZCuIcPyEIBjdhpgA4OCycCCMTYPr58+fYb7GIWDMIpZD9mQjVZldm7ty5OClGAkA8JCQEOWzZsgUIRkMdO3Zs3LhxpPvhL+cNf8ifk6Q4AccBD9uXNAVcP3Lw8PAgNzhDwDGgg1JQbawvyfM7F45589kDX38KzYhehFV4/+CErZObm/t+B+vs7DQzM+v33QA0EXmNKg5d2ehLTPVDMIQjLg5s9fX1H+w8lL6NBongjoOXvbPGq1gZuKfXVm94MmYHuRDHGIWABSYIBnwRqN/dB/4y4LuHweJxHXczpm5s2P2idOaK3Nn7x2x9Xrn8YO3Gm/HFecZOJkGJLvGjfTSMlbXMFGXUZeV1TCUUJaQUJcy/0kAE1n3GDMaVtzt3GBOoiv7KfNuZv7//qVOn7nAIRN6+fTuaiwyn3r59m7Xgzh0wLjg4mBMiAxX2nLIyRjXYgmcJCyN3AQOs/VwPWxs2bMC+isrAlqJB8Je1gEPYXXGiSu5jA62Ki4s/eIQAYZFDP3dMEAxEEm914MABbW1tkNHX1xcox26M1mbf+oazgYqKCkLwW7duodrspmDfSAfKKyoqgpLgTlhY2MWLF8m9GSQZEIBzCHJhCuFJkyZxMqW5uRlHvgsXLnDWH+fObASzn1o8ceKEqqoq5/Hg+xQ2HDjb71keCF0OmxXHOax+vx0T7hhHQdbMn0J7IjHaBxua89XV6enpaGEEcBKDTUYi+yEYjbmaKWxWdktS+vYaJILnLN3iV7LILLzUPrE2b94ZILiWef8vmEucLybAlzkKwXTBjCEIhguu3nS/ZOXV8Xu70yavrtv+qm5PT8aUJZUrToSkxlh7GfjHmPtE2qroy+tbqmgaahs5BShoyovLiZuZm7IK/rIIfv6crqHBGHmIj2dMOjo400Y0Tr3t7e3jOeTn51dQUADXgE4P0wE7zFrAFPAET0ey/Bu6fZseF8e4F40tICY2lvF0HPMb+HCOJLqfcGoZFRUF7GLnwcEARpW14K/atGkTqfPDhw/7PTzNFrZ+ZWXl+/exEQTjt0iAvzk5OYhBPCgpJSUlIyPT2trK2XPgxMvLy69duwb+ct6iR3JAAKTAyTUCIAUOb8LCwjDFnFUCIIqKioB18JfwlC3QGYyAfS4tLWX7NU4Es0tBgsHcjvLNhYOEnp5ev3shLl26lJeXB0RiKQ5+u3btYi1gCkc+tp8lQjK06s6dO7HuOC1AGJ2TLGppacG2AGQBdPhcxOCIDqNgYGAAypNkOP75+PiUlZWhC5GXPVH6VzRIBLcv2xBY2p4z40RSS1fgmA01cMFMBJPBX8LfcbsY9wUTBDOHg4FgzPY2ddGaunrGbr5fv4c2bld3wqR1xQu7EgpSfGOcolKdDe2VDG00DaxVZdQUtUwslPXkBCUEv9bTcbBLsJx9fQwfiglhpubNm0c+VMEW9gd0aywCAnBSDOfIWsAUqgTjTH47UPX20keN+vAQMJN0GRkZnNdS2ML+AzcE4qNcrD7ONFkL/irsgREREWAcdlScwH7shoo1a9ZgJ0RWrPk/RRCMABbhlLmhoQH7M+oDXqNucLIwm/1AuXHjRkT2+1YThBxwMCC3bWAWJba3tyMTZ2fnuLg40JMkg3D6jINZvxd7IgEZlMDPHR0dsV1IPCeCIfwE9QF/OTP8boV1wbbrd1CEz2X7dyTod10BK44ErBmmsIE4rxXjTIXzxXg4hE+fPh0nMaQxsQglouXxl1yFmzt37pQ/NZhryJS+kAaJ4Jb5qxXMArNnnCvf8KBw2Y3aHb1jGff/MpgLBIO/9bvfMSnM4C/MbyPjb1/Tnr7xXX1Ne99N2Etr3Ecb10Wr2907bvuLsSv3xGWnekUGRqV5qJvIWbnoahorqFvYG9k5KxupyCrJmFt8HReMHh8ezmAfEQeC2R/ZJILLYCN41KhRbLtB
[Figure omitted: the original markdown embedded an image here as a long base64-encoded PNG data URI.]

Pre-Trained Image Model (VGG16)

The following creates an instance of the VGG16 model using the Keras API. This automatically downloads the required files if you don't have them already. The VGG16 model was pre-trained on the ImageNet data-set for classifying images. The VGG16 model contains a convolutional part and a fully-connected (or dense) part which is used for the image classification. If include_top=True then the whole VGG16 model is downloaded, which is about 528 MB. If include_top=False then only the convolutional part of the VGG16 model is downloaded, which is just 57 MB. We will use some of the fully-connected layers in this pre-trained model, so we have to download the full model, but if you have a slow internet connection, then you can try and modify the code below to use the smaller pre-trained model without the classification layers.
# (VGG16 and Model are assumed to be imported from the Keras API in an earlier cell.)
image_model = VGG16(include_top=True, weights='imagenet')
image_model.summary()

# Use the second fully-connected layer, 'fc2', as the transfer-layer.
transfer_layer = image_model.get_layer('fc2')

# New model that maps input images to the transfer-values of 'fc2'.
image_model_transfer = Model(inputs=image_model.input,
                             outputs=transfer_layer.output)
_____no_output_____
Apache-2.0
Group_037_SEC_3_Assignment_2_Image_Captioning.ipynb
arindamdeyofficial/Amazon_Review_Sentiment_Analysys
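If the full 528 MB download is a problem, the paragraph above suggests using only the convolutional part. A minimal sketch of what that modification could look like (an editorial addition, not from the notebook; the pooling layer and variable names are assumptions):

from keras.applications import VGG16
from keras.layers import GlobalAveragePooling2D
from keras.models import Model

# Convolution-only VGG16 (~57 MB): there is no 'fc1'/'fc2' layer to tap into.
conv_model = VGG16(include_top=False, weights='imagenet',
                   input_shape=(224, 224, 3))

# Pool the final 7x7x512 feature maps down to a 512-dim transfer-vector,
# standing in for the missing fully-connected layers.
pooled = GlobalAveragePooling2D()(conv_model.output)
image_model_transfer_small = Model(inputs=conv_model.input, outputs=pooled)

The transfer-values would then be 512-dimensional rather than 4096, so the downstream array shapes change accordingly.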
The model expects input images to be of this size:
# K is the Keras backend, imported in an earlier cell of the notebook.
img_size = K.int_shape(image_model.input)[1:3]   # input size expected by the model
img_size
transfer_values_size = K.int_shape(transfer_layer.output)[1]  # width of the 'fc2' layer
transfer_values_size
_____no_output_____
Apache-2.0
Group_037_SEC_3_Assignment_2_Image_Captioning.ipynb
arindamdeyofficial/Amazon_Review_Sentiment_Analysys
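Since the cell outputs were stripped from this dump, the two sizes can be sanity-checked against VGG16's documented architecture (an editorial sketch, not a cell from the notebook):

# VGG16 takes 224x224 RGB inputs, and its 'fc2' layer is 4096 units wide.
assert img_size == (224, 224)
assert transfer_values_size == 4096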
Process All Images

We now make functions for processing all images in the data-set using the pre-trained image-model and saving the transfer-values in a cache-file so they can be reloaded quickly. We effectively create a new data-set of the transfer-values. This is because it takes a long time to process an image in the VGG16 model. We will not be changing the parameters of the VGG16 model, so every time it processes an image, it gives the exact same result. We need the transfer-values to train the image-captioning model for many epochs, so we save a lot of time by calculating the transfer-values once and saving them in a cache-file. This is a helper-function for printing the progress.
import sys  # needed by print_progress below
import keras, os
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPool2D, Flatten
from keras.preprocessing.image import ImageDataGenerator
import numpy as np

# Data generators reading images from the "data" and "test" directories.
trdata = ImageDataGenerator()
traindata = trdata.flow_from_directory(directory="data", target_size=(224, 224))
tsdata = ImageDataGenerator()
testdata = tsdata.flow_from_directory(directory="test", target_size=(224, 224))

# A VGG16-style convolutional stack built from scratch with the Sequential API.
# (Dense and Flatten are imported above but the classifier head is never added;
# the captioning pipeline below uses the pre-trained model instead.)
model = Sequential()
model.add(Conv2D(input_shape=(224, 224, 3), filters=64, kernel_size=(3, 3), padding="same", activation="relu"))
model.add(Conv2D(filters=64, kernel_size=(3, 3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(filters=128, kernel_size=(3, 3), padding="same", activation="relu"))
model.add(Conv2D(filters=128, kernel_size=(3, 3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(filters=256, kernel_size=(3, 3), padding="same", activation="relu"))
model.add(Conv2D(filters=256, kernel_size=(3, 3), padding="same", activation="relu"))
model.add(Conv2D(filters=256, kernel_size=(3, 3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(filters=512, kernel_size=(3, 3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3, 3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3, 3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(filters=512, kernel_size=(3, 3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3, 3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3, 3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))

def print_progress(count, max_count):
    # Percentage completion.
    pct_complete = count / max_count

    # Status-message. Note the \r which means the line should
    # overwrite itself.
    msg = "\r- Progress: {0:.1%}".format(pct_complete)

    # Print it.
    sys.stdout.write(msg)
    sys.stdout.flush()
_____no_output_____
Apache-2.0
Group_037_SEC_3_Assignment_2_Image_Captioning.ipynb
arindamdeyofficial/Amazon_Review_Sentiment_Analysys
This is the function for processing the given files using the VGG16-model and returning their transfer-values.
def process_images(data_dir, filenames, batch_size=32):
    """
    Process all the given files in the given data_dir using the
    pre-trained image-model and return their transfer-values.

    Note that we process the images in batches to save
    memory and improve efficiency on the GPU.
    """

    # Number of images to process.
    num_images = len(filenames)

    # Pre-allocate input-batch-array for images.
    shape = (batch_size,) + img_size + (3,)
    image_batch = np.zeros(shape=shape, dtype=np.float16)

    # Pre-allocate output-array for transfer-values.
    # Note that we use 16-bit floating-points to save memory.
    shape = (num_images, transfer_values_size)
    transfer_values = np.zeros(shape=shape, dtype=np.float16)

    # Initialize index into the filenames.
    start_index = 0

    # Process batches of image-files.
    while start_index < num_images:
        # Print the percentage-progress.
        print_progress(count=start_index, max_count=num_images)

        # End-index for this batch.
        end_index = start_index + batch_size

        # Ensure end-index is within bounds.
        if end_index > num_images:
            end_index = num_images

        # The last batch may have a different batch-size.
        current_batch_size = end_index - start_index

        # Load all the images in the batch.
        for i, filename in enumerate(filenames[start_index:end_index]):
            # Path for the image-file.
            path = os.path.join(data_dir, filename)

            # Load and resize the image.
            # This returns the image as a numpy-array.
            img = load_image(path, size=img_size)

            # Save the image for later use.
            image_batch[i] = img

        # Use the pre-trained image-model to process the image.
        # Note that the last batch may have a different size,
        # so we only use the relevant images.
        transfer_values_batch = \
            image_model_transfer.predict(image_batch[0:current_batch_size])

        # Save the transfer-values in the pre-allocated array.
        transfer_values[start_index:end_index] = \
            transfer_values_batch[0:current_batch_size]

        # Increase the index for the next loop-iteration.
        start_index = end_index

    # Print newline.
    print()

    return transfer_values
_____no_output_____
Apache-2.0
Group_037_SEC_3_Assignment_2_Image_Captioning.ipynb
arindamdeyofficial/Amazon_Review_Sentiment_Analysys
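A small usage sketch of process_images (editorial; coco.train_dir and filenames_train are defined in the surrounding cells, and the 64-image slice is only illustrative):

# Transfer-values for the first 64 training images only.
small = process_images(data_dir=coco.train_dir,
                       filenames=filenames_train[:64])
print(small.shape)  # expected: (64, 4096), dtype float16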
Helper-function for processing all images in the training-set. This saves the transfer-values in a cache-file for fast reloading.
def process_images_train():
    print("Processing {0} images in training-set ...".format(len(filenames_train)))

    # Path for the cache-file.
    cache_path = os.path.join(coco.data_dir, "transfer_values_train.pkl")

    # If the cache-file already exists then reload it,
    # otherwise process all images and save their transfer-values
    # to the cache-file so it can be reloaded quickly.
    transfer_values = cache(cache_path=cache_path,
                            fn=process_images,
                            data_dir=coco.train_dir,
                            filenames=filenames_train)

    return transfer_values
_____no_output_____
Apache-2.0
Group_037_SEC_3_Assignment_2_Image_Captioning.ipynb
arindamdeyofficial/Amazon_Review_Sentiment_Analysys
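The cache() helper called above comes from the tutorial's support code and is not shown in this dump. A minimal pickle-based sketch of its contract (the signature matches the call above; the body is an assumption):

import os
import pickle

def cache(cache_path, fn, *args, **kwargs):
    # Reload the result if it was computed and saved before ...
    if os.path.exists(cache_path):
        with open(cache_path, mode='rb') as file:
            obj = pickle.load(file)
        print("- Data loaded from cache-file: " + cache_path)
    else:
        # ... otherwise call fn once and save the result for next time.
        obj = fn(*args, **kwargs)
        with open(cache_path, mode='wb') as file:
            pickle.dump(obj, file)
        print("- Data saved to cache-file: " + cache_path)
    return obj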
Helper-function for processing all images in the validation-set.
def process_images_val():
    print("Processing {0} images in validation-set ...".format(len(filenames_val)))

    # Path for the cache-file.
    cache_path = os.path.join(coco.data_dir, "transfer_values_val.pkl")

    # If the cache-file already exists then reload it,
    # otherwise process all images and save their transfer-values
    # to the cache-file so it can be reloaded quickly.
    transfer_values = cache(cache_path=cache_path,
                            fn=process_images,
                            data_dir=coco.val_dir,
                            filenames=filenames_val)

    return transfer_values
_____no_output_____
Apache-2.0
Group_037_SEC_3_Assignment_2_Image_Captioning.ipynb
arindamdeyofficial/Amazon_Review_Sentiment_Analysys
Process all images in the training-set and save the transfer-values to a cache-file. This took about 30 minutes to process on a GTX 1070 GPU.
%%time
transfer_values_train = process_images_train()

print("dtype:", transfer_values_train.dtype)
print("shape:", transfer_values_train.shape)
_____no_output_____
Apache-2.0
Group_037_SEC_3_Assignment_2_Image_Captioning.ipynb
arindamdeyofficial/Amazon_Review_Sentiment_Analysys
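The validation-set would be processed the same way; a sketch of the symmetric call (editorial; the variable name transfer_values_val is an assumption):

%%time
transfer_values_val = process_images_val()

print("dtype:", transfer_values_val.dtype)
print("shape:", transfer_values_val.shape)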
Parameter extraction
# Stride length and stride duration print("len(disp_abs_all):", len(disp_abs_all)) print("disp_abs_all[0].shape:", disp_abs_all[0].shape) import copy from scipy import signal disp_abs_all_savgol = copy.deepcopy(disp_abs_all) file_id = 0 seg = 0 disp_abs_all_savgol[file_id][seg][:,1] = signal.savgol_filter(disp_abs_all[file_id][seg][:,1], 11,2) plt.figure() plt.plot(disp_abs_all[file_id][seg,:,0], disp_abs_all[file_id][seg,:,1], color='g') plt.plot(disp_abs_all_savgol[file_id][seg,:,0], disp_abs_all_savgol[file_id][seg,:,1], color='m') plt.show() import copy from scipy import signal disp_abs_all_savgol = copy.deepcopy(disp_abs_all) for file_id in range(len(disp_abs_all)): savgol0 = [] for seg in range(len(disp_abs_all[0])): disp_abs_all_savgol[file_id][seg][:,1] = signal.savgol_filter(disp_abs_all[file_id][seg][:,1], 11,2) plt.figure() plt.plot(disp_abs_all[file_id][seg,:,0], disp_abs_all[file_id][seg,:,1], color='g') plt.plot(disp_abs_all_savgol[file_id][seg,:,0], disp_abs_all_savgol[file_id][seg,:,1], color='m') plt.show() import peakutils from scipy.signal import argrelmax xmin = 0 xmax = 6 bins = 120 width = (xmax-xmin)/bins stride_all = [] for file_id in range(len(disp_abs_all)): stride_seg = [] for seg in range(10): stride_seg0 = [] hist_dat = np.histogram(disp_abs_all_savgol[file_id][seg,:,1], bins=120,range=(0,6)) #peaks = hist_dat[1][argrelmax(hist_dat[0], order=4)] peaks_id = peakutils.indexes(hist_dat[0], thres=0.2, min_dist=5) peaks_id = np.sort(peaks_id) peaks = hist_dat[1][peaks_id] for peak_id in range(len(peaks)): dat0 = disp_abs_all[file_id][seg] disp_peak = [dat0[i,1] for i in range(len(dat0)) if dat0[i,1] > peaks[peak_id] and dat0[i,1] < peaks[peak_id] + width] time_peak = [dat0[i,0] for i in range(len(dat0)) if dat0[i,1] > peaks[peak_id] and dat0[i,1] < peaks[peak_id] + width] disp_peak_med = np.median(disp_peak) time_peak_med = np.median(time_peak) stride_seg0.append([time_peak_med, disp_peak_med]) stride_seg.append(np.array(stride_seg0)) stride_all.append(stride_seg) plt.figure() for seg in range(10): plt.plot(disp_abs_all[file_id][seg,:,0], disp_abs_all[file_id][seg,:,1]) plt.plot(stride_all[file_id][seg][:,0], stride_all[file_id][seg][:,1], 'o') plt.title("Displacement of file {0}".format(src_name[file_id])) plt.xlabel("Time (sec)") plt.ylabel("Displacement (mm)") plt.xlim([0,4.2]) plt.ylim([0,6.2]) plt.xticks([0,1,2,3,4]) plt.savefig(src_path + "img/201102_stride_length_detection_" + src_name[file_id] + ".png") plt.close() import pickle with open(src_path + "pickle/initial_disp_all_201102.pickle", "wb") as f1: pickle.dump(initial_disp_all, f1) with open(src_path + "pickle/disp_rel_all_201102.pickle", "wb") as f2: pickle.dump(disp_rel_all, f2) with open(src_path + "pickle/disp_abs_all_201102.pickle", "wb") as f3: pickle.dump(disp_abs_all, f3) with open(src_path + "pickle/seg_len_all_201102.pickle", "wb") as f4: pickle.dump(seg_len_all, f4) with open(src_path + "pickle/stride_all_201102.pickle", "wb") as f5: pickle.dump(stride_all, f5) with open(src_path + "pickle/body_len_all_201104.pickle", "wb") as f6: pickle.dump(body_len_all, f6) print("len(initial_disp_all):", len(initial_disp_all)) print("len(initial_disp_all[0]) (seg number):", len(initial_disp_all[0])) print("len(disp_rel_all):", len(disp_rel_all)) print("disp_rel_all[0].shape:", disp_rel_all[0].shape) print("len(disp_abs_all):", len(disp_abs_all)) print("disp_abs_all[0].shape:", disp_abs_all[0].shape) print("len(seg_len_all):", len(seg_len_all)) print("seg_len_all[0].shape:", seg_len_all[0].shape) 
print("len(stride_all)(movie number):", len(stride_all)) print("len(stride_all[0])(seg number):", len(stride_all[0])) print("len(stride_all[0][0])(peak number):", len(stride_all[0][0])) print("len(stride_all[0][0][0])(time, displacement):", len(stride_all[0][0][0])) import pickle src_path = "C:/Users/h1006/Documents/Research/Sun/Data/1_Kinematics/" with open(src_path + "pickle/stride_all_201102.pickle", "rb") as f5: stride_all = pickle.load(f5) import numpy as np stride_length_all = [] for mov_id in range(len(stride_all)): dst1 = [] for seg_id in range(10): dat_stride = stride_all[mov_id][seg_id] dst0 = [] for i in range(len(dat_stride)-1): dst0.append(dat_stride[i+1,1]-dat_stride[i,1]) dst1.append(np.median(dst0)) stride_length_all.append(dst1) print(stride_length_all) import pickle src_path = "C:/Users/h1006/Documents/Research/Sun/Data/1_Kinematics/" with open(src_path + "pickle/stride_length_all_201104.pickle", "wb") as f7: pickle.dump(stride_length_all, f7) import numpy as np import pickle src_path = "C:/Users/h1006/Documents/Research/Sun/Data/1_Kinematics/" with open(src_path + "pickle/stride_length_all_201104.pickle", "rb") as f6: stride_length_all = np.array(pickle.load(f6)) print("stride_length_all.shape", stride_length_all.shape) stride_len_med = [] for i in range(len(stride_length_all)): stride_len_med.append(np.median(stride_length_all[i])) print("median stride length of movie{0}: {1:3f}".format(i, np.median(stride_length_all[i]))) with open(src_path + "pickle/body_len_all_201104.pickle", "rb") as f6: body_len_all = pickle.load(f6) body_len_max = [] for file_id in range(len(body_len_all)): body_len_max.append(body_len_all[file_id][:,1].max()) print("body_len_max:", body_len_max) print("stride_length_med:", stride_len_med) import matplotlib.pyplot as plt from scipy import stats plt.plot(body_len_max, stride_len_med, 'go') plt.xlim([2,5]) plt.xlabel("Body length (mm)") plt.ylim([0.5,1.0]) plt.ylabel("Stride length (mm)") plt.show() print("Body length average (mm):{0:4.2f}±{1:4.2f}".format(np.mean(body_len_max), stats.sem(body_len_max))) print("Stride length average (mm):{0:4.2f}±{1:4.2f}".format(np.mean(stride_len_med), stats.sem(stride_len_med))) print("len(seg_len_all):", len(seg_len_all)) print("seg_len_all[0].shape: (seg, frame, time/length)", seg_len_all[0].shape) import copy import matplotlib.pyplot as plt import peakutils from scipy import signal seg_len_savgol = [] seg_len_peaks = [] for file_id in range(len(seg_len_all)): seg_len_savgol0 = [] seg_len_peaks0 = [] for seg in range(len(seg_len_all[file_id])): dat = seg_len_all[file_id][seg] dat_savgol = copy.deepcopy(dat) dat_savgol[:,1] = signal.savgol_filter(dat[:,1],11,2) peaks_id_p = peakutils.indexes(dat_savgol[:,1], thres=0.2, min_dist=20) peaks_id_n = peakutils.indexes(-dat_savgol[:,1], thres=0.2, min_dist=20) seg_len_savgol0.append(dat_savgol) seg_len_peaks0.append([peaks_id_p, peaks_id_n]) seg_len_savgol.append(seg_len_savgol0) seg_len_peaks.append(seg_len_peaks0) file_id = 0 seg = 0 dat_src = seg_len_all[file_id][seg] dat_sav = seg_len_savgol[file_id][seg] dat_peaks = seg_len_peaks[file_id][seg] plt.plot(dat_src[:,0], dat_src[:,1]) plt.plot(dat_sav[:,0], dat_sav[:,1]) plt.plot(dat_sav[dat_peaks[0],0], dat_sav[dat_peaks[0],1], 'go') plt.plot(dat_sav[dat_peaks[1],0], dat_sav[dat_peaks[1],1], 'mo') plt.savefig(src_path + "img/201104_segment_length_{0}_seg{1}.png".format(src_name[file_id], seg)) plt.show() seg_len_range_all = [] for file_id in range(len(seg_len_all)): dst = [] for seg in 
range(len(seg_len_all[file_id])): dat_src = seg_len_all[file_id][seg] dat_sav = seg_len_savgol[file_id][seg] dat_peaks = seg_len_peaks[file_id][seg] dst_p = [dat_sav[dat_peaks[0],0], dat_sav[dat_peaks[0],1]] dst_n = [dat_sav[dat_peaks[1],0], dat_sav[dat_peaks[1],1]] dst.append([dst_p, dst_n]) plt.plot(dat_src[:,0], dat_src[:,1]) plt.plot(dat_sav[:,0], dat_sav[:,1]) plt.plot(dat_sav[dat_peaks[0],0], dat_sav[dat_peaks[0],1], 'go') plt.plot(dat_sav[dat_peaks[1],0], dat_sav[dat_peaks[1],1], 'mo') plt.savefig(src_path + "img/201104_segment_length_{0}_seg{1}.png".format(src_name[file_id], seg)) plt.close() seg_len_range_all.append(dst) import pickle with open(src_path + "pickle/seg_len_range_all_201104.pickle", "wb") as f: pickle.dump(seg_len_range_all, f) import pickle with open(src_path + "pickle/seg_len_range_all_201104.pickle", "rb") as f: seg_len_range_all = pickle.load(f) print("len(seg_len_range_all) (file_id):", len(seg_len_range_all)) print("len(seg_len_range_all[0])(seg):", len(seg_len_range_all[0])) print("len(seg_len_range_all[0][0])(peak/valley)", len(seg_len_range_all[0][0])) print("len(seg_len_range_all[0][0][0])(time/length)", len(seg_len_range_all[0][0][0])) file_id = 0 seg_id = 0 peak = 0 valley = 1 print("seg_len_range_all[file_id][seg][peak]:(time/length)", seg_len_range_all[file_id][seg_id][peak]) print("seg_len_range_all[file_id][seg][valley]:(time/length)", seg_len_range_all[file_id][seg_id][valley]) import numpy as np import peakutils # signal: seg0 = 0 seg1 = 4 sig0 = seg_len_savgol[0][seg0][:,1] sig1 = seg_len_savgol[0][seg1][:,1] # centralization sig0 = sig0 - sig0.mean() sig1 = sig1 - sig1.mean() corr = np.correlate(sig1, sig0, "full") peaks_id = peakutils.indexes(corr[len(corr)-len(sig0):], thres=0.2, min_dist=20) estimated_delay = peaks_id[0] print("estimated delay is {}".format(estimated_delay)) print(peaks_id) fig, ax = plt.subplots(2,1, figsize = (10,8)) ax[0].plot(sig0, label="sig0") ax[0].plot(sig1, label="sig1") ax[0].legend() ax[1].set_ylabel("corr") ax[1].plot(np.arange(len(corr))-len(sig0)+1, corr) ax[1].plot(peaks_id, corr[peaks_id+len(sig0)-1], 'ro') ax[1].set_xlim([0, len(sig1)]) plt.show() print(len(corr)) import numpy as np import peakutils fig_path = "C:/Users/h1006/Documents/Research/Sun/Data/1_Kinematics/img/correlation/" # segmental delay seg_len_delay_all = [] for file_id in range(len(seg_len_savgol)): dst0 = [] for seg_id in range(len(seg_len_savgol[file_id])-1): sig0 = seg_len_savgol[file_id][seg_id][:,1] sig1 = seg_len_savgol[file_id][seg_id+1][:,1] # centralization sig0 = sig0 - sig0.mean() sig1 = sig1 - sig1.mean() corr = np.correlate(sig1, sig0, "full") t_margin = 2 peaks_id = peakutils.indexes(corr[len(corr)-len(sig0)-t_margin:], thres=0.2, min_dist=20) peaks_id = peaks_id - t_margin estimated_delay = peaks_id[0] dst0.append(estimated_delay) fig, ax = plt.subplots(2,1, figsize = (10,8)) ax[0].plot(sig0, label="sig0") ax[0].plot(sig1, label="sig1") ax[0].legend() ax[1].set_ylabel("corr") ax[1].plot(np.arange(len(corr))-len(sig0)+1, corr) ax[1].plot(peaks_id, corr[peaks_id+len(sig0)-1], 'ro') ax[1].set_xlim([0, len(sig1)]) plt.savefig(fig_path + "intersegmental_corr_{0}_seg{1}.png".format(src_name[file_id], seg_id)) plt.close() seg_len_delay_all.append(dst0) # stride duration stride_duration_all = [] for file_id in range(len(seg_len_savgol)): dst0 = [] for seg_id in range(len(seg_len_savgol[file_id])): sig0 = seg_len_savgol[file_id][seg_id][:,1] sig1 = seg_len_savgol[file_id][seg_id][:,1] # centralization sig0 = sig0 - sig0.mean() sig1 = sig1 
- sig1.mean() corr = np.correlate(sig1, sig0, "full") peaks_id = peakutils.indexes(corr[len(corr)-len(sig0):], thres=0.2, min_dist=20) estimated_delay = peaks_id[0] dst0.append(estimated_delay) fig, ax = plt.subplots(2,1, figsize = (10,8)) ax[0].plot(sig0, label="sig0") ax[0].plot(sig1, label="sig1") ax[0].legend() ax[1].set_ylabel("corr") ax[1].plot(np.arange(len(corr))-len(sig0)+1, corr) ax[1].plot(peaks_id, corr[peaks_id+len(sig0)-1], 'ro') ax[1].set_xlim([0, len(sig1)]) plt.savefig(fig_path + "auto_corr_{0}_seg{1}.png".format(src_name[file_id], seg_id)) plt.close() stride_duration_all.append(dst0) import pickle src_path = "C:/Users/h1006/Documents/Research/Sun/Data/1_Kinematics/" with open(src_path + "pickle/seg_len_delay_all_201104.pickle", "wb") as f8: pickle.dump(seg_len_delay_all, f8) with open(src_path + "pickle/stride_duration_all_201104.pickle", "wb") as f9: pickle.dump(stride_duration_all, f9) import pickle with open(src_path + "pickle/seg_len_delay_all_201104.pickle", "rb") as f8: seg_len_delay_all = pickle.load(f8) with open(src_path + "pickle/stride_duration_all_201104.pickle", "rb") as f9: stride_duration_all = pickle.load(f9) print("From cross-correlation") print("len(seg_len_delay_all):", len(seg_len_delay_all)) print("len(seg_len_delay_all[0])(seg):", len(seg_len_delay_all[0])) print("seg_len_delay_all[0]:", seg_len_delay_all[0]) print("From auto-correlation") print("len(stride_duration_all):", len(stride_duration_all)) print("len(stride_duration_all[0])(seg):", len(stride_duration_all[0])) print("stride_duration_all[0]:", stride_duration_all[0]) # boundary stride duration 201119 import pickle src_path = "C:/Users/h1006/Documents/Research/Sun/Data/1_Kinematics/" with open(src_path + "pickle/disp_abs_all_201102.pickle", "rb") as f: disp_abs_all = pickle.load(f) import copy from scipy import signal disp_abs_all_savgol = copy.deepcopy(disp_abs_all) for file_id in range(len(disp_abs_all)): savgol0 = [] for seg in range(len(disp_abs_all[0])): disp_abs_all_savgol[file_id][seg][:,1] = signal.savgol_filter(disp_abs_all[file_id][seg][:,1], 11,2) import matplotlib.pyplot as plt file_id = 0 seg = 0 plt.figure() plt.plot(disp_abs_all[file_id][seg,:,0], disp_abs_all[file_id][seg,:,1], color='g') plt.plot(disp_abs_all_savgol[file_id][seg,:,0], disp_abs_all_savgol[file_id][seg,:,1], color='m') plt.show() import numpy as np diff = np.diff(disp_abs_all_savgol[file_id][seg,:,1]) plt.plot(diff) plt.show() import numpy as np import peakutils # signal: sig0 = diff sig1 = diff # centralization sig0 = sig0 - sig0.mean() sig1 = sig1 - sig1.mean() corr = np.correlate(sig1, sig0, "full") peaks_id = peakutils.indexes(corr[len(corr)-len(sig0):], thres=0.2, min_dist=20) estimated_delay = peaks_id[0] print("estimated delay is {}".format(estimated_delay)) print(peaks_id) fig, ax = plt.subplots(2,1, figsize = (10,8)) ax[0].plot(sig0, label="sig0") ax[0].plot(sig1, label="sig1") ax[0].legend() ax[1].set_ylabel("corr") ax[1].plot(np.arange(len(corr))-len(sig0)+1, corr) ax[1].plot(peaks_id, corr[peaks_id+len(sig0)-1], 'ro') ax[1].set_xlim([0, len(sig1)]) plt.show() print(len(corr)) import copy from scipy import signal disp_abs_all_savgol = copy.deepcopy(disp_abs_all) for file_id in range(len(disp_abs_all)): savgol0 = [] for seg in range(len(disp_abs_all[0])): disp_abs_all_savgol[file_id][seg][:,1] = signal.savgol_filter(disp_abs_all[file_id][seg][:,1], 11,2) import numpy as np diff = np.diff(disp_abs_all_savgol[file_id][seg,:,1]) plt.plot(diff) plt.show() import numpy as np import peakutils # source: 
disp_abs_all_savgol fig_path = "C:/Users/h1006/Documents/Research/Sun/Data/1_Kinematics/img/correlation/" src_name = ["Results1-54-109-20.csv", "Results2-125-215-20.csv", "Results3-1-74-20.csv", "Results4-248-370-20.csv", "Results5-1-100-20.csv", "Results6-380-485-20.csv", "Results7-250-310-20.csv", "Results8-1-105-20.csv", "Results9-464-555-20.csv", "Results10-665-733-20.csv", "Results11-249-315-20.csv"] # bounday motion delay boundary_motion_delay_all = [] for file_id in range(len(disp_abs_all_savgol)): dst0 = [] for seg_id in range(len(disp_abs_all_savgol[file_id])-1): sig0 = np.diff(disp_abs_all_savgol[file_id][seg_id][:,1]) sig1 = np.diff(disp_abs_all_savgol[file_id][seg_id+1][:,1]) # centralization sig0 = sig0 - sig0.mean() sig1 = sig1 - sig1.mean() corr = np.correlate(sig1, sig0, "full") t_margin = 2 peaks_id = peakutils.indexes(corr[len(corr)-len(sig0)-t_margin:], thres=0.2, min_dist=20) peaks_id = peaks_id - t_margin estimated_delay = peaks_id[0] dst0.append(estimated_delay) fig, ax = plt.subplots(2,1, figsize = (10,8)) ax[0].plot(sig0, label="sig0") ax[0].plot(sig1, label="sig1") ax[0].legend() ax[1].set_ylabel("corr") ax[1].plot(np.arange(len(corr))-len(sig0)+1, corr) ax[1].plot(peaks_id, corr[peaks_id+len(sig0)-1], 'ro') ax[1].set_xlim([0, len(sig1)]) plt.savefig(fig_path + "201119_boundary_motion_interseg_corr_{0}_seg{1}.png".format(src_name[file_id], seg_id)) plt.close() boundary_motion_delay_all.append(dst0) # boundary stride duration boundary_stride_duration_all = [] for file_id in range(len(disp_abs_all_savgol)): dst0 = [] for seg_id in range(len(disp_abs_all_savgol[file_id])): sig0 = np.diff(disp_abs_all_savgol[file_id][seg_id][:,1]) sig1 = np.diff(disp_abs_all_savgol[file_id][seg_id][:,1]) # centralization sig0 = sig0 - sig0.mean() sig1 = sig1 - sig1.mean() corr = np.correlate(sig1, sig0, "full") peaks_id = peakutils.indexes(corr[len(corr)-len(sig0):], thres=0.2, min_dist=20) estimated_delay = peaks_id[0] dst0.append(estimated_delay) fig, ax = plt.subplots(2,1, figsize = (10,8)) ax[0].plot(sig0, label="sig0") ax[0].plot(sig1, label="sig1") ax[0].legend() ax[1].set_ylabel("corr") ax[1].plot(np.arange(len(corr))-len(sig0)+1, corr) ax[1].plot(peaks_id, corr[peaks_id+len(sig0)-1], 'ro') ax[1].set_xlim([0, len(sig1)]) plt.savefig(fig_path + "201119_boundary_auto_corr_{0}_seg{1}.png".format(src_name[file_id], seg_id)) plt.close() boundary_stride_duration_all.append(dst0) import pickle src_path = "C:/Users/h1006/Documents/Research/Sun/Data/1_Kinematics/" with open(src_path + "pickle/boundary_motion_delay_all_201119.pickle", "wb") as f1: pickle.dump(boundary_motion_delay_all, f1) with open(src_path + "pickle/boundary_stride_duration_all_201119.pickle", "wb") as f2: pickle.dump(boundary_stride_duration_all, f2) boundary_stride_duration_all = np.array(boundary_stride_duration_all) print("boundary_stride_duration_all", boundary_stride_duration_all.shape) print(boundary_stride_duration_all) boundary_motion_delay_all = np.array(boundary_motion_delay_all) print("boundary_motion_delay_all", boundary_motion_delay_all.shape) print(boundary_motion_delay_all) # Calculate speed import copy from scipy import signal import pickle src_path = "C:/Users/h1006/Documents/Research/Sun/Data/1_Kinematics/" with open(src_path + "pickle/disp_abs_all_201102.pickle", "rb") as f: disp_abs_all = pickle.load(f) disp_abs_all_savgol = copy.deepcopy(disp_abs_all) for file_id in range(len(disp_abs_all)): savgol0 = [] for seg in range(len(disp_abs_all[0])): disp_abs_all_savgol[file_id][seg][:,1] = 
signal.savgol_filter(disp_abs_all[file_id][seg][:,1], 11,2) import matplotlib.pyplot as plt file_id = 0 seg = 0 plt.figure() plt.plot(disp_abs_all[file_id][seg,:,0], disp_abs_all[file_id][seg,:,1], color='g') plt.plot(disp_abs_all_savgol[file_id][seg,:,0], disp_abs_all_savgol[file_id][seg,:,1], color='m') plt.show() import matplotlib.pyplot as plt from sklearn.linear_model import LinearRegression lr = LinearRegression() file_id = 0 seg = 0 X = disp_abs_all_savgol[file_id][seg,:,0].reshape(-1,1) Y = disp_abs_all_savgol[file_id][seg,:,1].reshape(-1,1) lr.fit(X, Y) plt.scatter(X, Y, color='green') plt.plot(X, lr.predict(X), color='magenta') plt.show() print("coefficient:", lr.coef_[0]) print(X) print(Y) print(Y.reshape(-1,1)) # Calculate all speed import matplotlib.pyplot as plt import numpy as np from sklearn.linear_model import LinearRegression fig_path = "C:/Users/h1006/Documents/Research/Sun/Data/1_Kinematics/img/" src_name = ["Results1-54-109-20.csv", "Results2-125-215-20.csv", "Results3-1-74-20.csv", "Results4-248-370-20.csv", "Results5-1-100-20.csv", "Results6-380-485-20.csv", "Results7-250-310-20.csv", "Results8-1-105-20.csv", "Results9-464-555-20.csv", "Results10-665-733-20.csv", "Results11-249-315-20.csv"] speed_all = [] for file_id in range(len(disp_abs_all_savgol)): dst = [] for seg_id in range(len(disp_abs_all_savgol[file_id])): lr = LinearRegression() X = disp_abs_all_savgol[file_id][seg_id,:,0].reshape(-1,1) Y = disp_abs_all_savgol[file_id][seg_id,:,1].reshape(-1,1) lr.fit(X, Y) plt.plot(X, Y, color='green') plt.plot(X, lr.predict(X), color='magenta') plt.savefig(fig_path + "201120_speed_{0}_seg{1}.png".format(src_name[file_id], seg_id)) plt.close() dst.append(lr.coef_[0][0]) speed_all.append(dst) speed_all = np.array(speed_all) print("speed_all.shape:", speed_all.shape) print(speed_all) import pickle src_path = "C:/Users/h1006/Documents/Research/Sun/Data/1_Kinematics/" #with open(src_path + "pickle/speed_all_201120.pickle", "wb") as f: # pickle.dump(speed_all, f) import pickle src_path = "C:/Users/h1006/Documents/Research/Sun/Data/1_Kinematics/" with open(src_path + "pickle/speed_all_201120.pickle", "rb") as f: speed_all = pickle.load(f) speed_larvae = speed_all.mean(axis=1) print("speed_larvae.shape:", speed_larvae.shape) print(speed_larvae) # Scatter plot of speed vs stride duration/length # data of speed: speed_all # data of stride duration: boundary_stride_duration_all # data of stride length: stride_length_all import numpy as np import pickle src_path = "C:/Users/h1006/Documents/Research/Sun/Data/1_Kinematics/" sec_per_frame = 0.03333 with open(src_path + "pickle/speed_all_201120.pickle", "rb") as f1: speed_all = pickle.load(f1) with open(src_path + "pickle/boundary_stride_duration_all_201119.pickle", "rb") as f2: stride_duration_all = pickle.load(f2) stride_duration_all = np.array(stride_duration_all) * sec_per_frame with open(src_path + "pickle/stride_length_all_201104.pickle", "rb") as f3: stride_length_all = pickle.load(f3) stride_length_all = np.array(stride_length_all) print("speed_all:", speed_all.shape) print("stride_duration_all:", stride_duration_all.shape) print("stride_length_all:", stride_length_all.shape) import matplotlib.pyplot as plt dst_path = "C:/Users/h1006/Documents/Research/Sun/Images/" speed = speed_all.reshape(11*10) duration = stride_duration_all.reshape(11*10) length = stride_length_all.reshape(11*10) plt.figure(figsize = (8,9)) ax = plt.gca() plt.plot(duration, speed, 'o', color = "k", markersize = 10) plt.xlim([0.7, 1.45]) plt.ylim([0.45, 1.0]) 
plt.xlabel("Stride duration (sec)", fontsize = 28) plt.ylabel("Speed (mm/sec)", fontsize = 28) plt.xticks([0.7,0.8,0.9,1.0,1.1,1.2,1.3,1.4],fontsize = 20) plt.yticks([0.5,0.6,0.7,0.8,0.9,1.0], fontsize = 20) ax.spines["top"].set_color("none") ax.spines["right"].set_color("none") plt.savefig(dst_path + "Speed_vs_stride_duration_201120.png", bbox_inches = "tight", facecolor="white") plt.show() plt.close() plt.figure(figsize = (8,9)) ax = plt.gca() plt.plot(length, speed, 'o', color = "k", markersize = 10) plt.xlim([0.5, 0.9]) plt.ylim([0.45, 1.0]) plt.xlabel("Stride length (mm)", fontsize = 28) plt.ylabel("Speed (mm/sec)", fontsize = 28) plt.xticks([0.5,0.6,0.7,0.8,0.9], fontsize = 20) plt.yticks([0.5,0.6,0.7,0.8,0.9,1.0], fontsize = 20) ax.spines["top"].set_color("none") ax.spines["right"].set_color("none") plt.savefig(dst_path + "Speed_vs_stride_length_201120.png", bbox_inches = "tight", facecolor="white") plt.show() plt.close() import pandas as pd speed_series = pd.Series(speed) duration_series = pd.Series(duration) length_series = pd.Series(length) Corr_duration = speed_series.corr(duration_series) Corr_length = speed_series.corr(length_series) print("Correlation speed vs duration:", Corr_duration) print("Correlation speed vs length:", Corr_length) # Calculate maximum and minimum segment length # seg_len_all: file_id, seg_id, frame [time, length]; 11 x 9 x frames x 2 # seg_len_range_all: file_id, seg_id, peak/valley, point number: 11 x 9 x 2 x point number import pickle with open(src_path + "pickle/seg_len_range_all_201104.pickle", "rb") as f1: seg_len_range_all = pickle.load(f1) with open(src_path + "pickle/seg_len_all_201102.pickle", "rb") as f2: seg_len_all = pickle.load(f2) file_id = 0 seg_id = 4 dat = seg_len_range_all[file_id][seg_id] seg_max = dat[0][1].max() seg_min = dat[1][1].min() print("seg_len_range_all[file_id][seg_Id]:", dat) print("dat[0][1].max():", dat[0][1].max()) print("dat[1][1].min():", dat[1][1].min()) import numpy as np max_len_all = [] min_len_all = [] for file_id in range(len(seg_len_range_all)): dst_max = [] dst_min = [] for seg_id in range(len(seg_len_range_all[file_id])): dat = seg_len_range_all[file_id][seg_id] dst_max.append(dat[0][1].max()) dst_min.append(dat[1][1].min()) max_len_all.append(dst_max) min_len_all.append(dst_min) max_len_all = np.array(max_len_all) min_len_all = np.array(min_len_all) print(max_len_all) print(min_len_all) import matplotlib.pyplot as plt import matplotlib.cm as cm plt.figure(0, figsize=(6,10)) plot_shift = 0.5 for seg in range(9): plt.plot(max_len_all[:,seg],[seg+plot_shift]*11, color=cm.jet((seg+1)/10), marker='^', linestyle='None', markersize=15) plt.plot(min_len_all[:,seg],[seg]*11, color=cm.jet((seg+1)/10), marker='v', linestyle='None', markersize=15) plt.plot([max_len_all[:,seg], min_len_all[:,seg]], [seg+plot_shift, seg], color=cm.jet((seg+1)/10), linewidth=1, linestyle="dotted") plt.title("Segment length range") plt.xlabel("Segment length (mm)", fontsize=30) plt.xlim([0,0.6]) #plt.ylim([0,6]) #plt.xticks([0,1,2,3]) plt.yticks([]) plt.tick_params(labelsize=24) ax = plt.gca() ax.spines['right'].set_color('none') ax.spines['top'].set_color('none') #plt.legend() plt.savefig(dst_path + "Segment_length_range_201120.png", facecolor="white", bbox_inches = "tight") plt.show() import pickle with open(src_path + "pickle/max_len_all_201120.pickle", "wb") as f1: #pickle.dump(max_len_all, f1) with open(src_path + "pickle/min_len_all_201120.pickle", "wb") as f2: #pickle.dump(min_len_all, f2) # Calculate contraction duration import 
pickle with open(src_path + "pickle/seg_len_range_all_201104.pickle", "rb") as f1: seg_len_range_all = pickle.load(f1) with open(src_path + "pickle/seg_len_all_201102.pickle", "rb") as f2: seg_len_all = pickle.load(f2) with open(src_path + "pickle/max_len_all_201120.pickle", "rb") as f3: max_len_all = pickle.load(f3) with open(src_path + "pickle/min_len_all_201120.pickle", "rb") as f4: min_len_all = pickle.load(f4) # Check max and min in segment length data # seg0 (A8) - seg8 (T3) # select valleys # Result1: 1,1,0,0,0,0,0,0,0 # Result2: 1,1,1,1,1,1,1,1,1 # Result3: 1,1,1,1,1,1,0,0,0 # Result4: 3,2,2,2,2,2,2,2,3 # Result5: 2,2,2,2,2,2,2,2,2 # Result6: 0,1,1,1,1,1,1,1,1 # Result7: 1,1,1,1,1,1,1,1,1 # Result8: 1,1,1,1,1,1,1,1,1 # Result9: 1,1,1,1,1,1,1,1,1 # Result10: 1,1,1,1,1,1,1,1,1 # Result11: 1,1,1,1,1,0,0,0,0 valleys = np.array([[1,1,0,0,0,0,0,0,1], [1,1,1,1,1,1,1,1,1], [1,1,1,1,1,1,0,0,1], [3,2,2,2,2,2,2,2,3], [2,2,2,2,2,2,2,2,2], [0,1,1,1,1,1,1,1,1], [1,1,1,1,1,1,1,1,1], [1,1,1,1,1,1,1,1,1], [1,1,1,1,1,1,1,1,1], [1,1,1,1,1,1,1,1,1], [1,1,1,1,1,0,0,0,0]]) # Calculate contraction duration # seg_len_all: file_id, seg_id, frame [time, length]; 11 x 9 x frames x 2 # seg_len_range_all: file_id, seg_id, peak/valley, point number: 11 x 9 x 2 x point number import matplotlib.pyplot as plt from scipy import signal file_id = 0 seg_id = 2 t = seg_len_all[file_id][seg_id][:,0] length = signal.savgol_filter(seg_len_all[file_id][seg_id][:,1], 11, 2) peaks = seg_len_range_all[file_id][seg_id] plt.plot(t, length) plt.plot(peaks[0][0], peaks[0][1], 'go') plt.plot(peaks[1][0], peaks[1][1], 'mo') plt.show() from scipy import signal file_id = 0 seg_id = 2 dat_t = seg_len_all[file_id][seg_id][:,0] dat_l = signal.savgol_filter(seg_len_all[file_id][seg_id][:,1],11,2) valley_point = seg_len_range_all[file_id][seg_id][1][0][valleys[file_id][seg_id]] idx = np.where(dat_t == valley_point)[0] thrd = (max_len_all[file_id][seg_id] - min_len_all[file_id][seg_id])*0.5 + min_len_all[file_id][seg_id] # search for left idx left_ = 0 while(dat_l[idx-left_]<thrd): left_ += 1 idx_left = idx - left_ # search for right idx right_ = 0 while(dat_l[idx+right_]<thrd): right_ += 1 idx_right = idx + right_ time_left = dat_t[idx_left] time_right = dat_t[idx_right] dst0 = [[time_left, time_right], [idx_left, idx_right]] print(dst0) plt.plot(dat_t, dat_l) plt.plot(dat_t[idx_left], dat_l[idx_left], "go") plt.plot(dat_t[idx_right], dat_l[idx_right], "go") plt.show() print("thrd:", thrd) print("left side:", dat_l[idx_left-1], dat_l[idx_left], dat_l[idx_left+1]) print("right side:", dat_l[idx_right-1], dat_l[idx_right], dat_l[idx_right+1]) # Calculate contraction duration from scipy import signal FWHM_segment_length_all = [] for file_id in range(11): dst = [] for seg_id in range(9): dat_t = seg_len_all[file_id][seg_id][:,0] dat_l = signal.savgol_filter(seg_len_all[file_id][seg_id][:,1],11,2) valley_point = seg_len_range_all[file_id][seg_id][1][0][valleys[file_id][seg_id]] idx = np.where(dat_t == valley_point)[0] thrd = (max_len_all[file_id][seg_id] - min_len_all[file_id][seg_id])*0.5 + min_len_all[file_id][seg_id] # search for left idx left_ = 0 while(dat_l[idx-left_]<thrd): left_ += 1 idx_left = idx - left_ # search for right idx right_ = 0 while(dat_l[idx+right_]<thrd): right_ += 1 idx_right = idx + right_ time_left = dat_t[idx_left] time_right = dat_t[idx_right] dst0 = [[time_left[0], time_right[0]], [int(idx_left[0]), int(idx_right[0])]] dst.append(dst0) FWHM_segment_length_all.append(dst) FWHM_segment_length_all = 
np.array(FWHM_segment_length_all) FWHM_segment_length_all.shape contraction_duration_all = [] for file_id in range(11): dst = [] for seg_id in range(9): dat = FWHM_segment_length_all[file_id][seg_id] dst.append(dat[0,1] - dat[0,0]) contraction_duration_all.append(dst) contraction_duration_all = np.array(contraction_duration_all) print("contraction_duration_all", contraction_duration_all) import matplotlib.pyplot as plt import matplotlib.cm as cm plt.figure(0, figsize=(6,10)) plot_shift = 0.5 for seg in range(1,9): plt.plot(contraction_duration_all[:,seg], np.array([seg-1]*11) + np.random.randn(11)*0.07, color=cm.jet((seg+1)/10), marker='o', linestyle='None', markersize=10) plt.plot([0,0.7], [seg-1, seg-1], color=cm.jet((seg+1)/10), linestyle='dotted') plt.title("Contraction duration") plt.xlabel("Contraction duration (sec)", fontsize=30) plt.xlim([0,0.7]) #plt.ylim([0,6]) plt.xticks([0,0.2, 0.4, 0.6]) plt.yticks([]) plt.tick_params(labelsize=24) ax = plt.gca() ax.spines['right'].set_color('none') ax.spines['top'].set_color('none') #plt.legend() plt.savefig(dst_path + "Contraction_duration_201120.png", facecolor="white", bbox_inches = "tight") plt.show()
_____no_output_____
Unlicense
Locomotion dynamcis/1_Kinemtaics_201102.ipynb
AlbertLordsun/Physical_measurement
Thematic Reports

Thematic reports run historical analyses on the exposure of a portfolio to various Goldman Sachs Flagship Thematic baskets over a specified date range.

Prerequisite

To execute all the code in this tutorial, you will need the following application scopes:
- **read_product_data**
- **read_financial_data**
- **modify_financial_data** (must be requested)
- **run_analytics** (must be requested)

If you are not yet permissioned for these scopes, please request them on your [My Applications Page](https://developer.gs.com/go/apps/view). If you have any other questions please reach out to the [Marquee sales team](mailto:[email protected]).

Step 1: Authenticate and Initialize Your Session

First you will import the necessary modules and add your client id and client secret.
import datetime as dt from time import sleep from gs_quant.markets.baskets import Basket from gs_quant.markets.report import ThematicReport from gs_quant.session import GsSession, Environment client = None secret = None scopes = None ## External users must fill in their client ID and secret below and comment out the line below #client = 'ENTER CLIENT ID' #secret = 'ENTER CLIENT SECRET' #scopes = ('read_product_data read_financial_data modify_financial_data run_analytics',) GsSession.use( Environment.PROD, client_id=client, client_secret=secret, scopes=scopes ) print('GS Session initialized.')
_____no_output_____
Apache-2.0
gs_quant/documentation/10_one_delta/reports/Thematic Report.ipynb
daniel-schreier/gs-quant
Step 2: Create a New Thematic Report

Already have a thematic report?

If you want to skip creating a new report and continue this tutorial with an existing thematic report, run the following and skip to Step 3:
thematic_report_id = 'ENTER THEMATIC REPORT ID' thematic_report = ThematicReport.get(thematic_report_id)
_____no_output_____
Apache-2.0
gs_quant/documentation/10_one_delta/reports/Thematic Report.ipynb
daniel-schreier/gs-quant
The only parameter necessary in creating a new thematic report is the unique Marquee identifier of the portfolio on which you would like to run thematic analytics.
portfolio_id = 'ENTER PORTFOLIO ID' thematic_report = ThematicReport(position_source_id=portfolio_id) thematic_report.save() print(f'A new thematic report for portfolio "{portfolio_id}" has been made with ID "{thematic_report.id}".')
_____no_output_____
Apache-2.0
gs_quant/documentation/10_one_delta/reports/Thematic Report.ipynb
daniel-schreier/gs-quant
Step 3: Schedule the Report

When scheduling reports, you have two options:
- Backcast the report: take the earliest date with positions in the portfolio/basket and run the report on the positions held then, with a start date before the earliest position date and an end date of the earliest position date
- Do not backcast the report: set the start date as a date that has positions in the portfolio or basket and an end date after that (best practice is to set it to T-1). In this case the report will run on positions held as of each day in the date range

Here, let's try scheduling the report without backcasting:
start_date = dt.date(2021, 1, 4) end_date = dt.date(2021, 8, 4) thematic_report.schedule( start_date=start_date, end_date=end_date, backcast=False ) print(f'Report "{thematic_report.id}" has been scheduled.')
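If you instead wanted to backcast, only the dates and the `backcast` flag change. A sketch for illustration (the dates below are placeholders; in practice the end date should be the portfolio's earliest position date):

```python
# Hypothetical backcast scheduling; dates are placeholders for illustration.
earliest_position_date = dt.date(2021, 1, 4)
thematic_report.schedule(
    start_date=earliest_position_date - dt.timedelta(days=365),
    end_date=earliest_position_date,
    backcast=True
)
```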
_____no_output_____
Apache-2.0
gs_quant/documentation/10_one_delta/reports/Thematic Report.ipynb
daniel-schreier/gs-quant
Alternative Step 3: Run the Report

Depending on the size of your portfolio and the length of the schedule range, it usually takes anywhere from a couple seconds to half a minute for your report to finish executing. Only after that can you successfully pull the results from that report. If you would rather run the report and pull the results immediately after they are ready, you can leverage the `run` function.

You can run a report synchronously or asynchronously.
- Synchronous: the Python script will stall at the `run` function line and wait for the report to finish. The `run` function will then return a dataframe with the report results
- Asynchronous: the Python script will not stall at the `run` function line. The `run` function will return a `ReportJobFuture` object that will contain the report results when they are ready.

In this example, let's run the report asynchronously and wait for the results:
start_date = dt.date(2021, 1, 4) end_date = dt.date(2021, 8, 4) report_result_future = thematic_report.run( start_date=start_date, end_date=end_date, backcast=False, is_async=True ) while not report_result_future.done(): print('Waiting for report results...') sleep(5) print('\nReport results done! Here they are...') print(report_result_future.result())
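For comparison, a synchronous run (a sketch using the same parameters) blocks until the report finishes and returns the results dataframe directly:

```python
# Synchronous run: the script waits here until the report completes,
# then `run` returns the results dataframe.
report_results = thematic_report.run(
    start_date=start_date,
    end_date=end_date,
    backcast=False,
    is_async=False
)
print(report_results)
```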
_____no_output_____
Apache-2.0
gs_quant/documentation/10_one_delta/reports/Thematic Report.ipynb
daniel-schreier/gs-quant
Step 4: Pull Report Results

Now that we have our thematic report, we can leverage the unique functionalities of the `ThematicReport` class to pull exposure and PnL data. Let's get the historical changes in thematic exposure and beta to the GS Asia Stay at Home basket:
basket = Basket.get('GSXASTAY') thematic_exposures = thematic_report.get_thematic_data( start_date=start_date, end_date=end_date, basket_ids=[basket.get_marquee_id()] ) print(f'Thematic Exposures: \n{thematic_exposures.__str__()}') thematic_exposures.plot(title='Thematic Data Breakdown')
_____no_output_____
Apache-2.0
gs_quant/documentation/10_one_delta/reports/Thematic Report.ipynb
daniel-schreier/gs-quant
The Generator

The generator, G, is designed to map the latent space vector (z) to data-space. Since our data are images, converting z to data-space means ultimately creating an RGB image with the same size as the training images (i.e. 3x32x32). In practice, this is accomplished through a series of strided two-dimensional convolutional transpose layers, each paired with a 2d batch norm layer and a relu activation. The output of the generator is fed through a tanh function to return it to the input data range of [−1,1]. It is worth noting the existence of the batch norm functions after the conv-transpose layers, as this is a critical contribution of the DCGAN paper. These layers help with the flow of gradients during training. An image of the generator from the DCGAN paper is shown below.
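As a quick sanity check (a sketch, not part of the tutorial code), the spatial sizes produced by the five transposed convolutions in the generator below follow the standard formula `out = (in - 1)*stride - 2*padding + kernel`:

```python
# Spatial-size walkthrough for the generator below (assumes the
# kernel/stride/padding values used in the Generator class).
def convtranspose2d_out(size, kernel, stride, padding):
    return (size - 1) * stride - 2 * padding + kernel

size = 1  # z enters as a 1x1 "image" with nz channels
for kernel, stride, padding in [(4, 1, 0), (4, 2, 1), (4, 2, 1), (4, 2, 1), (1, 1, 0)]:
    size = convtranspose2d_out(size, kernel, stride, padding)
    print(size)  # 4, 8, 16, 32, 32 -- ending at the 3x32x32 output
```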
# Generator Code class Generator(nn.Module): def __init__(self, ngpu): super(Generator, self).__init__() self.ngpu = ngpu self.main = nn.Sequential( # input is Z, going into a convolution nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False), nn.BatchNorm2d(ngf * 8), nn.ReLU(True), # state size. (ngf*8) x 4 x 4 nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf * 4), nn.ReLU(True), # state size. (ngf*4) x 8 x 8 nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf * 2), nn.ReLU(True), # state size. (ngf*2) x 16 x 16 nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf), nn.ReLU(True), # state size. (ngf) x 32 x 32 nn.ConvTranspose2d(ngf, nc, kernel_size=1, stride=1, padding=0, bias=False), nn.Tanh() ) def forward(self, input): return self.main(input) # Create the generator netG = Generator(ngpu).to(device) # Print the model print(netG) # Input shape for the DCGAN generator is the variable of shape (1, 100, 1, 1, ). # There ara nothing important about this shape and you can change it to other numbers # by modifying `nz` variable. (ex. 128, 200, etc). # Lets check that GAN generates image with correct shape (1, 3, 32, 32) input_variable = torch.randn((1, 100, 1, 1, )).to(device) netG(input_variable).shape
_____no_output_____
MIT
Deep-Fake-knu-2020/Part_2-Generative-Adversarial-Networks/dc-gan-tutorial.ipynb
kryvokhyzha/examples-and-courses
The Discriminator

As mentioned, the discriminator, D, is a binary classification network that takes an image as input and outputs a scalar probability that the input image is real (as opposed to fake). Here, D takes a 3x32x32 input image (this notebook uses 32x32 images rather than the 64x64 images of the original DCGAN tutorial), processes it through a series of Conv2d, BatchNorm2d, and LeakyReLU layers, and outputs the final probability through a Sigmoid activation function. This architecture can be extended with more layers if necessary for the problem, but there is significance to the use of the strided convolution, BatchNorm, and LeakyReLUs. The DCGAN paper mentions it is a good practice to use strided convolution rather than pooling to downsample because it lets the network learn its own pooling function. Also, batch norm and leaky relu functions promote healthy gradient flow, which is critical for the learning process of both G and D.
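Analogously (a sketch, not part of the tutorial code), the strided convolutions below shrink a 32x32 input down to a single scalar per image via `out = (in + 2*padding - kernel)//stride + 1`. Note that the "state size" comments inside the class are inherited from the 64x64 version of the tutorial; the actual sizes for 32x32 inputs are the ones printed here:

```python
# Spatial-size walkthrough for the discriminator below (assumes the
# kernel/stride/padding values used in the Discriminator class).
def conv2d_out(size, kernel, stride, padding):
    return (size + 2 * padding - kernel) // stride + 1

size = 32
for kernel, stride, padding in [(4, 2, 1), (4, 2, 1), (4, 2, 1), (4, 2, 1), (2, 2, 0)]:
    size = conv2d_out(size, kernel, stride, padding)
    print(size)  # 16, 8, 4, 2, 1 -- one probability per image after the Sigmoid
```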
class Discriminator(nn.Module): def __init__(self, ngpu): super(Discriminator, self).__init__() self.ngpu = ngpu self.main = nn.Sequential( # input is (nc) x 64 x 64 nn.Conv2d(nc, ndf, 4, 2, 1, bias=False), nn.LeakyReLU(0.2, inplace=True), # state size. (ndf) x 32 x 32 nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False), nn.BatchNorm2d(ndf * 2), nn.LeakyReLU(0.2, inplace=True), # state size. (ndf*2) x 16 x 16 nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False), nn.BatchNorm2d(ndf * 4), nn.LeakyReLU(0.2, inplace=True), # state size. (ndf*4) x 8 x 8 nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False), nn.BatchNorm2d(ndf * 8), nn.LeakyReLU(0.2, inplace=True), # state size. (ndf*8) x 4 x 4 nn.Conv2d(ndf * 8, 1, 2, 2, 0, bias=False), nn.Sigmoid() ) def forward(self, input): return self.main(input) # Create the Discriminator netD = Discriminator(ngpu).to(device) # Print the model print(netD) # Discriminator is the model that should predict single number from input image. # This number is the probability of input being fake. # Lets check that Discriminator will return single number from input of size (1, 3, 32, 32) input_variable = torch.randn((1, 3, 32, 32, )).to(device) netD(input_variable) # Initialize BCELoss function # This is the lost function used in DCGAN criterion = nn.BCELoss() # Create batch of latent vectors that we will use to visualize # the progression of the generator fixed_noise = torch.randn(64, nz, 1, 1, device=device) # Establish convention for real and fake labels during training real_label = 1 fake_label = 0 # Setup Adam optimizers for both G and D optimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(beta1, 0.999)) optimizerG = optim.Adam(netG.parameters(), lr=lr, betas=(beta1, 0.999)) # Training Loop # Lists to keep track of progress img_list = [] G_losses = [] D_losses = [] iters = 0 print("Starting Training Loop...") # For each epoch for epoch in range(num_epochs): # For each batch in the dataloader for i, data in enumerate(dataloader, 0): ############################ # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z))) ########################### ## Train with all-real batch netD.zero_grad() # Format batch real_cpu = data[0].to(device) b_size = real_cpu.size(0) label = torch.full((b_size,), real_label, device=device) # Forward pass real batch through D output = netD(real_cpu).view(-1) # Calculate loss on all-real batch errD_real = criterion(output, label) # Calculate gradients for D in backward pass errD_real.backward() D_x = output.mean().item() ## Train with all-fake batch # Generate batch of latent vectors noise = torch.randn(b_size, nz, 1, 1, device=device) # Generate fake image batch with G fake = netG(noise) label.fill_(fake_label) # Classify all fake batch with D output = netD(fake.detach()).view(-1) # Calculate D's loss on the all-fake batch errD_fake = criterion(output, label) # Calculate the gradients for this batch errD_fake.backward() D_G_z1 = output.mean().item() # Add the gradients from the all-real and all-fake batches errD = errD_real + errD_fake # Update D optimizerD.step() ############################ # (2) Update G network: maximize log(D(G(z))) ########################### netG.zero_grad() label.fill_(real_label) # fake labels are real for generator cost # Since we just updated D, perform another forward pass of all-fake batch through D output = netD(fake).view(-1) # Calculate G's loss based on this output errG = criterion(output, label) # Calculate gradients for G errG.backward() D_G_z2 = output.mean().item() # Update G optimizerG.step() # Output 
training stats if i % 50 == 0: print('[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z)): %.4f / %.4f' % (epoch+1, num_epochs, i, len(dataloader), errD.item(), errG.item(), D_x, D_G_z1, D_G_z2)) # Save Losses for plotting later G_losses.append(errG.item()) D_losses.append(errD.item()) # Check how the generator is doing by saving G's output on fixed_noise if (iters % 500 == 0) or ((epoch == num_epochs-1) and (i == len(dataloader)-1)): with torch.no_grad(): fake = netG(fixed_noise).detach().cpu() img_list.append(vutils.make_grid(fake, padding=2, normalize=True)) iters += 1 plt.figure(figsize=(10,5)) plt.title("Generator and Discriminator Loss During Training") plt.plot(G_losses,label="G") plt.plot(D_losses,label="D") plt.xlabel("iterations") plt.ylabel("Loss") plt.legend() plt.show() #%%capture fig = plt.figure(figsize=(8,8)) plt.axis("off") ims = [[plt.imshow(np.transpose(i,(1,2,0)), animated=True)] for i in img_list] ani = animation.ArtistAnimation(fig, ims, interval=1000, repeat_delay=1000, blit=True) HTML(ani.to_jshtml()) # Grab a batch of real images from the dataloader real_batch = next(iter(dataloader)) # Plot the real images plt.figure(figsize=(15,15)) plt.subplot(1,2,1) plt.axis("off") plt.title("Real Images") plt.imshow(np.transpose(vutils.make_grid(real_batch[0].to(device)[:64], padding=5, normalize=True).cpu(),(1,2,0))) # Plot the fake images from the last epoch plt.subplot(1,2,2) plt.axis("off") plt.title("Fake Images") plt.imshow(np.transpose(img_list[-1],(1,2,0))) plt.show()
_____no_output_____
MIT
Deep-Fake-knu-2020/Part_2-Generative-Adversarial-Networks/dc-gan-tutorial.ipynb
kryvokhyzha/examples-and-courses
Looking at the Pictures

*Curtis Miller*

In this notebook we look at the images in our dataset and create some helper tools for managing the data. First, let's load the needed libraries.
import numpy as np import pandas as pd import cv2 import matplotlib.pyplot as plt import matplotlib %matplotlib inline
_____no_output_____
MIT
LookingPictures.ipynb
PacktPublishing/Applications-of-Statistical-Learning-with-Python
The faces are stored in a CSV file, `fer2013.csv`, which we load next.
faces = pd.read_csv("fer2013.csv") faces faces.Usage.value_counts()
_____no_output_____
MIT
LookingPictures.ipynb
PacktPublishing/Applications-of-Statistical-Learning-with-Python
The faces themselves are in the `pixels` column of the `DataFrame`, stored as strings. We want to convert the faces to 48x48 NumPy arrays that can be plotted with matplotlib. The values themselves are the intensities of grayscale pixels. We split each string on spaces and convert the resulting tokens to their corresponding numbers, reshaping to the desired 48x48 array. This is all done with the following function.
def string_to_image(pixelstring): return np.array(pixelstring.split(' '), dtype=np.int16).reshape(48, 48) plt.imshow(string_to_image(faces.pixels[0])) plt.imshow(string_to_image(faces.pixels[8]))
_____no_output_____
MIT
LookingPictures.ipynb
PacktPublishing/Applications-of-Statistical-Learning-with-Python
As humans we would like to know what the codes in the `emotion` column represent. The following dictionary defines the mapping. We won't use it in training but it's useful when presenting.
emotion_code = {0: "angry", 1: "disgust", 2: "fear", 3: "happy", 4: "sad", 5: "surprise", 6: "neutral"}
_____no_output_____
MIT
LookingPictures.ipynb
PacktPublishing/Applications-of-Statistical-Learning-with-Python
Stochastic Block Model Experiment

Before getting into the experiment details, let's review Algorithm 1 and its primal and dual updates.

Algorithm 1

![title](../algorithm1.png)
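As a compact summary of the updates (a sketch read off the implementation below, not the paper's exact notation): with $\mathcal{M}$ the sampling set, $L_i$ the local loss of node $i$, and $\Sigma$, $T$ the diagonal step-size matrices, each iteration computes

$$\hat{w}^{(k)} = w^{(k)} - T D^{\top} u^{(k)}, \qquad w_i^{(k+1)} = \begin{cases} \operatorname{arg\,min}_{w} \; L_i(w) + \frac{1}{2\tau_i} \lVert w - \hat{w}_i^{(k)} \rVert_2^2 & i \in \mathcal{M} \\ \hat{w}_i^{(k)} & \text{otherwise,} \end{cases}$$

followed by the dual step

$$u^{(k+1)} = \operatorname{proj}_{\lambda}\big(u^{(k)} + \Sigma D \,(2 w^{(k+1)} - w^{(k)})\big),$$

where $\operatorname{proj}_{\lambda}$ is the penalty-specific update described in the dual-update section.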
# %load algorithm/main.py %time from sklearn.metrics import mean_squared_error from penalty import * def algorithm_1(K, D, weight_vec, datapoints, true_labels, samplingset, lambda_lasso, penalty_func_name='norm1', calculate_score=False): ''' :param K: the number of iterations :param D: the block incidence matrix :param weight_vec: a list containing the edges's weights of the graph :param datapoints: a dictionary containing the data of each node in the graph needed for the algorithm 1 :param true_labels: a list containing the true labels of the nodes :param samplingset: the sampling set :param lambda_lasso: the parameter lambda :param penalty_func_name: the name of the penalty function used in the algorithm :return iteration_scores: the mean squared error of the predicted weight vectors in each iteration :return new_w: the predicted weigh vectors for each node ''' Sigma = np.diag(np.full(weight_vec.shape, 0.9 / 2)) ''' Sigma: the block diagonal matrix Sigma ''' T_matrix = np.diag(np.array((1.0 / (np.sum(abs(D), 0)))).ravel()) ''' T_matrix: the block diagonal matrix T ''' if np.linalg.norm(np.dot(Sigma ** 0.5, D).dot(T_matrix ** 0.5), 2) > 1: print ('product norm', np.linalg.norm(np.dot(Sigma ** 0.5, D).dot(T_matrix ** 0.5), 2)) E, N = D.shape m, n = datapoints[0]['features'].shape # define the penalty function if penalty_func_name == 'norm1': penalty_func = Norm1Pelanty(lambda_lasso, weight_vec, Sigma, n) elif penalty_func_name == 'norm2': penalty_func = Norm2Pelanty(lambda_lasso, weight_vec, Sigma, n) elif penalty_func_name == 'mocha': penalty_func = MOCHAPelanty(lambda_lasso, weight_vec, Sigma, n) else: raise Exception('Invalid penalty name') # starting algorithm 1 new_w = np.array([np.zeros(n) for i in range(N)]) ''' new_w: the primal variable of the algorithm 1 ''' new_u = np.array([np.zeros(n) for i in range(E)]) ''' new_u: the dual variable of the algorithm 1 ''' iteration_scores = [] for iterk in range(K): # if iterk % 100 == 0: # print ('iter:', iterk) prev_w = np.copy(new_w) # algorithm 1, line 2 hat_w = new_w - np.dot(T_matrix, np.dot(D.T, new_u)) for i in range(N): if i in samplingset: # algorithm 1, line 6 optimizer = datapoints[i]['optimizer'] new_w[i] = optimizer.optimize(datapoints[i]['features'], datapoints[i]['label'], hat_w[i], datapoints[i]['degree']) else: new_w[i] = hat_w[i] # algorithm 1, line 9 tilde_w = 2 * new_w - prev_w new_u = new_u + np.dot(Sigma, np.dot(D, tilde_w)) # algorithm 1, line 10 new_u = penalty_func.update(new_u) # calculate the MSE of the predicted weight vectors if calculate_score: Y_pred = [] for i in range(N): Y_pred.append(np.dot(datapoints[i]['features'], new_w[i])) iteration_scores.append(mean_squared_error(true_labels.reshape(N, m), Y_pred)) # print (np.max(abs(new_w - prev_w))) return iteration_scores, new_w
CPU times: user 3 µs, sys: 1e+03 ns, total: 4 µs Wall time: 7.15 µs
MIT
SBM_experiment.ipynb
YuTian8328/flow-based-clustering
Primal Update

As you see in the algorithm picture, the primal update needs an optimizer for the sampling set (line 6). We have implemented the optimizers discussed in the paper: PyTorch implementations of both the logistic-loss and squared-error-loss optimizers are available, and we have also implemented the squared-error-loss optimizer using the fixed-point equation in the `Networked Linear Regression` section of the paper.
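For the squared-error loss, the `LinearModel`/`LinearOptimizer` pair below solves this node-wise problem in closed form. Reading off the code (with $\tau_i$ the node's degree factor $1/N_i$, $X_i$ its features, and $y_i$ its labels), the update is

$$w_i = \big(I + 2\tau_i X_i^{\top} X_i\big)^{-1}\big(\hat{w}_i + 2\tau_i X_i^{\top} y_i\big),$$

which is the minimizer of $\tau_i \lVert y_i - X_i w \rVert_2^2 + \frac{1}{2} \lVert w - \hat{w}_i \rVert_2^2$.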
# %load algorithm/optimizer.py import torch import abc import numpy as np from abc import ABC # The linear model which is implemented by pytorch class TorchLinearModel(torch.nn.Module): def __init__(self, n): super(TorchLinearModel, self).__init__() self.linear = torch.nn.Linear(n, 1, bias=False) def forward(self, x): y_pred = self.linear(x) return y_pred # The abstract optimizer model which should have model, optimizer, and criterion as the input class Optimizer(ABC): def __init__(self, model, optimizer, criterion): self.model = model self.optimizer = optimizer self.criterion = criterion @abc.abstractmethod def optimize(self, x_data, y_data, old_weight, regularizer_term): torch_old_weight = torch.from_numpy(np.array(old_weight, dtype=np.float32)) self.model.linear.weight.data = torch_old_weight for iterinner in range(40): self.optimizer.zero_grad() y_pred = self.model(x_data) loss1 = self.criterion(y_pred, y_data) loss2 = 1 / (2 * regularizer_term) * torch.mean((self.model.linear.weight - torch_old_weight) ** 2) # + 10000*torch.mean((model.linear.bias+0.5)**2)#model.linear.weight.norm(2) loss = loss1 + loss2 loss.backward() self.optimizer.step() return self.model.linear.weight.data.numpy() # The linear model in Networked Linear Regression section of the paper class LinearModel: def __init__(self, degree, features, label): mtx1 = 2 * degree * np.dot(features.T, features).astype('float64') mtx1 += 1 * np.eye(mtx1.shape[0]) mtx1_inv = np.linalg.inv(mtx1) mtx2 = 2 * degree * np.dot(features.T, label).T self.mtx1_inv = mtx1_inv self.mtx2 = mtx2 def forward(self, x): mtx2 = x + self.mtx2 mtx_inv = self.mtx1_inv return np.dot(mtx_inv, mtx2) # The Linear optimizer in Networked Linear Regression section of the paper class LinearOptimizer(Optimizer): def __init__(self, model): super(LinearOptimizer, self).__init__(model, None, None) def optimize(self, x_data, y_data, old_weight, regularizer_term): return self.model.forward(old_weight) # The Linear optimizer model which is implemented by pytorch class TorchLinearOptimizer(Optimizer): def __init__(self, model): criterion = torch.nn.MSELoss(reduction='mean') optimizer = torch.optim.RMSprop(model.parameters()) super(TorchLinearOptimizer, self).__init__(model, optimizer, criterion) def optimize(self, x_data, y_data, old_weight, regularizer_term): return super(TorchLinearOptimizer, self).optimize(x_data, y_data, old_weight, regularizer_term) # The Logistic optimizer model which is implemented by pytorch class TorchLogisticOptimizer(Optimizer): def __init__(self, model): criterion = torch.nn.BCELoss(reduction='mean') optimizer = torch.optim.RMSprop(model.parameters()) super(TorchLogisticOptimizer, self).__init__(model, optimizer, criterion) def optimize(self, x_data, y_data, old_weight, regularizer_term): return super(TorchLogisticOptimizer, self).optimize(x_data, y_data, old_weight, regularizer_term)
_____no_output_____
MIT
SBM_experiment.ipynb
YuTian8328/flow-based-clustering
Dual Update

As mentioned in the paper, the dual update has a penalty function (line 10) which is either norm1, norm2, or mocha.
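Reading off the implementations below (a sketch of what each `update` method does, edge by edge, with $w_e$ the edge weight): the norm2 penalty rescales any dual vector whose norm exceeds $\lambda w_e$,

$$u_e \leftarrow u_e \cdot \min\!\left(1, \frac{\lambda w_e}{\lVert u_e \rVert_2}\right),$$

the norm1 penalty clips each component of $u_e$ to the interval $[-\lambda w_e, \lambda w_e]$, and the MOCHA penalty rescales by a fixed factor,

$$u_e \leftarrow \frac{u_e}{1 + 2\Sigma_{ee} / (\lambda w_e)}.$$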
# %load algorithm/penalty.py import abc import numpy as np from abc import ABC # The abstract penalty function which has a function update class Penalty(ABC): def __init__(self, lambda_lasso, weight_vec, Sigma, n): self.lambda_lasso = lambda_lasso self.weight_vec = weight_vec self.Sigma = Sigma @abc.abstractmethod def update(self, new_u): pass # The norm2 penalty function class Norm2Pelanty(Penalty): def __init__(self, lambda_lasso, weight_vec, Sigma, n): super(Norm2Pelanty, self).__init__(lambda_lasso, weight_vec, Sigma, n) self.limit = np.array(lambda_lasso * weight_vec) def update(self, new_u): normalized_u = np.where(np.linalg.norm(new_u, axis=1) >= self.limit) new_u[normalized_u] = (new_u[normalized_u].T * self.limit[normalized_u] / np.linalg.norm(new_u[normalized_u], axis=1)).T return new_u # The MOCHA penalty function class MOCHAPelanty(Penalty): def __init__(self, lambda_lasso, weight_vec, Sigma, n): super(MOCHAPelanty, self).__init__(lambda_lasso, weight_vec, Sigma, n) self.normalize_factor = 1 + np.dot(2 * self.Sigma, 1/(self.lambda_lasso * self.weight_vec)) def update(self, new_u): for i in range(new_u.shape[1]): new_u[:, i] /= self.normalize_factor return new_u # The norm1 penalty function class Norm1Pelanty(Penalty): def __init__(self, lambda_lasso, weight_vec, Sigma, n): super(Norm1Pelanty, self).__init__(lambda_lasso, weight_vec, Sigma, n) self.limit = np.array([np.zeros(n) for i in range(len(weight_vec))]) for i in range(n): self.limit[:, i] = lambda_lasso * weight_vec def update(self, new_u): normalized_u = np.where(abs(new_u) >= self.limit) new_u[normalized_u] = self.limit[normalized_u] * new_u[normalized_u] / abs(new_u[normalized_u]) return new_u
_____no_output_____
MIT
SBM_experiment.ipynb
YuTian8328/flow-based-clustering
Create SBM Graph

The stochastic block model (SBM) is a generative model for random graphs with a cluster structure. Two nodes within the same cluster of the empirical graph are connected by an edge with probability pin; two nodes from different clusters are connected by an edge with probability pout. Each node $i \in V$ represents a local dataset consisting of $m$ feature vectors $x^{(i,1)}, \ldots, x^{(i,m)} \in R^n$. The feature vectors are i.i.d. realizations of a standard Gaussian random vector $x \sim N(0,I)$. The labels $y_1^{(i)}, \ldots, y_m^{(i)} \in R$ of the nodes $i \in V$ are generated according to the linear model $y_r^{(i)} = (x^{(i, r)})^T w^{(i)} + \epsilon$, with $\epsilon \sim N(0,\sigma)$. To learn the weights $w^{(i)}$, we apply Algorithm 1 to a training set M obtained by randomly selecting 40% of the nodes.
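For intuition on the data structures built below: the returned matrix B is the oriented incidence matrix used as $D$ in Algorithm 1 (one row per edge with a $+1$ and a $-1$ entry, not an adjacency matrix). For a toy graph with three nodes and edges $(0,1)$ and $(1,2)$ it would be

$$B = \begin{pmatrix} 1 & -1 & 0 \\ 0 & 1 & -1 \end{pmatrix}.$$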
from optimizer import *
from torch.autograd import Variable
#from graspy.simulations import sbm

def get_sbm_data(cluster_sizes, G, W, m=5, n=2, noise_sd=0, is_torch_model=True):
    '''
    :param cluster_sizes: a list containing the size of each cluster
    :param G: generated SBM graph with defined clusters using graspy.simulations
    :param W: a list containing the weight vectors for each cluster
    :param m, n: shape of features vector for each node
    :param noise_sd: the standard deviation of the noise for calculating the labels

    :return B: the (oriented) incidence matrix of the graph
    :return weight_vec: a list containing the edges' weights of the graph
    :return true_labels: a list containing the true labels of the nodes
    :return datapoints: a dictionary containing the data of each node in the graph needed for the algorithm 1
    '''

    N = len(G)
    E = int(G.number_of_edges())  # int(len(np.argwhere(G > 0))/2)
    '''
    N: total number of nodes
    E: total number of edges
    '''

    # create B (the oriented incidence matrix) and the edges' weights vector (weight_vec) based on the graph G
    B = np.zeros((E, N))
    '''
    B: the (oriented) incidence matrix of the graph with the shape of E*N
    '''
    weight_vec = np.zeros(E)
    '''
    weight_vec: a list containing the edges' weights of the graph with the shape of E
    '''

    cnt = 0
    for i, j in G.edges:
        if i > j:
            continue
        B[cnt, i] = 1
        B[cnt, j] = -1

        weight_vec[cnt] = 1
        cnt += 1

    # create the data of each node needed for the algorithm 1

    node_degrees = np.array((1.0 / (np.sum(abs(B), 0)))).ravel()
    '''
    node_degrees: a list containing the nodes degree for the alg1 (1/N_i)
    '''

    datapoints = {}
    '''
    datapoints: a dictionary containing the data of each node in the graph needed for the algorithm 1,
    which are features, label, degree, and also the optimizer model for each node
    '''
    true_labels = []
    '''
    true_labels: the true labels for the nodes of the graph
    '''

    cnt = 0
    for i, cluster_size in enumerate(cluster_sizes):
        for j in range(cluster_size):
            features = np.random.normal(loc=0.0, scale=1.0, size=(m, n))
            '''
            features: the feature vectors of the node, which are i.i.d. realizations of a standard Gaussian random vector x~N(0,I)
            '''
            # note: a single scalar noise draw is shared by all m labels of the node
            label = np.dot(features, W[i]) + np.random.normal(0, noise_sd)
            '''
            label: the labels of the node, generated according to the linear model y = x^T w + e
            '''

            true_labels.append(label)

            if is_torch_model:
                model = TorchLinearModel(n)
                optimizer = TorchLinearOptimizer(model)
                features = Variable(torch.from_numpy(features)).to(torch.float32)
                label = Variable(torch.from_numpy(label)).to(torch.float32)
            else:
                # note: node_degrees is indexed by the cluster counter i here;
                # node_degrees[cnt] would index the current node
                model = LinearModel(node_degrees[i], features, label)
                optimizer = LinearOptimizer(model)
            '''
            model : the linear model for the node
            optimizer : the optimizer model for the node
            '''

            datapoints[cnt] = {
                'features': features,
                'degree': node_degrees[i],
                'label': label,
                'optimizer': optimizer
            }
            cnt += 1

    return B, weight_vec, np.array(true_labels), datapoints
_____no_output_____
MIT
SBM_experiment.ipynb
YuTian8328/flow-based-clustering
Compare Results

As the final result, we compare the MSE of Algorithm 1 with plain linear regression and decision tree regression.
# %load results/compare_results.py import numpy as np from sklearn.linear_model import LinearRegression from sklearn.tree import DecisionTreeRegressor from sklearn.metrics import mean_squared_error def get_algorithm1_MSE(datapoints, predicted_w, samplingset): ''' :param datapoints: a dictionary containing the data of each node in the graph needed for the algorithm 1 :param predicted_w: the predicted weigh vectors for each node :param samplingset: the sampling set for the algorithm 1 :return alg1_MSE: the MSE of the algorithm 1 for all the nodes, the samplingset and other nodes (test set) ''' not_samplingset = [i for i in range(len(datapoints)) if i not in samplingset] true_labels = [] pred_labels = [] for i in range(len(datapoints)): features = np.array(datapoints[i]['features']) label = np.array(datapoints[i]['label']) true_labels.append(label) pred_labels.append(np.dot(features, predicted_w[i])) pred_labels = np.array(pred_labels) true_labels = np.array(true_labels) alg1_MSE = {'total': mean_squared_error(true_labels, pred_labels), 'train': mean_squared_error(true_labels[samplingset], pred_labels[samplingset]), 'test': mean_squared_error(true_labels[not_samplingset], pred_labels[not_samplingset])} return alg1_MSE def get_linear_regression_MSE(x, y, samplingset, not_samplingset): ''' :param x: a list containing the features of the nodes :param y: a list containing the labels of the nodes :param samplingset: the training dataset :param not_samplingset: the test dataset :return linear_regression_MSE : the MSE of linear regression for all the nodes, the samplingset and other nodes (test set) ''' model = LinearRegression().fit(x[samplingset], y[samplingset]) pred_y = model.predict(x) linear_regression_MSE = {'total': mean_squared_error(y, pred_y), 'train': mean_squared_error(y[samplingset], pred_y[samplingset]), 'test': mean_squared_error(y[not_samplingset], pred_y[not_samplingset])} return linear_regression_MSE def get_decision_tree_MSE(x, y, samplingset, not_samplingset): ''' :param x: a list containing the features of the nodes :param y: a list containing the labels of the nodes :param samplingset: the training dataset :param not_samplingset: the test dataset :return decision_tree_MSE : the MSE of decision tree for all the nodes, the samplingset and other nodes (test set) ''' max_depth = 2 regressor = DecisionTreeRegressor(max_depth=max_depth) regressor.fit(x[samplingset], y[samplingset]) pred_y = regressor.predict(x) decision_tree_MSE = {'total': mean_squared_error(y, pred_y), 'train': mean_squared_error(y[samplingset], pred_y[samplingset]), 'test': mean_squared_error(y[not_samplingset], pred_y[not_samplingset])} return decision_tree_MSE def get_scores(datapoints, predicted_w, samplingset): N = len(datapoints) ''' N : the total number of nodes ''' # calculate algorithm1 MSE alg_1_score = get_algorithm1_MSE(datapoints, predicted_w, samplingset) # prepare the data for calculating the linear regression and decision tree regression MSEs X = [] ''' X: an array containing the features of all the nodes ''' true_labels = [] ''' true_labels: an array containing the labels of all the nodes ''' for i in range(len(datapoints)): X.append(np.array(datapoints[i]['features'])) true_labels.append(np.array(datapoints[i]['label'])) X = np.array(X) true_labels = np.array(true_labels) m, n = X[0].shape x = X.reshape(-1, n) y = true_labels.reshape(-1, 1) reformated_samplingset = [] for item in samplingset: for i in range(m): reformated_samplingset.append(m * item + i) reformated_not_samplingset = [i for i in 
range(m * N) if i not in reformated_samplingset] # calculate linear regression MSE linear_regression_score = get_linear_regression_MSE(x, y, reformated_samplingset, reformated_not_samplingset) # calculate decision tree MSE decision_tree_score = get_decision_tree_MSE(x, y, reformated_samplingset, reformated_not_samplingset) return alg_1_score, linear_regression_score, decision_tree_score
_____no_output_____
MIT
SBM_experiment.ipynb
YuTian8328/flow-based-clustering
SBM with Two Clusters

This SBM has two clusters, $|C_1| = |C_2| = 100$. Two nodes within the same cluster are connected by an edge with probability `pin=0.5`, and two nodes from different clusters are connected by an edge with probability `pout=0.01`. Each node $i \in V$ represents a local dataset consisting of feature vectors $x^{(i,1)}, \ldots, x^{(i,5)} \in R^2$. The feature vectors are i.i.d. realizations of a standard Gaussian random vector $x \sim N(0,I)$. The labels $y_1^{(i)}, \ldots, y_5^{(i)} \in R$ for each node $i \in V$ are generated according to the linear model $y_r^{(i)} = (x^{(i, r)})^T w^{(i)} + \epsilon$, with $\epsilon = 0$. The tuning parameter $\lambda$ in Algorithm 1 is manually chosen, guided by the resulting MSE, as $\lambda=0.01$ for norm1 and norm2 and $\lambda=0.05$ for the MOCHA penalty function. To learn the weights $w^{(i)}$, we apply Algorithm 1 to a training set M obtained by randomly selecting 40% of the nodes, and use the rest as a test set. Finally, we compare the mean MSE of Algorithm 1 with plain linear regression and decision tree regression with respect to the different random sampling sets.
#from graspy.simulations import sbm import networkx as nx def get_sbm_2blocks_data(m=5, n=2, pin=0.5, pout=0.01, noise_sd=0, is_torch_model=True): ''' :param m, n: shape of features vector for each node :param pin: the probability of edges inside each cluster :param pout: the probability of edges between the clusters :param noise_sd: the standard deviation of the noise for calculating the labels :return B: adjacency matrix of the graph :return weight_vec: a list containing the edges's weights of the graph :return true_labels: a list containing the true labels of the nodes :return datapoints: a dictionary containing the data of each node in the graph needed for the algorithm 1 ''' cluster_sizes = [100, 100] # generate graph G which is a SBM wich 2 clusters #G = sbm(n=cluster_sizes, p=[[pin, pout],[pout, pin]]) probs = [[pin, pout], [pout, pin]] G = nx.stochastic_block_model(cluster_sizes, probs) ''' G: generated SBM graph with 2 clusters ''' # define weight vectors for each cluster of the graph W1 = np.array([2, 2]) ''' W1: the weigh vector for the first cluster ''' W2 = np.array([-2, 2]) ''' W2: the weigh vector for the second cluster ''' W = [W1, W2] return get_sbm_data(cluster_sizes, G, W, m, n, noise_sd, is_torch_model) a = nx.stochastic_block_model([100, 100], [[0.1,0.01], [0.01,0.1]]) nx.draw(a,with_labels=True)
_____no_output_____
MIT
SBM_experiment.ipynb
YuTian8328/flow-based-clustering
Plot the MSE with respect to the different random sampling sets for each penalty function; the plots are on a log scale.
%time import random import matplotlib.pyplot as plt from collections import defaultdict PENALTY_FUNCS = ['norm1', 'norm2', 'mocha'] LAMBDA_LASSO = {'norm1': 0.01, 'norm2': 0.01, 'mocha': 0.05} K = 1000 B, weight_vec, true_labels, datapoints = get_sbm_2blocks_data(pin=0.5, pout=0.01, is_torch_model=False) E, N = B.shape alg1_scores = defaultdict(list) linear_regression_scores = defaultdict(list) decision_tree_scores = defaultdict(list) ##samplingset = random.sample([i for i in range(N)], k=int(0.4* N)) ##lambda_lasso = LAMBDA_LASSO['mocha'] ##algorithm_1(K, B, weight_vec, datapoints, true_labels, samplingset, lambda_lasso, PENALTY_FUNCS[0]) num_tries = 5 for i in range(num_tries): samplingset = random.sample([i for i in range(N)], k=int(0.4* N)) for penalty_func in PENALTY_FUNCS: lambda_lasso = LAMBDA_LASSO[penalty_func] _, predicted_w = algorithm_1(K, B, weight_vec, datapoints, true_labels, samplingset, lambda_lasso, penalty_func) alg1_score, linear_regression_score, decision_tree_score = get_scores(datapoints, predicted_w, samplingset) alg1_scores[penalty_func].append(alg1_score) linear_regression_scores[penalty_func].append(linear_regression_score) decision_tree_scores[penalty_func].append(decision_tree_score) %time labels = ['alg1,norm1', 'alg1,norm2', 'alg1,mocha', 'linear reg', 'decision tree'] x_pos = np.arange(len(labels)) print('algorithm 1, norm1:', '\n mean train MSE:', np.mean([item['train'] for item in alg1_scores['norm1']]), '\n mean test MSE:', np.mean([item['test'] for item in alg1_scores['norm1']])) print('algorithm 1, norm2:', '\n mean train MSE:', np.mean([item['train'] for item in alg1_scores['norm2']]), '\n mean test MSE:', np.mean([item['test'] for item in alg1_scores['norm2']])) print('algorithm 1, mocha:', '\n mean train MSE:', np.mean([item['train'] for item in alg1_scores['mocha']]), '\n mean test MSE:', np.mean([item['test'] for item in alg1_scores['mocha']])) print('linear regression:', '\n mean train MSE:', np.mean([item['train'] for item in linear_regression_scores['norm1']]), '\n mean test MSE:', np.mean([item['test'] for item in linear_regression_scores['norm1']])) print('decision tree:', '\n mean train MSE:', np.mean([item['train'] for item in decision_tree_scores['norm1']]), '\n mean test MSE:', np.mean([item['test'] for item in decision_tree_scores['norm1']])) alg1_norm1_score = [item['total'] for item in alg1_scores['norm1']] alg1_norm2_score = [item['total'] for item in alg1_scores['norm2']] alg1_mocha_score = [item['total'] for item in alg1_scores['mocha']] linear_regression_score = [item['total'] for item in linear_regression_scores['norm1']] decision_tree_score = [item['total'] for item in decision_tree_scores['norm1']] mean_MSEs = [ np.mean(alg1_norm1_score), np.mean(alg1_norm2_score), np.mean(alg1_mocha_score), np.mean(linear_regression_score), np.mean(decision_tree_score) ] std_MSEs = [ np.std(alg1_norm1_score), np.std(alg1_norm2_score), np.std(alg1_mocha_score), np.std(linear_regression_score), np.std(decision_tree_score)] fig, ax = plt.subplots() ax.bar(x_pos, mean_MSEs, yerr=std_MSEs, align='center', alpha=0.5, ecolor='black', capsize=20) ax.set_ylabel('MSE') ax.set_xticks(x_pos) ax.set_xticklabels(labels) ax.set_yscale('log') ax.set_title('error bars plot') plt.show() plt.close()
CPU times: user 3 µs, sys: 1 µs, total: 4 µs Wall time: 26.9 µs algorithm 1, norm1: mean train MSE: 8.845062633626295e-06 mean test MSE: 8.411817666751793e-06 algorithm 1, norm2: mean train MSE: 8.937548539721603e-06 mean test MSE: 8.583071087032906e-06 algorithm 1, mocha: mean train MSE: 0.0011548714912415193 mean test MSE: 0.059934032754604294 linear regression: mean train MSE: 4.174356924195071 mean test MSE: 3.993515488232095 decision tree: mean train MSE: 4.198915999492509 mean test MSE: 4.493515851377256
MIT
SBM_experiment.ipynb
YuTian8328/flow-based-clustering
Plot the MSE with respect to the different noise standard deviations (0.01, 0.1, 1.0) for each penalty function. As the plots show, Algorithm 1 is fairly robust to the noise.
%time import random import matplotlib.pyplot as plt PENALTY_FUNCS = ['norm1', 'norm2', 'mocha'] lambda_lasso = 0.01 K = 20 sampling_ratio = 0.6 pouts = [0.01, 0.1, 0.2, 0.4, 0.6] colors = ['steelblue', 'darkorange', 'green'] for penalty_func in PENALTY_FUNCS: print('penalty_func:', penalty_func) for i, noise in enumerate([0.01, 0.1, 1.0]): MSEs_mean = {} MSEs_std = {} for pout in pouts: num_tries = 5 pout_mses = [] for j in range(num_tries): B, weight_vec, true_labels, datapoints = get_sbm_2blocks_data(pin=0.5, pout=pout, noise_sd=noise, is_torch_model=False) E, N = B.shape samplingset = random.sample([i for i in range(N)], k=int(sampling_ratio * N)) _, predicted_w = algorithm_1(K, B, weight_vec, datapoints, true_labels, samplingset, lambda_lasso, penalty_func) alg1_score, _, _ = get_scores(datapoints, predicted_w, samplingset) pout_mses.append(alg1_score['total']) MSEs_mean[pout] = np.mean(pout_mses) MSEs_std[pout] = np.std(pout_mses) plt.errorbar(list(MSEs_mean.keys()), list(MSEs_mean.values()), yerr=list(MSEs_std.values()), ecolor=colors[i], capsize=3, label='noise=' + str(noise), c=colors[i]) print('noise', noise) print(' MSEs:', MSEs_mean) plt.xlabel('p_out') plt.ylabel('MSE') plt.legend(loc='best') plt.title('Penalty function : %s' % penalty_func) plt.show() plt.close()
CPU times: user 0 ns, sys: 0 ns, total: 0 ns Wall time: 29.3 µs penalty_func: norm1 noise 0.01 MSEs: {0.01: 2.705315973442155, 0.1: 2.8803085633466834, 0.2: 3.123534394242319, 0.4: 3.118645741846799, 0.6: 3.2209511562160515} noise 0.1 MSEs: {0.01: 2.858618729737168, 0.1: 2.8760340056295552, 0.2: 3.0985472679149177, 0.4: 3.166597939776252, 0.6: 3.259783197200458} noise 1.0 MSEs: {0.01: 3.940328318550642, 0.1: 3.8713989829443323, 0.2: 3.8776937800828435, 0.4: 4.023545611925063, 0.6: 4.102011359863877}
MIT
SBM_experiment.ipynb
YuTian8328/flow-based-clustering
Plot the MSE with respect to the different sampling ratios (0.2, 0.4, 0.6) for each penalty function
import random import matplotlib.pyplot as plt PENALTY_FUNCS = ['norm1', 'norm2', 'mocha'] lambda_lasso = 0.01 K = 30 sampling_ratio = 0.6 pouts = [0.01, 0.1, 0.2, 0.4, 0.6] colors = ['steelblue', 'darkorange', 'green'] for penalty_func in PENALTY_FUNCS: print('penalty_func:', penalty_func) for i, sampling_ratio in enumerate([0.2, 0.4, 0.6]): MSEs_mean = {} MSEs_std = {} for pout in pouts: num_tries = 5 pout_mses = [] for j in range(num_tries): B, weight_vec, true_labels, datapoints = get_sbm_2blocks_data(pin=0.5, pout=pout, is_torch_model=False) E, N = B.shape samplingset = random.sample([i for i in range(N)], k=int(sampling_ratio * N)) _, predicted_w = algorithm_1(K, B, weight_vec, datapoints, true_labels, samplingset, lambda_lasso, penalty_func) alg1_score, _, _ = get_scores(datapoints, predicted_w, samplingset) pout_mses.append(alg1_score['total']) MSEs_mean[pout] = np.mean(pout_mses) MSEs_std[pout] = np.std(pout_mses) plt.errorbar(list(MSEs_mean.keys()), list(MSEs_mean.values()), yerr=list(MSEs_std.values()), ecolor=colors[i], capsize=3, label='M=' + str(sampling_ratio), c=colors[i]) print('M:', sampling_ratio) print('MSE:', MSEs_mean) plt.xlabel('p_out') plt.ylabel('MSE') plt.legend(loc='best') plt.title('Penalty function : %s' % penalty_func) plt.show() plt.close()
penalty_func: norm1 M: 0.2 MSE: {0.01: 6.011022085530584, 0.1: 5.854915785166783, 0.2: 6.136745451677013, 0.4: 6.165292827085321, 0.6: 6.495651188025879} M: 0.4 MSE: {0.01: 4.224003404596983, 0.1: 4.423759609325218, 0.2: 4.394123644502406, 0.4: 4.516906390091848, 0.6: 4.494963955858758} M: 0.6 MSE: {0.01: 2.7074374532565324, 0.1: 2.668277009267647, 0.2: 2.79190605215051, 0.4: 3.1706581302101258, 0.6: 3.0742020650648056}
MIT
SBM_experiment.ipynb
YuTian8328/flow-based-clustering
SBM with Five Clusters

The sizes of the clusters are {70, 10, 50, 100, 150}, with random weight vectors $\in R^2$ selected uniformly from $[0,1)$. We run Algorithm 1 with a fixed `pin = 0.5` and `pout = 0.001`, and a fixed number of 1000 iterations. Each node $i \in V$ represents a local dataset consisting of feature vectors $x^{(i,1)}, \ldots, x^{(i,5)} \in R^2$. The feature vectors are i.i.d. realizations of a standard Gaussian random vector $x \sim N(0,I)$. The labels $y_1^{(i)}, \ldots, y_5^{(i)} \in R$ for each node $i \in V$ are generated according to the linear model $y_r^{(i)} = (x^{(i, r)})^T w^{(i)} + \epsilon$, with $\epsilon = 0$. The tuning parameter $\lambda$ in Algorithm 1 is manually chosen, guided by the resulting MSE, as $\lambda=0.01$ for norm1 and norm2 and $\lambda=0.05$ for the MOCHA penalty function. We assume that labels $y^{(i)}$ are available for 20% of the graph nodes. We randomly choose the training set M and use the rest as a test set. Finally, we compare the mean MSE of Algorithm 1 with plain linear regression and decision tree regression with respect to the different random sampling sets.
from graspy.simulations import sbm def get_sbm_5blocks_data(m=5, n=2, pin=0.5, pout=0.01, noise_sd=0, is_torch_model=True): ''' :param m, n: shape of features vector for each node :param pin: the probability of edges inside each cluster :param pout: the probability of edges between the clusters :param noise_sd: the standard deviation of the noise for calculating the labels :return B: adjacency matrix of the graph :return weight_vec: a list containing the edges's weights of the graph :return true_labels: a list containing the true labels of the nodes :return datapoints: a dictionary containing the data of each node in the graph needed for the algorithm 1 ''' cluster_sizes = [70, 10, 50, 100, 150] p = [[pin if i==j else pout for i in range(len(cluster_sizes))] for j in range(len(cluster_sizes))] # generate graph G which is a SBM wich 2 clusters G = sbm(n=cluster_sizes, p=p) ''' G: generated SBM graph with 2 clusters ''' # define weight vectors for each cluster of the graph W = [] for i in range(len(cluster_sizes)): # the weigh vector for the ith cluster W.append(np.random.random(n)) return get_sbm_data(cluster_sizes, G, W, m, n, noise_sd, is_torch_model) import random PENALTY_FUNCS = ['norm1', 'norm2', 'mocha'] LAMBDA_LASSO = {'norm1': 0.01, 'norm2': 0.01, 'mocha': 0.05} K = 1000 B, weight_vec, true_labels, datapoints = get_sbm_5blocks_data(pin=0.5, pout=0.001, is_torch_model=False) E, N = B.shape alg1_scores = defaultdict(list) linear_regression_scores = defaultdict(list) decision_tree_scores = defaultdict(list) num_tries = 5 for i in range(num_tries): samplingset = random.sample([i for i in range(N)], k=int(0.2* N)) for penalty_func in PENALTY_FUNCS: lambda_lasso = LAMBDA_LASSO[penalty_func] _, predicted_w = algorithm_1(K, B, weight_vec, datapoints, true_labels, samplingset, lambda_lasso, penalty_func) alg1_score, linear_regression_score, decision_tree_score = get_scores(datapoints, predicted_w, samplingset) alg1_scores[penalty_func].append(alg1_score) linear_regression_scores[penalty_func].append(linear_regression_score) decision_tree_scores[penalty_func].append(decision_tree_score) labels = ['alg1,norm1', 'alg1,norm2', 'alg1,mocha', 'linear reg', 'decision tree'] x_pos = np.arange(len(labels)) print('algorithm 1, norm1:', '\n mean train MSE:', np.mean([item['train'] for item in alg1_scores['norm1']]), '\n mean test MSE:', np.mean([item['test'] for item in alg1_scores['norm1']])) print('algorithm 1, norm2:', '\n mean train MSE:', np.mean([item['train'] for item in alg1_scores['norm2']]), '\n mean test MSE:', np.mean([item['test'] for item in alg1_scores['norm2']])) print('algorithm 1, mocha:', '\n mean train MSE:', np.mean([item['train'] for item in alg1_scores['mocha']]), '\n mean test MSE:', np.mean([item['test'] for item in alg1_scores['mocha']])) print('linear regression:', '\n mean train MSE:', np.mean([item['train'] for item in linear_regression_scores['norm1']]), '\n mean test MSE:', np.mean([item['test'] for item in linear_regression_scores['norm1']])) print('decision tree:', '\n mean train MSE:', np.mean([item['train'] for item in decision_tree_scores['norm1']]), '\n mean test MSE:', np.mean([item['test'] for item in decision_tree_scores['norm1']])) alg1_norm1_score = [item['total'] for item in alg1_scores['norm1']] alg1_norm2_score = [item['total'] for item in alg1_scores['norm2']] alg1_mocha_score = [item['total'] for item in alg1_scores['mocha']] linear_regression_score = [item['total'] for item in linear_regression_scores['norm1']] decision_tree_score = [item['total'] for 
item in decision_tree_scores['norm1']] mean_MSEs = [ np.mean(alg1_norm1_score), np.mean(alg1_norm2_score), np.mean(alg1_mocha_score), np.mean(linear_regression_score), np.mean(decision_tree_score) ] std_MSEs = [ np.std(alg1_norm1_score), np.std(alg1_norm2_score), np.std(alg1_mocha_score), np.std(linear_regression_score), np.std(decision_tree_score)] fig, ax = plt.subplots() ax.bar(x_pos, mean_MSEs, yerr=std_MSEs, align='center', alpha=0.5, ecolor='black', capsize=20) ax.set_ylabel('MSE') ax.set_xticks(x_pos) ax.set_xticklabels(labels) ax.set_yscale('log') ax.set_title('error bars plot') plt.show() plt.close() import scipy version = scipy.version.version print(version)
_____no_output_____
MIT
SBM_experiment.ipynb
YuTian8328/flow-based-clustering
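As a minimal sketch of the data model described above (this is not the repository's `get_sbm_data` helper, which additionally builds the graph structures Algorithm 1 needs; the function name here is illustrative), the local dataset and labels for a single node can be generated like this:

import numpy as np

def make_node_dataset(w, m=5, n=2, noise_sd=0.0):
    # m i.i.d. standard Gaussian feature vectors x^(i,r) in R^n
    X = np.random.standard_normal(size=(m, n))
    # linear model y_r = (x^(i,r))^T w^(i) + eps, with eps ~ N(0, noise_sd^2)
    y = X @ w + noise_sd * np.random.standard_normal(size=m)
    return X, y

w_i = np.random.random(2)          # the cluster's weight vector, uniform in [0, 1)
X_i, y_i = make_node_dataset(w_i)  # features and labels for one node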
E2E ML on GCP: MLOps stage 3: formalization: get started with custom training pipeline components Overview This tutorial demonstrates how to use Vertex AI for E2E MLOps on Google Cloud in production. This tutorial covers stage 3 (formalization): get started with custom training pipeline components. Dataset The dataset used for this tutorial is the [Flowers dataset](https://www.tensorflow.org/datasets/catalog/tf_flowers) from [TensorFlow Datasets](https://www.tensorflow.org/datasets/catalog/overview). The version of the dataset you will use in this tutorial is stored in a public Cloud Storage bucket. The trained model predicts which of five flower types an image contains: daisy, dandelion, rose, sunflower, or tulip. Installations Install the packages required for executing the MLOps notebooks (one-time setup).
ONCE_ONLY = False if ONCE_ONLY: ! pip3 install -U tensorflow==2.5 $USER_FLAG ! pip3 install -U tensorflow-data-validation==1.2 $USER_FLAG ! pip3 install -U tensorflow-transform==1.2 $USER_FLAG ! pip3 install -U tensorflow-io==0.18 $USER_FLAG ! pip3 install --upgrade google-cloud-aiplatform[tensorboard] $USER_FLAG ! pip3 install --upgrade google-cloud-pipeline-components $USER_FLAG ! pip3 install --upgrade google-cloud-bigquery $USER_FLAG ! pip3 install --upgrade google-cloud-logging $USER_FLAG ! pip3 install --upgrade apache-beam[gcp] $USER_FLAG ! pip3 install --upgrade pyarrow $USER_FLAG ! pip3 install --upgrade cloudml-hypertune $USER_FLAG ! pip3 install --upgrade kfp $USER_FLAG
_____no_output_____
Apache-2.0
notebooks/community/ml_ops/stage3/get_started_with_custom_training_pipeline_components.ipynb
changlan/vertex-ai-samples
Restart the kernel Once you've installed the additional packages, you need to restart the notebook kernel so it can find the packages.
import os if not os.getenv("IS_TESTING"): # Automatically restart kernel after installs import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True)
_____no_output_____
Apache-2.0
notebooks/community/ml_ops/stage3/get_started_with_custom_training_pipeline_components.ipynb
changlan/vertex-ai-samples
Set your project ID **If you don't know your project ID**, you may be able to get it using `gcloud`.
PROJECT_ID = "[your-project-id]" # @param {type:"string"} if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]": # Get your GCP project id from gcloud shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null PROJECT_ID = shell_output[0] print("Project ID:", PROJECT_ID) ! gcloud config set project $PROJECT_ID
_____no_output_____
Apache-2.0
notebooks/community/ml_ops/stage3/get_started_with_custom_training_pipeline_components.ipynb
changlan/vertex-ai-samples
Region You can also change the `REGION` variable, which is used for operations throughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you. - Americas: `us-central1` - Europe: `europe-west4` - Asia Pacific: `asia-east1` You may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services. Learn more about [Vertex AI regions](https://cloud.google.com/vertex-ai/docs/general/locations).
REGION = "us-central1" # @param {type: "string"}
_____no_output_____
Apache-2.0
notebooks/community/ml_ops/stage3/get_started_with_custom_training_pipeline_components.ipynb
changlan/vertex-ai-samples
Timestamp If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of the resources you create in this tutorial.
from datetime import datetime TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
_____no_output_____
Apache-2.0
notebooks/community/ml_ops/stage3/get_started_with_custom_training_pipeline_components.ipynb
changlan/vertex-ai-samples
Create a Cloud Storage bucket **The following steps are required, regardless of your notebook environment.** When you initialize the Vertex AI SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources is retained across sessions. Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"} if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]": BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
_____no_output_____
Apache-2.0
notebooks/community/ml_ops/stage3/get_started_with_custom_training_pipeline_components.ipynb
changlan/vertex-ai-samples
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
! gsutil mb -l $REGION $BUCKET_NAME
_____no_output_____
Apache-2.0
notebooks/community/ml_ops/stage3/get_started_with_custom_training_pipeline_components.ipynb
changlan/vertex-ai-samples
Finally, validate access to your Cloud Storage bucket by examining its contents:
! gsutil ls -al $BUCKET_NAME
_____no_output_____
Apache-2.0
notebooks/community/ml_ops/stage3/get_started_with_custom_training_pipeline_components.ipynb
changlan/vertex-ai-samples
Service Account **If you don't know your service account**, try to get it by executing the `gcloud` command in the cell below.
SERVICE_ACCOUNT = "[your-service-account]" # @param {type:"string"} if ( SERVICE_ACCOUNT == "" or SERVICE_ACCOUNT is None or SERVICE_ACCOUNT == "[your-service-account]" ): # Get your GCP project id from gcloud shell_output = !gcloud auth list 2>/dev/null SERVICE_ACCOUNT = shell_output[2].strip() print("Service Account:", SERVICE_ACCOUNT)
_____no_output_____
Apache-2.0
notebooks/community/ml_ops/stage3/get_started_with_custom_training_pipeline_components.ipynb
changlan/vertex-ai-samples
Set service account access for Vertex AI Pipelines Run the following commands to grant your service account access to read and write pipeline artifacts in the bucket that you created in the previous step -- you only need to run these once per service account.
! gsutil iam ch serviceAccount:{SERVICE_ACCOUNT}:roles/storage.objectCreator $BUCKET_NAME ! gsutil iam ch serviceAccount:{SERVICE_ACCOUNT}:roles/storage.objectViewer $BUCKET_NAME
_____no_output_____
Apache-2.0
notebooks/community/ml_ops/stage3/get_started_with_custom_training_pipeline_components.ipynb
changlan/vertex-ai-samples
Set up variables Next, set up some variables used throughout the tutorial. Import libraries and define constants
import google.cloud.aiplatform as aip import json from kfp import dsl from kfp.v2 import compiler from kfp.v2.dsl import component
_____no_output_____
Apache-2.0
notebooks/community/ml_ops/stage3/get_started_with_custom_training_pipeline_components.ipynb
changlan/vertex-ai-samples
Initialize Vertex AI SDK for Python Initialize the Vertex AI SDK for Python for your project and the corresponding bucket.
aip.init(project=PROJECT_ID, location=REGION, staging_bucket=BUCKET_NAME)
_____no_output_____
Apache-2.0
notebooks/community/ml_ops/stage3/get_started_with_custom_training_pipeline_components.ipynb
changlan/vertex-ai-samples
Set hardware accelerators You can set hardware accelerators for training and prediction. Set the variables `TRAIN_GPU/TRAIN_NGPU` and `DEPLOY_GPU/DEPLOY_NGPU` to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Tesla K80 GPUs allocated to each VM, you would specify: (aip.AcceleratorType.NVIDIA_TESLA_K80, 4). Otherwise, specify `(None, None)` to use a container image that runs on a CPU. Learn more about [hardware accelerator support for your region](https://cloud.google.com/vertex-ai/docs/general/locations#accelerators). *Note*: GPU builds of TF releases before 2.3 will fail to load the custom model in this tutorial. This is a known issue, fixed in TF 2.3; it is caused by static graph ops that are generated in the serving function. If you encounter this issue with your own custom models, use a container image for TF 2.3 with GPU support.
if os.getenv("IS_TESTING_TRAIN_GPU"): TRAIN_GPU, TRAIN_NGPU = ( aip.gapic.AcceleratorType.NVIDIA_TESLA_K80, int(os.getenv("IS_TESTING_TRAIN_GPU")), ) else: TRAIN_GPU, TRAIN_NGPU = (aip.gapic.AcceleratorType.NVIDIA_TESLA_K80, 1) if os.getenv("IS_TESTING_DEPLOY_GPU"): DEPLOY_GPU, DEPLOY_NGPU = ( aip.gapic.AcceleratorType.NVIDIA_TESLA_K80, int(os.getenv("IS_TESTING_DEPLOY_GPU")), ) else: DEPLOY_GPU, DEPLOY_NGPU = (None, None)
_____no_output_____
Apache-2.0
notebooks/community/ml_ops/stage3/get_started_with_custom_training_pipeline_components.ipynb
changlan/vertex-ai-samples
Set pre-built containers Set the pre-built Docker container images for training and prediction. For the latest list, see [Pre-built containers for training](https://cloud.google.com/ai-platform-unified/docs/training/pre-built-containers). For the latest list, see [Pre-built containers for prediction](https://cloud.google.com/ai-platform-unified/docs/predictions/pre-built-containers).
if os.getenv("IS_TESTING_TF"): TF = os.getenv("IS_TESTING_TF") else: TF = "2.5".replace(".", "-") if TF[0] == "2": if TRAIN_GPU: TRAIN_VERSION = "tf-gpu.{}".format(TF) else: TRAIN_VERSION = "tf-cpu.{}".format(TF) if DEPLOY_GPU: DEPLOY_VERSION = "tf2-gpu.{}".format(TF) else: DEPLOY_VERSION = "tf2-cpu.{}".format(TF) else: if TRAIN_GPU: TRAIN_VERSION = "tf-gpu.{}".format(TF) else: TRAIN_VERSION = "tf-cpu.{}".format(TF) if DEPLOY_GPU: DEPLOY_VERSION = "tf-gpu.{}".format(TF) else: DEPLOY_VERSION = "tf-cpu.{}".format(TF) TRAIN_IMAGE = "{}-docker.pkg.dev/vertex-ai/training/{}:latest".format( REGION.split("-")[0], TRAIN_VERSION ) DEPLOY_IMAGE = "{}-docker.pkg.dev/vertex-ai/prediction/{}:latest".format( REGION.split("-")[0], DEPLOY_VERSION ) print("Training:", TRAIN_IMAGE, TRAIN_GPU, TRAIN_NGPU) print("Deployment:", DEPLOY_IMAGE, DEPLOY_GPU, DEPLOY_NGPU)
_____no_output_____
Apache-2.0
notebooks/community/ml_ops/stage3/get_started_with_custom_training_pipeline_components.ipynb
changlan/vertex-ai-samples
Set machine type Next, set the machine type to use for training and prediction. - Set the variables `TRAIN_COMPUTE` and `DEPLOY_COMPUTE` to configure the compute resources for the VMs you will use for training and prediction. - `machine type` - `n1-standard`: 3.75GB of memory per vCPU - `n1-highmem`: 6.5GB of memory per vCPU - `n1-highcpu`: 0.9GB of memory per vCPU - `vCPUs`: one of \[2, 4, 8, 16, 32, 64, 96\] *Note: The following is not supported for training:* - `standard`: 2 vCPUs - `highcpu`: 2, 4 and 8 vCPUs *Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs*.
if os.getenv("IS_TESTING_TRAIN_MACHINE"): MACHINE_TYPE = os.getenv("IS_TESTING_TRAIN_MACHINE") else: MACHINE_TYPE = "n1-standard" VCPU = "4" TRAIN_COMPUTE = MACHINE_TYPE + "-" + VCPU print("Train machine type", TRAIN_COMPUTE) if os.getenv("IS_TESTING_DEPLOY_MACHINE"): MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE") else: MACHINE_TYPE = "n1-standard" VCPU = "4" DEPLOY_COMPUTE = MACHINE_TYPE + "-" + VCPU print("Deploy machine type", DEPLOY_COMPUTE)
_____no_output_____
Apache-2.0
notebooks/community/ml_ops/stage3/get_started_with_custom_training_pipeline_components.ipynb
changlan/vertex-ai-samples
Location of Cloud Storage training data Now set the variable `IMPORT_FILE` to the location of the CSV index file in Cloud Storage.
IMPORT_FILE = ( "gs://cloud-samples-data/vision/automl_classification/flowers/all_data_v2.csv" )
_____no_output_____
Apache-2.0
notebooks/community/ml_ops/stage3/get_started_with_custom_training_pipeline_components.ipynb
changlan/vertex-ai-samples
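Before building the pipeline, it can be useful to peek at the first rows of the index file. A quick sanity check (the exact column layout is whatever the export produced, typically one Cloud Storage image URI and its label per row):

! gsutil cat $IMPORT_FILE | head -5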
Examine the training package Package layout Before you start the training, you will look at how a Python package is assembled for a custom training job. When unarchived, the package contains the following directory/file layout. - PKG-INFO - README.md - setup.cfg - setup.py - trainer - \_\_init\_\_.py - task.py The files `setup.cfg` and `setup.py` are the instructions for installing the package into the operating environment of the Docker image. The file `trainer/task.py` is the Python script for executing the custom training job. *Note*: when we refer to it in the worker pool specification, we replace the directory slash with a dot (`trainer.task`) and drop the file suffix (`.py`). Package Assembly In the following cells, you will assemble the training package.
# Make folder for Python training script ! rm -rf custom ! mkdir custom # Add package information ! touch custom/README.md setup_cfg = "[egg_info]\n\ntag_build =\n\ntag_date = 0" ! echo "$setup_cfg" > custom/setup.cfg setup_py = "import setuptools\n\nsetuptools.setup(\n\n install_requires=[\n\n 'tensorflow==2.5.0',\n\n 'tensorflow_datasets==1.3.0',\n\n ],\n\n packages=setuptools.find_packages())" ! echo "$setup_py" > custom/setup.py pkg_info = "Metadata-Version: 1.0\n\nName: Flowers image classification\n\nVersion: 0.0.0\n\nSummary: Demostration training script\n\nHome-page: www.google.com\n\nAuthor: Google\n\nAuthor-email: [email protected]\n\nLicense: Public\n\nDescription: Demo\n\nPlatform: Vertex" ! echo "$pkg_info" > custom/PKG-INFO # Make the training subfolder ! mkdir custom/trainer ! touch custom/trainer/__init__.py
_____no_output_____
Apache-2.0
notebooks/community/ml_ops/stage3/get_started_with_custom_training_pipeline_components.ipynb
changlan/vertex-ai-samples
Create the task script for the Python training package Next, you create the `task.py` script for driving the training package. Some notable steps include: - Command-line arguments: - `data-format`: The format of the data. In this example, the data is exported from an `ImageDataSet` and will be in a JSONL format. - `train-data-dir`, `val-data-dir`, `test-data-dir`: The Cloud Storage locations of the train, validation and test data. When using Vertex AI custom training, these locations will be specified in the corresponding environment variables: `AIP_TRAINING_DATA_URI`, `AIP_VALIDATION_DATA_URI`, and `AIP_TEST_DATA_URI`. - `model-dir`: The location to save the trained model. When using Vertex AI custom training, the location will be specified in the environment variable `AIP_MODEL_DIR`. - `distribute`: single, mirrored or multi-worker distributed training strategy. - Data preprocessing (`get_data()`): - Compiles the one or more JSONL data files for a dataset, and constructs a `tf.data.Dataset()` generator for data preprocessing and model feeding. - Model architecture (`get_model()`): - Builds the corresponding model architecture. - Training (`train_model()`): - Trains the model. - Model artifact saving: - Saves the model artifacts, where the Cloud Storage location is determined based on the type of distributed training strategy.
%%writefile custom/trainer/task.py import tensorflow as tf from tensorflow.python.client import device_lib import argparse import os import sys import json import logging import tqdm def parse_args(): parser = argparse.ArgumentParser(description="TF.Keras Image Classification") # data source parser.add_argument("--data-format", default=os.getenv('AIP_DATA_FORMAT'), dest="data_format", type=str, help="data format") parser.add_argument("--train-data-dir", default=os.getenv('AIP_TRAINING_DATA_URI'), dest="train_data_dir", type=str, help="train data directory") parser.add_argument("--val-data-dir", default=os.getenv('AIP_VALIDATION_DATA_URI'), dest="val_data_dir", type=str, help="validation data directory") parser.add_argument("--test-data-dir", default=os.getenv('AIP_TEST_DATA_URI'), dest="test_data_dir", type=str, help="test data directory") # data preprocessing parser.add_argument("--image-width", dest="image_width", default=32, type=int, help="image width") parser.add_argument("--image-height", dest="image_height", default=32, type=int, help="image height") # model artifact location parser.add_argument( "--model-dir", default=os.getenv("AIP_MODEL_DIR"), type=str, help="model directory", ) # training hyperparameters parser.add_argument( "--lr", dest="lr", default=0.01, type=float, help="Learning rate." ) parser.add_argument("--batch-size", default=16, type=int, help="mini-batch size") parser.add_argument( "--epochs", default=10, type=int, help="number of training epochs" ) parser.add_argument( "--steps", dest="steps", default=200, type=int, help="Number of steps per epoch.", ) parser.add_argument( "--distribute", dest="distribute", type=str, default="single", help="distributed training strategy", ) args = parser.parse_args() return args args = parse_args() logging.getLogger().setLevel(logging.DEBUG) logging.info('DEVICES' + str(device_lib.list_local_devices())) # Single Machine, single compute device if args.distribute == 'single': if tf.test.is_gpu_available(): strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0") else: strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0") logging.info("Single device training") # Single Machine, multiple compute device elif args.distribute == 'mirrored': strategy = tf.distribute.MirroredStrategy() logging.info("Mirrored Strategy distributed training") # Multi Machine, multiple compute device elif args.distribute == 'multiworker': strategy = tf.distribute.MultiWorkerMirroredStrategy() logging.info("Multi-worker Strategy distributed training") logging.info('TF_CONFIG = {}'.format(os.environ.get('TF_CONFIG', 'Not found'))) logging.info('num_replicas_in_sync = {}'.format(strategy.num_replicas_in_sync)) NUM_WORKERS = strategy.num_replicas_in_sync GLOBAL_BATCH_SIZE = args.batch_size * NUM_WORKERS def _is_chief(task_type, task_id): ''' Check for primary if multiworker training ''' return (task_type == 'chief') or (task_type == 'worker' and task_id == 0) or task_type is None def get_data(): logging.info('DATA_FORMAT ' + args.data_format) logging.info('TRAINING_DATA_URI ' + args.train_data_dir) logging.info('VALIDATION_DATA_URI ' + args.val_data_dir) logging.info('TEST_DATA_URI ' + args.test_data_dir) class_names = ["daisy", "dandelion", "roses", "sunflowers", "tulips"] class_indices = dict(zip(class_names, range(len(class_names)))) num_classes = len(class_names) GLOBAL_BATCH_SIZE = args.batch_size * NUM_WORKERS def parse_image(filename): image = tf.io.read_file(filename) image = tf.image.decode_jpeg(image, channels=3) image = tf.image.resize(image, 
[args.image_width, args.image_height]) return image def scale(image, label): image = tf.cast(image, tf.float32) image /= 255.0 return image, label def extract(data_dir, batch_size=GLOBAL_BATCH_SIZE, repeat=True): data = [] labels = [] for data_uri in tqdm.tqdm(tf.io.gfile.glob(pattern=data_dir)): with tf.io.gfile.GFile(name=data_uri, mode="r") as gfile: for line in gfile.readlines(): instance = json.loads(line) data.append(instance["imageGcsUri"]) classification_annotation = instance["classificationAnnotations"][0] label = classification_annotation["displayName"] labels.append(class_indices[label]) data_dataset = tf.data.Dataset.from_tensor_slices(data) data_dataset = data_dataset.map( parse_image, num_parallel_calls=tf.data.experimental.AUTOTUNE ) label_dataset = tf.data.Dataset.from_tensor_slices(labels) label_dataset = label_dataset.map(lambda x: tf.one_hot(x, num_classes)) dataset = tf.data.Dataset.zip((data_dataset, label_dataset)).map(scale).cache().shuffle(batch_size * 32) if repeat: dataset = dataset.repeat() dataset = dataset.batch(batch_size) # Add property to retain the class names dataset.class_names = class_names return dataset logging.info('Prepare training data') train_dataset = extract(args.train_data_dir) logging.info('Prepare validation data') val_dataset = extract(args.val_data_dir, batch_size=1, repeat=False) return num_classes, train_dataset, val_dataset def get_model(num_classes): logging.info("Get model architecture") model = tf.keras.Sequential( [ tf.keras.layers.Conv2D( 32, 3, activation="relu", input_shape=(args.image_width, args.image_height, 3) ), tf.keras.layers.MaxPooling2D(), tf.keras.layers.Conv2D(32, 3, activation="relu"), tf.keras.layers.MaxPooling2D(), tf.keras.layers.Flatten(), tf.keras.layers.Dense(num_classes, activation="softmax"), ] ) model.compile( loss=tf.keras.losses.categorical_crossentropy, optimizer=tf.keras.optimizers.SGD(learning_rate=args.lr), metrics=["accuracy"], ) return model def train_model(model, train_dataset, val_dataset): logging.info("Start model training") history = model.fit( x=train_dataset, epochs=args.epochs, validation_data=val_dataset, steps_per_epoch=args.steps ) return history num_classes, train_dataset, val_dataset = get_data() with strategy.scope(): model = get_model(num_classes=num_classes) history = train_model(model, train_dataset, val_dataset) logging.info("Save the model to: " + args.model_dir) if args.distribute == 'multiworker': task_type, task_id = (strategy.cluster_resolver.task_type, strategy.cluster_resolver.task_id) else: task_type, task_id = None, None # single, mirrored or primary for multiworker if _is_chief(task_type, task_id): model.save(args.model_dir) # non-primary workers for multi-workers else: # each worker saves their model instance to a unique temp location worker_dir = args.model_dir + '/workertemp_' + str(task_id) tf.io.gfile.makedirs(worker_dir) model.save(worker_dir)
_____no_output_____
Apache-2.0
notebooks/community/ml_ops/stage3/get_started_with_custom_training_pipeline_components.ipynb
changlan/vertex-ai-samples
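Before packaging, you can smoke-test `task.py` locally with a short run. The sketch below assumes the JSONL files already exist at the given (hypothetical) Cloud Storage paths; in managed training, Vertex AI injects the real locations through the `AIP_*` environment variables:

! python3 custom/trainer/task.py \
    --data-format jsonl \
    --train-data-dir gs://example-bucket/data/train-*.jsonl \
    --val-data-dir gs://example-bucket/data/val-*.jsonl \
    --test-data-dir gs://example-bucket/data/test-*.jsonl \
    --model-dir /tmp/flowers_model \
    --epochs 1 --steps 10 --distribute single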
Store the training script on your Cloud Storage bucket Next, you package the training folder into a compressed tarball, and then store it in your Cloud Storage bucket.
! rm -f custom.tar custom.tar.gz ! tar cvf custom.tar custom ! gzip custom.tar ! gsutil cp custom.tar.gz $BUCKET_NAME/trainer_flowers.tar.gz
_____no_output_____
Apache-2.0
notebooks/community/ml_ops/stage3/get_started_with_custom_training_pipeline_components.ipynb
changlan/vertex-ai-samples
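You can verify the upload succeeded by listing the object (a quick sanity check):

! gsutil ls -l $BUCKET_NAME/trainer_flowers.tar.gz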
Construct custom training pipeline In the example below, you construct a pipeline for training a custom model using pre-built Google Cloud Pipeline Components for Vertex AI Training, as follows: 1. Pipeline arguments specify the locations of: - `import_file`: The CSV index file for the dataset. - `python_package`: The custom training Python package. - `python_module`: The entry module in the package to execute. 2. Use the prebuilt component `ImageDatasetCreateOp` to create a Vertex AI Dataset resource, where: - The display name for the dataset is passed into the pipeline. - The import file for the dataset is passed into the pipeline. - The component returns the dataset resource as `outputs["dataset"]`. 3. Use the prebuilt component `CustomPythonPackageTrainingJobRunOp` to train a custom model and upload the custom model as a Vertex AI Model resource, where: - The display name for the model is passed into the pipeline. - The dataset is the output from the `ImageDatasetCreateOp`. - The Python package and command-line arguments are passed into the pipeline. - The training and serving containers are specified in the pipeline definition. - The component returns the model resource as `outputs["model"]`. 4. Use the prebuilt component `EndpointCreateOp` to create a Vertex AI Endpoint to deploy the trained model to, where: - Since the component has no dependencies on other components, by default it would be executed in parallel with the model training. - The `after(training_op)` is added to serialize its execution, so it's only executed if the training operation completes successfully. - The component returns the endpoint resource as `outputs["endpoint"]`. 5. Use the prebuilt component `ModelDeployOp` to deploy the trained Vertex AI Model, where: - The model is the output from the `CustomPythonPackageTrainingJobRunOp`. - The endpoint is the output from the `EndpointCreateOp`. *Note*: Since each component is executed as a graph node in its own execution context, you pass the parameter `project` to each component op, in contrast to doing an `aip.init(project=project)` if this were a Python script calling the SDK methods directly within the same execution context.
from google_cloud_pipeline_components import aiplatform as gcc_aip PIPELINE_ROOT = "{}/pipeline_root/custom_icn_training".format(BUCKET_NAME) @dsl.pipeline( name="custom-icn-training", description="Custom image classification training" ) def pipeline( import_file: str, display_name: str, python_package: str, python_module: str, project: str = PROJECT_ID, region: str = REGION, ): dataset_op = gcc_aip.ImageDatasetCreateOp( project=project, display_name=display_name, gcs_source=import_file, import_schema_uri=aip.schema.dataset.ioformat.image.single_label_classification, ) training_op = gcc_aip.CustomPythonPackageTrainingJobRunOp( project=project, display_name=display_name, dataset=dataset_op.outputs["dataset"], # Training python_package_gcs_uri=python_package, python_module_name=python_module, container_uri=TRAIN_IMAGE, staging_bucket=PIPELINE_ROOT, annotation_schema_uri=aip.schema.dataset.annotation.image.classification, args=["--epochs", "50", "--image-width", "32", "--image-height", "32"], replica_count=1, machine_type=TRAIN_COMPUTE, accelerator_type=TRAIN_GPU.name, accelerator_count=TRAIN_NGPU, # Serving - As part of this operation, the model is registered to Vertex AI model_serving_container_image_uri=DEPLOY_IMAGE, model_display_name=display_name, ) endpoint_op = gcc_aip.EndpointCreateOp( project=project, location=region, display_name=display_name, ).after(training_op) deploy_op = gcc_aip.ModelDeployOp( model=training_op.outputs["model"], endpoint=endpoint_op.outputs["endpoint"], dedicated_resources_min_replica_count=1, dedicated_resources_max_replica_count=1, dedicated_resources_machine_type="n1-standard-4", )
_____no_output_____
Apache-2.0
notebooks/community/ml_ops/stage3/get_started_with_custom_training_pipeline_components.ipynb
changlan/vertex-ai-samples
Compile and execute the pipeline Next, you compile the pipeline and then execute it. The pipeline takes the following parameters, which are passed as the dictionary `parameter_values`: - `import_file`: The Cloud Storage path to the dataset index file. - `display_name`: The display name for the generated Vertex AI resources. - `python_package`: The Python package for the custom training job. - `python_module`: The Python module in the package to execute. - `project`: The project ID. - `region`: The region.
compiler.Compiler().compile( pipeline_func=pipeline, package_path="custom_icn_training.json" ) pipeline = aip.PipelineJob( display_name="custom_icn_training", template_path="custom_icn_training.json", pipeline_root=PIPELINE_ROOT, parameter_values={ "import_file": IMPORT_FILE, "display_name": "flowers" + TIMESTAMP, "python_package": f"{BUCKET_NAME}/trainer_flowers.tar.gz", "python_module": "trainer.task", "project": PROJECT_ID, "region": REGION, }, ) pipeline.run() ! rm -f custom_icn_training.json
_____no_output_____
Apache-2.0
notebooks/community/ml_ops/stage3/get_started_with_custom_training_pipeline_components.ipynb
changlan/vertex-ai-samples
Delete a pipeline job After a pipeline job is completed, you can delete the pipeline job with the method `delete()`. Prior to completion, a pipeline job can be canceled with the method `cancel()`.
pipeline.delete()
_____no_output_____
Apache-2.0
notebooks/community/ml_ops/stage3/get_started_with_custom_training_pipeline_components.ipynb
changlan/vertex-ai-samples
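For a long-running job, you can submit asynchronously and cancel before completion. A minimal sketch, reusing the same pipeline definition as above and assuming the compiled `custom_icn_training.json` template is still on disk; `run(sync=False)` returns once the job is submitted:

job = aip.PipelineJob(
    display_name="custom_icn_training",
    template_path="custom_icn_training.json",
    pipeline_root=PIPELINE_ROOT,
    parameter_values={
        "import_file": IMPORT_FILE,
        "display_name": "flowers" + TIMESTAMP,
        "python_package": f"{BUCKET_NAME}/trainer_flowers.tar.gz",
        "python_module": "trainer.task",
        "project": PROJECT_ID,
        "region": REGION,
    },
)
job.run(sync=False)  # submit without blocking
job.cancel()         # request cancellation of the in-progress run
job.delete()         # delete once the job reaches a terminal state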
Cleaning up To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial. Otherwise, you can delete the individual resources you created in this tutorial: - Dataset - Pipeline - Model - Endpoint - AutoML Training Job - Batch Job - Custom Job - Hyperparameter Tuning Job - Cloud Storage Bucket
delete_all = True if delete_all: # Delete the dataset using the Vertex dataset object try: if "dataset" in globals(): dataset.delete() except Exception as e: print(e) # Delete the model using the Vertex model object try: if "model" in globals(): model.delete() except Exception as e: print(e) # Delete the endpoint using the Vertex endpoint object try: if "endpoint" in globals(): endpoint.delete() except Exception as e: print(e) # Delete the AutoML or Pipeline training job try: if "dag" in globals(): dag.delete() except Exception as e: print(e) # Delete the custom training job try: if "job" in globals(): job.delete() except Exception as e: print(e) # Delete the batch prediction job using the Vertex batch prediction object try: if "batch_predict_job" in globals(): batch_predict_job.delete() except Exception as e: print(e) # Delete the hyperparameter tuning job using the Vertex hyperparameter tuning object try: if "hpt_job" in globals(): hpt_job.delete() except Exception as e: print(e) if "BUCKET_NAME" in globals(): ! gsutil rm -r $BUCKET_NAME
_____no_output_____
Apache-2.0
notebooks/community/ml_ops/stage3/get_started_with_custom_training_pipeline_components.ipynb
changlan/vertex-ai-samples
SAMUR Emergency Frequencies This notebook explores how the frequency of different types of emergencies changes across different periods (hours of the day, days of the week, months of the year...) and locations in Madrid. This will be useful for constructing a realistic emergency generator in the city simulation. Let's start with some imports and setup, and then read the table.
import pandas as pd import datetime import matplotlib.pyplot as plt import yaml %matplotlib inline df = pd.read_csv("../data/emergency_data.csv") df.head()
_____no_output_____
MIT
notebooks/emergency_frequencies.ipynb
samurai-madrid/reinforced-learning
The column for the time of the call is a string, so let's change that into a timestamp.
df["time_call"] = pd.to_datetime(df["Solicitud"])
_____no_output_____
MIT
notebooks/emergency_frequencies.ipynb
samurai-madrid/reinforced-learning
We will also need to assign a numerical code to each district of the city in order to properly vectorize the distribution and make it easier to work with other parts of the project.
district_codes = { 'Centro': 1, 'Arganzuela': 2, 'Retiro': 3, 'Salamanca': 4, 'Chamartín': 5, 'Tetuán': 6, 'Chamberí': 7, 'Fuencarral - El Pardo': 8, 'Moncloa - Aravaca': 9, 'Latina': 10, 'Carabanchel': 11, 'Usera': 12, 'Puente de Vallecas': 13, 'Moratalaz': 14, 'Ciudad Lineal': 15, 'Hortaleza': 16, 'Villaverde': 17, 'Villa de Vallecas': 18, 'Vicálvaro': 19, 'San Blas - Canillejas': 20, 'Barajas': 21, } df["district_code"] = df.Distrito.apply(lambda x: district_codes[x])
_____no_output_____
MIT
notebooks/emergency_frequencies.ipynb
samurai-madrid/reinforced-learning
Each emergency has already been assigned a severity level, depending on the nature of the reported emergency.
df["severity"] = df["Gravedad"]
_____no_output_____
MIT
notebooks/emergency_frequencies.ipynb
samurai-madrid/reinforced-learning
We also need the hour, weekday and month of the event in order to assign it to the various distributions.
df["hour"] = df["time_call"].apply(lambda x: x.hour) # From 0 to 23 df["weekday"] = df["time_call"].apply(lambda x: x.weekday()+1) # From 1 (Mon) to 7 (Sun) df["month"] = df["time_call"].apply(lambda x: x.month)
_____no_output_____
MIT
notebooks/emergency_frequencies.ipynb
samurai-madrid/reinforced-learning
Let's also strip down the dataset to just the columns we need right now.
df = df[["district_code", "severity", "time_call", "hour", "weekday", "month"]] df.head()
_____no_output_____
MIT
notebooks/emergency_frequencies.ipynb
samurai-madrid/reinforced-learning
We are going to group the distributions by severity.
emergencies_per_grav = df.severity.value_counts().sort_index().rename("total_emergencies") emergencies_per_grav
_____no_output_____
MIT
notebooks/emergency_frequencies.ipynb
samurai-madrid/reinforced-learning
We will also need the global frequency of the emergencies:
total_seconds = (df.time_call.max()-df.time_call.min()).total_seconds() frequencies_per_grav = (emergencies_per_grav / total_seconds).rename("emergency_frequencies") frequencies_per_grav
_____no_output_____
MIT
notebooks/emergency_frequencies.ipynb
samurai-madrid/reinforced-learning
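As a sanity check, the per-second frequencies can be converted into expected emergencies per day for each severity level:

(frequencies_per_grav * 24 * 3600).rename("emergencies_per_day")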
Each emergency will need to be assigned a district. Assuming the distribution of emergencies is independent across district and time, each will be assigned to a district according to a global probability based on this dataset, as follows.
prob_per_district = (df.district_code.value_counts().sort_index()/df.district_code.value_counts().sum()).rename("district_weight") prob_per_district
_____no_output_____
MIT
notebooks/emergency_frequencies.ipynb
samurai-madrid/reinforced-learning
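When generating an emergency, a district can then be sampled directly from these weights (a small sketch using numpy):

import numpy as np

district = np.random.choice(prob_per_district.index, p=prob_per_district.values)
district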
In order to be able to simplify the generation of emergencies, we are going to assume that the distributions of emergencies per hour, per weekday and per month are independent, sharing no correlation. This is obviously not fully true, but it is a good approximation for the chosen time-frames.
hourly_dist = (df.hour.value_counts()/df.hour.value_counts().mean()).sort_index().rename("hourly_distribution") daily_dist = (df.weekday.value_counts()/df.weekday.value_counts().mean()).sort_index().rename("daily_distribution") monthly_dist = (df.month.value_counts()/df.month.value_counts().mean()).sort_index().rename("monthly_distribution")
_____no_output_____
MIT
notebooks/emergency_frequencies.ipynb
samurai-madrid/reinforced-learning
We will actually make one of these per severity level. This will allow us to modify the base emergency density of a given severity as follows:
def emergency_density(gravity, hour, weekday, month): base_density = frequencies_per_grav[gravity] density = base_density * hourly_dist[hour] * daily_dist[weekday] * monthly_dist[month] return density emergency_density(3, 12, 4, 5) # Emergency frequency for severity level 3, at 12 hours of a thursday in May
_____no_output_____
MIT
notebooks/emergency_frequencies.ipynb
samurai-madrid/reinforced-learning
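One way a generator could use this density is to treat arrivals as a Poisson process whose rate is piecewise constant over each hour, so inter-arrival times are exponential. A minimal sketch (the Poisson assumption is ours, not something established above):

import numpy as np

def sample_interarrival_seconds(gravity, hour, weekday, month):
    # exponential waiting time with rate = current density (events per second)
    rate = emergency_density(gravity, hour, weekday, month)
    return np.random.exponential(1.0 / rate)

sample_interarrival_seconds(3, 12, 4, 5)  # seconds until the next severity-3 emergency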
In order for the model to read these distributions, we will need to store them in a dict-like format, in this case YAML, which is easily readable by both humans and machines.
dists = {} for severity in range(1, 6): sub_df = df[df["severity"] == severity] frequency = float(frequencies_per_grav.round(8)[severity]) hourly_dist = (sub_df.hour. value_counts()/sub_df.hour. value_counts().mean()).sort_index().round(5).to_dict() daily_dist = (sub_df.weekday.value_counts()/sub_df.weekday.value_counts().mean()).sort_index().round(5).to_dict() monthly_dist = (sub_df.month. value_counts()/sub_df.month. value_counts().mean()).sort_index().round(5).to_dict() district_prob = (sub_df.district_code.value_counts()/sub_df.district_code.value_counts().sum()).sort_index().round(5).to_dict() dists[severity] = {"frequency": frequency, "hourly_dist": hourly_dist, "daily_dist": daily_dist, "monthly_dist": monthly_dist, "district_prob": district_prob} f = open("../data/distributions.yaml", "w+") yaml.dump(dists, f, allow_unicode=True)
_____no_output_____
MIT
notebooks/emergency_frequencies.ipynb
samurai-madrid/reinforced-learning
We can now check that the dictionary stored in the YAML file is the same one we have created.
with open("../data/distributions.yaml") as dist_file: yaml_dict = yaml.safe_load(dist_file) yaml_dict == dists
_____no_output_____
MIT
notebooks/emergency_frequencies.ipynb
samurai-madrid/reinforced-learning
S3Fs Notebook Example S3Fs is a Pythonic file interface to S3. It builds on top of botocore. The top-level class S3FileSystem holds connection information and allows typical file-system style operations like cp, mv, ls, du, glob, etc., as well as put/get of local files to/from S3. The connection can be anonymous - in which case only publicly-available, read-only buckets are accessible - or via credentials explicitly supplied or in configuration files. API Version 2021.06.0: https://buildmedia.readthedocs.org/media/pdf/s3fs/latest/s3fs.pdf Note: If you get errors like `ModuleNotFoundError: No module named 's3fs'`, try `pip install s3fs` in a terminal and then restart your notebook:
import json import os import s3fs
_____no_output_____
MIT
self-serve-storage/python/s3Fs Examples.ipynb
DennisH3/jupyter-notebooks
Load the .json credentials file to make a connection to `S3FileSystem`.
tenant="standard" with open(f'/vault/secrets/minio-{tenant}-tenant-1.json') as f: creds = json.load(f)
_____no_output_____
MIT
self-serve-storage/python/s3Fs Examples.ipynb
DennisH3/jupyter-notebooks
Calling `open()` on an S3FileSystem (typically using a context manager) provides an S3File for read or write access to a particular key. The object emulates the standard file protocol (read, write, tell, seek), so that functions expecting a file can access S3.
HOST = creds['MINIO_URL'] SECURE = HOST.startswith('https') fs = s3fs.S3FileSystem( anon=False, use_ssl=SECURE, client_kwargs= { "region_name": "us-east-1", "endpoint_url": creds['MINIO_URL'], "aws_access_key_id": creds['AWS_ACCESS_KEY_ID'], "aws_secret_access_key": creds['AWS_SECRET_ACCESS_KEY'] } )
_____no_output_____
MIT
self-serve-storage/python/s3Fs Examples.ipynb
DennisH3/jupyter-notebooks
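With the connection in place, the `open()` interface described above works as expected. A quick sketch (the object key here is just an example):

KEY = os.environ['NB_NAMESPACE'] + '/s3fs-examples/hello.txt'
with fs.open(KEY, 'w') as f:
    f.write('Hello from s3fs!')
with fs.open(KEY, 'r') as f:
    print(f.read())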
Upload a file Now that your personal bucket exists, you can upload your files! We can use `example.txt` from the same folder as this notebook. **Note:** Bucket storage doesn't actually have real directories, so you won't find any functions for creating them. But some software will show you a directory structure by looking at the slashes (`/`) in the file names. We'll use this to put `example.txt` under an `/s3fs-examples` faux directory.
# Desired location in the bucket #NB_NAMESPACE: namespace of user e.g. rohan-katkar LOCAL_FILE='example.txt' REMOTE_FILE= os.environ['NB_NAMESPACE']+'/s3fs-examples/Happy-DAaaS-Bird.txt' fs.put(LOCAL_FILE,REMOTE_FILE)
_____no_output_____
MIT
self-serve-storage/python/s3Fs Examples.ipynb
DennisH3/jupyter-notebooks
Check path exists in bucket
fs.exists(os.environ['NB_NAMESPACE']+'/s3fs-examples')
_____no_output_____
MIT
self-serve-storage/python/s3Fs Examples.ipynb
DennisH3/jupyter-notebooks
List objects in bucket
fs.ls(os.environ['NB_NAMESPACE'])
_____no_output_____
MIT
self-serve-storage/python/s3Fs Examples.ipynb
DennisH3/jupyter-notebooks
List objects in path
x = fs.ls(os.environ['NB_NAMESPACE'] + '/s3fs-examples') for obj in x: print(f'Name: {obj}')
Name: rohan-katkar/s3fs-examples/Happy-DAaaS-Bird.txt
MIT
self-serve-storage/python/s3Fs Examples.ipynb
DennisH3/jupyter-notebooks
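The intro also mentioned glob and du; both work on the same prefix (a quick sketch):

print(fs.glob(os.environ['NB_NAMESPACE'] + '/s3fs-examples/*.txt'))  # pattern matching
print(fs.du(os.environ['NB_NAMESPACE'] + '/s3fs-examples'))          # total bytes under the prefix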
Download a file There is another method, `download(rpath, lpath[, recursive])`, but S3Fs has known issues with it; `get` is an equivalent method.
DL_FILE = 'downloaded_s3fsexample.txt' fs.get(os.environ['NB_NAMESPACE']+'/s3fs-examples/Happy-DAaaS-Bird.txt', DL_FILE) with open(DL_FILE, 'r') as file: print(file.read())
________________ / \ | Go DAaaS!!!! | | _______________/ |/ ^____, /` `\ / ^ > / / , / «^` // /=/ % ««.~ «_/ % ««\,___% ``\ \ ^ ^
MIT
self-serve-storage/python/s3Fs Examples.ipynb
DennisH3/jupyter-notebooks
Imports and Functions
import numpy as np from scipy.stats import special_ortho_group from scipy.spatial.transform import Rotation from scipy.linalg import svd import matplotlib.pyplot as plt plt.style.use('seaborn-whitegrid') FIGURE_SCALE = 1.0 FONT_SIZE = 20 plt.rcParams.update({ 'figure.figsize': np.array((8, 6)) * FIGURE_SCALE, 'axes.labelsize': FONT_SIZE, 'axes.titlesize': FONT_SIZE, 'xtick.labelsize': FONT_SIZE, 'ytick.labelsize': FONT_SIZE, 'legend.fontsize': FONT_SIZE, 'lines.linewidth': 3, 'lines.markersize': 10, }) def SO3_via_svd(A): """Map 3x3 matrix onto SO(3) via SVD.""" u, s, vt = np.linalg.svd(A) s_SO3 = [1, 1, np.sign(np.linalg.det(np.matmul(u, vt)))] return np.matmul(np.matmul(u, np.diag(s_SO3)), vt) def SO3_via_gramschmidt(A): """Map 3x3 matrix onto SO(3) via GS; ignores the last column.""" x_normalized = A[:, 0] / np.linalg.norm(A[:, 0]) z = np.cross(x_normalized, A[:, 1]) z_normalized = z / np.linalg.norm(z) y_normalized = np.cross(z_normalized, x_normalized) return np.stack([x_normalized, y_normalized, z_normalized], axis=1) def rotate_from_z(v): """Construct a rotation matrix R such that R * [0,0,||v||]^T = v. Input v is shape (3,), output shape is 3x3 """ vn = v / np.linalg.norm(v) theta = np.arccos(vn[2]) phi = np.arctan2(vn[1], vn[0]) r = Rotation.from_euler('zyz', [0, theta, phi]) R = np.squeeze(r.as_dcm()) # Maps Z to vn return R def perturb_rotation_matrix(R, kappa): """Perturb a rotation matrix with noise. Noise is a random small rotation applied to each of the three column vectors of R. The angle of rotation is sampled from the von Mises distribution on the circle (with uniform random azimuth). The von Mises distribution is analogous to the Gaussian distribution on the circle. Note: the concentration parameter kappa is inversely related to variance, so higher kappa means less variance and less noise applied. Good ranges for kappa are 64 (high noise) up to 512 (low noise). """ R_perturb = [] theta = np.random.vonmises(mu=0.0, kappa=kappa, size=(3,)) phi = np.random.uniform(low=0.0, high=np.pi*2.0, size=(3,)) for i in range(3): v = R[:, i] R_z_to_v = rotate_from_z(v) r_noise_z = np.squeeze(Rotation.from_euler('zyz', [0, theta[i], phi[i]]).as_dcm()) v_perturb = np.matmul(R_z_to_v, np.matmul(r_noise_z, np.array([0,0,1]))) R_perturb.append(v_perturb) R_perturb = np.stack(R_perturb, axis=-1) return R_perturb def sigma_to_kappa(sigma): return ((0.5 - sigma) * 1024) + 64 # We create a ground truth special orthogonal matrix and perturb it with # additive noise. We then see which orthogonalization process (SVD or GS) is # better at recovering the ground truth matrix. def run_expt(sigmas, num_trials, noise_type='gaussian'): # Always use identity as ground truth, or pick a random matrix. # Nothing should change if we pick random (can verify by setting to True) since # SVD and Gram-Schmidt are both equivariant to rotations. 
pick_random_ground_truth=False all_errs_svd = [] all_errs_gs = [] all_geo_errs_svd = [] all_geo_errs_gs = [] all_noise_norms = [] all_noise_sq_norms = [] for sig in sigmas: svd_errors = np.zeros(num_trials) gs_errors = np.zeros(num_trials) svd_geo_errors = np.zeros(num_trials) gs_geo_errors = np.zeros(num_trials) noise_norms = np.zeros(num_trials) noise_sq_norms = np.zeros(num_trials) for t in range(num_trials): if pick_random_ground_truth: A = special_ortho_group.rvs(3) # Pick a random ground truth matrix else: A = np.eye(3) # Our ground truth matrix in SO(3) N = None if noise_type == 'gaussian': N = np.random.standard_normal(size=(3,3)) * sig if noise_type == 'uniform': N = np.random.uniform(-1, 1, (3, 3)) * sig if noise_type == 'rademacher': N = np.sign(np.random.uniform(-1, 1, (3, 3))) * sig if noise_type == 'rotation': A_perturb = perturb_rotation_matrix(A, kappa=sigma_to_kappa(sig)) N = A_perturb - A if N is None: print('Error: unknown noise_type: %s' % noise_type) return AplusN = A + N # Ground-truth plus noise noise_norm = np.linalg.norm(N) noise_norm_sq = noise_norm**2 # Compute SVD result and error. res_svd = SO3_via_svd(AplusN) error_svd = np.linalg.norm(res_svd - A, ord='fro')**2 error_geodesic_svd = np.arccos( (np.trace(np.matmul(np.transpose(res_svd), A))-1.0)/2.0); # Compute GS result and error. res_gs = SO3_via_gramschmidt(AplusN) error_gs = np.linalg.norm(res_gs - A, ord='fro')**2 error_geodesic_gs = np.arccos( (np.trace(np.matmul(np.transpose(res_gs), A))-1.0)/2.0); svd_errors[t] = error_svd gs_errors[t] = error_gs svd_geo_errors[t] = error_geodesic_svd gs_geo_errors[t] = error_geodesic_gs noise_norms[t] = noise_norm noise_sq_norms[t] = noise_norm_sq all_errs_svd.append(svd_errors) all_errs_gs.append(gs_errors) all_geo_errs_svd.append(svd_geo_errors) all_geo_errs_gs.append(gs_geo_errors) all_noise_norms.append(noise_norms) all_noise_sq_norms.append(noise_sq_norms) print('finished sigma = %f / kappa = %f' % (sig, sigma_to_kappa(sig))) return [np.array(x) for x in ( all_errs_svd, all_errs_gs, all_geo_errs_svd, all_geo_errs_gs, all_noise_norms, all_noise_sq_norms)] boxprops = dict(linewidth=2) medianprops = dict(linewidth=2) whiskerprops = dict(linewidth=2) capprops = dict(linewidth=2) def make_diff_plot(svd_errs, gs_errs, xvalues, title='', ytitle='', xtitle=''): plt.figure(figsize=(8,6)) plt.title(title, fontsize=16) diff = gs_errs - svd_errs step_size = np.abs(xvalues[1] - xvalues[0]) plt.boxplot(diff.T, positions=xvalues, widths=step_size/2, whis=[5, 95], boxprops=boxprops, medianprops=medianprops, whiskerprops=whiskerprops, capprops=capprops, showmeans=False, meanline=True, showfliers=False) plt.plot(xvalues, np.max(diff, axis=1), 'kx', markeredgewidth=2) plt.plot(xvalues, np.min(diff, axis=1), 'kx', markeredgewidth=2) xlim = [np.min(xvalues) - (step_size / 3), np.max(xvalues) + (step_size / 3)] plt.xlim(xlim) plt.plot(xlim, [0, 0], 'k--', linewidth=1) plt.xlabel(xtitle, fontsize=16) plt.ylabel(ytitle, fontsize=16) plt.tight_layout()
_____no_output_____
Apache-2.0
special_orthogonalization/svd_vs_gs_simulations.ipynb
wy-go/google-research
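A minimal driver sketch showing how these helpers fit together; the sigma grid, trial count and noise type below are illustrative choices, not values from the original experiments:

sigmas = np.linspace(0.05, 0.5, 10)
errs_svd, errs_gs, _, _, _, _ = run_expt(sigmas, num_trials=500, noise_type='gaussian')
make_diff_plot(errs_svd, errs_gs, sigmas,
               title='Gaussian noise',
               ytitle='GS error minus SVD error',
               xtitle='sigma')
plt.show()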