python_code | repo_name | file_path |
---|---|---|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
class PerlinNoiseGenerator(object):
def __init__(self, random_state=None):
self.rand = np.random if random_state is None else random_state
B = 256
N = 16*256
def normalize(arr):
return arr / np.linalg.norm(arr)
self.p = np.arange(2*B+2)
        self.g = np.array([normalize((self.rand.randint(low=0, high=2**31, size=2) % (2*B) - B) / B)\
for i in range(2*B+2)])
for i in np.arange(B-1,-1,-1):
k = self.p[i]
j = self.rand.randint(low=0, high=2**31) % B
self.p[i] = self.p[j]
self.p[j] = k
for i in range(B+2):
self.p[B+i] = self.p[i]
self.g[B+i,:] = self.g[i,:]
self.B = B
self.N = N
    @staticmethod
    def s_curve(t):
return t**2 * (3.0 - 2.0 * t)
def noise(self, x, y):
t = x + self.N
bx0 = int(t) % self.B
bx1 = (bx0+1) % self.B
rx0 = t % 1
rx1 = rx0 - 1.0
t = y + self.N
by0 = int(t) % self.B
by1 = (by0+1) % self.B
ry0 = t % 1
ry1 = ry0 - 1.0
i = self.p[bx0]
j = self.p[bx1]
b00 = self.p[i + by0]
b10 = self.p[j + by0]
b01 = self.p[i + by1]
b11 = self.p[j + by1]
sx = PerlinNoiseGenerator.s_curve(rx0)
sy = PerlinNoiseGenerator.s_curve(ry0)
u = rx0 * self.g[b00,0] + ry0 * self.g[b00,1]
v = rx1 * self.g[b10,0] + ry0 * self.g[b10,1]
a = u + sx * (v - u)
u = rx0 * self.g[b01,0] + ry1 * self.g[b01,1]
v = rx1 * self.g[b11,0] + ry1 * self.g[b11,1]
b = u + sx * (v - u)
return 1.5 * (a + sy * (b - a))
def turbulence(self, x, y, octaves):
t = 0.0
f = 1.0
while f <= octaves:
t += np.abs(self.noise(f*x, f*y)) / f
f = f * 2
return t
| augmentation-corruption-fbr_main | imagenet_c_bar/utils/perlin_noise.py |
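A minimal usage sketch for the `PerlinNoiseGenerator` above (editorial addition, not part of the original file). It assumes a seeded `np.random.RandomState` as the `random_state` argument and renders a small turbulence image; the grid scale and octave count are illustrative.

```python
import numpy as np

rng = np.random.RandomState(0)
gen = PerlinNoiseGenerator(random_state=rng)

size = 64
img = np.zeros((size, size))
for i in range(size):
    for j in range(size):
        # Sample smooth 2-D turbulence on a coarse grid; 4 octaves is an arbitrary choice.
        img[i, j] = gen.turbulence(i / 16.0, j / 16.0, octaves=4)

print(img.shape, float(img.min()), float(img.max()))
```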
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
#def smoothstep(low, high, x):
# return np.clip(3 * (x ** 2) - 2 * (x ** 3), 0, 1) * (high - low) + low
def smoothstep(low, high, x):
x = np.clip(x, low, high)
x = (x - low) / (high - low)
return np.clip(3 * (x ** 2) - 2 * (x ** 3), 0, 1)
def bilinear_interpolation(image, point):
l = int(np.floor(point[0]))
u = int(np.floor(point[1]))
r, d = l+1, u+1
lu = image[l,u,:] if l >= 0 and l < image.shape[0]\
and u >= 0 and u < image.shape[1] else np.array([0,0,0])
ld = image[l,d,:] if l >= 0 and l < image.shape[0]\
and d >= 0 and d < image.shape[1] else np.array([0,0,0])
ru = image[r,u,:] if r >= 0 and r < image.shape[0]\
and u >= 0 and u < image.shape[1] else np.array([0,0,0])
rd = image[r,d,:] if r >= 0 and r < image.shape[0]\
and d >= 0 and d < image.shape[1] else np.array([0,0,0])
al = lu * (1.0 - point[1] + u) + ld * (1.0 - d + point[1])
ar = ru * (1.0 - point[1] + u) + rd * (1.0 - d + point[1])
out = al * (1.0 - point[0] + l) + ar * (1.0 - r + point[0])
return out
| augmentation-corruption-fbr_main | imagenet_c_bar/utils/image.py |
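A short usage sketch for the two helpers above (editorial addition, not part of the original file); the toy 2x2 image is illustrative.

```python
import numpy as np

# smoothstep maps x smoothly onto [0, 1] over the interval [low, high].
print(smoothstep(0.0, 1.0, np.array([-0.5, 0.25, 0.5, 0.75, 1.5])))

# bilinear_interpolation samples an H x W x 3 image at a fractional (row, col) point;
# neighbours that fall outside the image contribute zeros.
image = np.arange(2 * 2 * 3, dtype=float).reshape(2, 2, 3)
print(bilinear_interpolation(image, (0.5, 0.5)))  # average of the four corner pixels
```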
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
def train_model(model, dataset, num_workers, gpu_device):
max_epochs = 100
batch_size = 128
base_lr = 0.1
# Cosine learning rate decay
def get_lr(cur_epoch):
return 0.5 * base_lr * (1.0 + np.cos(np.pi * cur_epoch / max_epochs))
optim = torch.optim.SGD(model.parameters(),
lr=base_lr,
nesterov=True,
momentum=0.9,
weight_decay=0.0005,
dampening=0.0,
)
dataloader = torch.utils.data.DataLoader(
dataset=dataset,
        batch_size=batch_size,
shuffle=True,
num_workers=num_workers,
pin_memory=True,
drop_last=True
)
loss_fun = torch.nn.CrossEntropyLoss().cuda(device=gpu_device)
model.train()
    for cur_epoch in range(max_epochs):
        epoch_loss = 0  # reset the running loss each epoch so the printed average is per-epoch
#Set learning rate for current epoch
for param_group in optim.param_groups:
param_group['lr'] = get_lr(cur_epoch)
for inputs, labels in dataloader:
inputs = inputs.cuda(device=gpu_device)
labels = labels.cuda(device=gpu_device, non_blocking=True)
preds = model(inputs)
loss = loss_fun(preds, labels)
optim.zero_grad()
loss.backward()
optim.step()
epoch_loss += loss.item()
epoch_loss /= len(dataloader)
print("Completed epoch {}. Average training loss: {}".format(cur_epoch+1, epoch_loss))
model.eval()
| augmentation-corruption-fbr_main | notebook_utils/training_loop.py |
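A hedged usage sketch for `train_model` (editorial addition, not part of the original file). It assumes a CUDA device is available and substitutes a toy linear classifier plus random CIFAR-shaped tensors for a real model and dataset; with `drop_last=True` and batch size 128, the dataset needs at least 128 samples.

```python
import torch
import torch.nn as nn

if torch.cuda.is_available():
    gpu_device = 0
    # Placeholder model and data; swap in a real classifier and a real (image, label) dataset.
    model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10)).cuda(device=gpu_device)
    images = torch.randn(256, 3, 32, 32)
    labels = torch.randint(0, 10, (256,))
    dataset = torch.utils.data.TensorDataset(images, labels)
    train_model(model, dataset, num_workers=2, gpu_device=gpu_device)
```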
# This source code is adapted from code licensed under the MIT license
# found in third_party/wideresnet_license from the root directory of
# this source tree.
"""WideResNet implementation (https://arxiv.org/abs/1605.07146)."""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class BasicBlock(nn.Module):
"""Basic ResNet block."""
def __init__(self, in_planes, out_planes, stride, drop_rate=0.0):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False)
self.bn2 = nn.BatchNorm2d(out_planes)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(
out_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False)
self.drop_rate = drop_rate
self.is_in_equal_out = (in_planes == out_planes)
self.conv_shortcut = (not self.is_in_equal_out) and nn.Conv2d(
in_planes,
out_planes,
kernel_size=1,
stride=stride,
padding=0,
bias=False) or None
def forward(self, x):
if not self.is_in_equal_out:
x = self.relu1(self.bn1(x))
else:
out = self.relu1(self.bn1(x))
if self.is_in_equal_out:
out = self.relu2(self.bn2(self.conv1(out)))
else:
out = self.relu2(self.bn2(self.conv1(x)))
if self.drop_rate > 0:
out = F.dropout(out, p=self.drop_rate, training=self.training)
out = self.conv2(out)
if not self.is_in_equal_out:
return torch.add(self.conv_shortcut(x), out)
else:
return torch.add(x, out)
class NetworkBlock(nn.Module):
"""Layer container for blocks."""
def __init__(self,
nb_layers,
in_planes,
out_planes,
block,
stride,
drop_rate=0.0):
super(NetworkBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, out_planes, nb_layers,
stride, drop_rate)
def _make_layer(self, block, in_planes, out_planes, nb_layers, stride,
drop_rate):
layers = []
for i in range(nb_layers):
layers.append(
block(i == 0 and in_planes or out_planes, out_planes,
i == 0 and stride or 1, drop_rate))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
class WideResNet(nn.Module):
"""WideResNet class."""
def __init__(self, depth, num_classes, widen_factor=1, drop_rate=0.0):
super(WideResNet, self).__init__()
self.depth = depth
self.widen_factor = widen_factor
n_channels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
assert (depth - 4) % 6 == 0
n = (depth - 4) // 6
block = BasicBlock
# 1st conv before any network block
self.conv1 = nn.Conv2d(
3, n_channels[0], kernel_size=3, stride=1, padding=1, bias=False)
# 1st block
self.block1 = NetworkBlock(n, n_channels[0], n_channels[1], block, 1,
drop_rate)
# 2nd block
self.block2 = NetworkBlock(n, n_channels[1], n_channels[2], block, 2,
drop_rate)
# 3rd block
self.block3 = NetworkBlock(n, n_channels[2], n_channels[3], block, 2,
drop_rate)
# global average pooling and classifier
self.bn1 = nn.BatchNorm2d(n_channels[3])
self.relu = nn.ReLU(inplace=True)
self.fc = nn.Linear(n_channels[3], num_classes)
self.n_channels = n_channels[3]
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
out = self.conv1(x)
out = self.block1(out)
out = self.block2(out)
out = self.block3(out)
out = self.relu(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(-1, self.n_channels)
self.features = out #Expose penultimate layer for access as features
return self.fc(out)
# Stage depths for ImageNet models
_IN_STAGE_DS = {
18: (2, 2, 2, 2),
50: (3, 4, 6, 3),
101: (3, 4, 23, 3),
152: (3, 8, 36, 3),
}
| augmentation-corruption-fbr_main | notebook_utils/wideresnet.py |
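A usage sketch for the WideResNet above (editorial addition, not part of the original file), instantiating a WRN-28-10 for CIFAR-style 32x32 inputs; note that `(depth - 4) % 6 == 0` must hold and the 8x8 average pool in `forward()` assumes 32x32 inputs.

```python
import torch

model = WideResNet(depth=28, num_classes=10, widen_factor=10, drop_rate=0.3)
x = torch.randn(4, 3, 32, 32)
logits = model(x)
print(logits.shape)          # torch.Size([4, 10])
print(model.features.shape)  # torch.Size([4, 640]); penultimate features exposed by forward()
```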
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from data_preprocessing import CLIMATE_VARS
from data_preprocessing.sample_quadruplets import generate_training_for_counties
from netCDF4 import Dataset
import pandas as pd
import numpy.ma as ma
from collections import defaultdict
import numpy as np
def generate_dims_for_counties(croptype):
yield_data = pd.read_csv('processed_data/crop_yield/{}_2000_2018.csv'.format(croptype))[[
'Year', 'State ANSI', 'County ANSI', 'Value']]
yield_data.columns = ['year', 'state', 'county', 'value']
ppt_fh = Dataset('experiment_data/spatial_temporal/nc_files/2014.nc', 'r')
v_ppt = ppt_fh.variables['ppt'][0, :, :]
if yield_data.value.dtype != float:
yield_data['value'] = yield_data['value'].str.replace(',', '')
yield_data = yield_data.astype({'year': int, 'state': int, 'county': int, 'value': float})
counties = pd.read_csv('processed_data/counties/lst/us_counties_cro_cvm_locations.csv')
county_dic = {}
for c in counties.itertuples():
state, county, lat, lon, lat0, lat1, lon0, lon1 = c.state, c.county, c.lat, c.lon, c.lat0, c.lat1, c.lon0, c.lon1
county_dic[(state, county)] = [lat, lon, lat0, lat1, lon0, lon1]
yield_dim_csv = []
for yd in yield_data.itertuples():
year, state, county, value = yd.year, yd.state, yd.county, yd.value
if (state, county) not in county_dic:
continue
lat, lon, lat0, lat1, lon0, lon1 = county_dic[(state, county)]
assert lat1 - lat0 == 49
assert lon1 - lon0 == 49
selected_ppt = v_ppt[lat0:lat1+1, lon0:lon1+1]
if ma.count_masked(selected_ppt) != 0:
continue
yield_dim_csv.append([state, county, year, value, lat, lon])
yield_dim_csv = pd.DataFrame(yield_dim_csv, columns=['state', 'county', 'year', 'value', 'lat', 'lon'])
yield_dim_csv.to_csv('experiment_data/spatial_temporal/counties/deep_gaussian_dim_y.csv')
def get_max_min_val_for_climate_variable(img_dir):
cv_dic = defaultdict(list)
for year in range(2000, 2014):
fh = Dataset('{}/{}.nc'.format(img_dir, year))
for v_name, varin in fh.variables.items():
if v_name in CLIMATE_VARS:
cv_dic[v_name].append(varin[:].compressed())
fh.close()
for cv in CLIMATE_VARS:
        values = np.concatenate(cv_dic[cv])
print(cv, np.percentile(values, 95))
print(cv, np.percentile(values, 5))
if __name__ == '__main__':
generate_dims_for_counties(croptype='soybeans')
get_max_min_val_for_climate_variable('experiment_data/spatial_temporal/nc_files')
for nr in [25]:
# for nr in [10, 25, 50, 100, 500, None]:
generate_training_for_counties(out_dir='experiment_data/deep_gaussian/counties',
img_dir='experiment_data/spatial_temporal/nc_files',
start_month=3, end_month=9, start_month_index=1, n_spatial_neighbor=1, n_distant=1,
img_timestep_quadruplets=
'experiment_data/spatial_temporal/counties/img_timestep_quadruplets_hard.csv',
img_size=50, neighborhood_radius=nr, prenorm=False)
| Context-Aware-Representation-Crop-Yield-Prediction-main | generate_for_deep_gaussian.py |
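A small sketch (editorial addition, not part of the original file) of the masked-window filter used in `generate_dims_for_counties`: a county is kept only if its 50x50 precipitation window contains no masked cells. The array below is synthetic.

```python
import numpy as np
import numpy.ma as ma

v_ppt = ma.masked_invalid(np.random.rand(200, 300))
v_ppt[10:12, 20:22] = ma.masked  # pretend a few grid cells are missing

lat0, lat1, lon0, lon1 = 0, 49, 0, 49  # a 50x50 window, matching the asserts above
selected_ppt = v_ppt[lat0:lat1 + 1, lon0:lon1 + 1]
keep = ma.count_masked(selected_ppt) == 0
print(selected_ppt.shape, keep)  # (50, 50) False, because the window overlaps the masked cells
```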
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from crop_yield_prediction.models.deep_gaussian_process import *
from pathlib import Path
import torch
import argparse
def train_cnn_gp(times, train_years, dropout=0.5, dense_features=None,
pred_years=range(2014, 2019), num_runs=2, train_steps=25000,
batch_size=32, starter_learning_rate=1e-3, weight_decay=0, l1_weight=0,
patience=None, use_gp=True, sigma=1, r_loc=0.5, r_year=1.5, sigma_e=0.32, sigma_b=0.01,
device=torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')):
histogram_path = Path('data/deep_gaussian/data.npz')
savedir = Path('results/deep_gaussian/nt{}_tyear{}_cnn'.format(times[0], train_years))
model = ConvModel(in_channels=9, dropout=dropout, dense_features=dense_features,
savedir=savedir, use_gp=use_gp, sigma=sigma, r_loc=r_loc,
r_year=r_year, sigma_e=sigma_e, sigma_b=sigma_b, device=device)
model.run(times, train_years, histogram_path, pred_years, num_runs, train_steps, batch_size,
starter_learning_rate, weight_decay, l1_weight, patience)
def train_rnn_gp(times, train_years, num_bins=32, hidden_size=128,
rnn_dropout=0.75, dense_features=None, pred_years=range(2014, 2019),
num_runs=2, train_steps=10000, batch_size=32, starter_learning_rate=1e-3, weight_decay=0,
l1_weight=0, patience=None, use_gp=True, sigma=1, r_loc=0.5, r_year=1.5, sigma_e=0.32, sigma_b=0.01,
device=torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')):
histogram_path = Path('data/deep_gaussian/data.npz')
savedir = Path('results/deep_gaussian/nt{}_tyear{}_rnn'.format(times[0], train_years))
model = RNNModel(in_channels=9, num_bins=num_bins, hidden_size=hidden_size,
rnn_dropout=rnn_dropout, dense_features=dense_features,
savedir=savedir, use_gp=use_gp, sigma=sigma, r_loc=r_loc, r_year=r_year,
sigma_e=sigma_e, sigma_b=sigma_b, device=device)
model.run(times, train_years, histogram_path, pred_years, num_runs, train_steps, batch_size,
starter_learning_rate, weight_decay, l1_weight, patience)
if __name__ == '__main__':
get_features_for_deep_gaussian()
parser = argparse.ArgumentParser()
parser.add_argument('--type', type=str)
parser.add_argument('--time', type=int, default=None, metavar='TIME', required=True)
parser.add_argument('--train-years', type=int, default=None, metavar='TRAINYEAR', required=True)
args = parser.parse_args()
model_type = args.type
times = [args.time]
train_years = args.train_years
if model_type == 'cnn':
train_cnn_gp(times, train_years)
elif model_type == 'rnn':
train_rnn_gp(times, train_years)
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_deep_gaussian.py |
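Invocation sketch (editorial addition, not part of the original file); the argument values are illustrative only.

```python
# From the command line, per the argparse definition above:
#     python crop_yield_deep_gaussian.py --type cnn --time 5 --train-years 10
# which, after the feature-generation step, is roughly equivalent to:
train_cnn_gp(times=[5], train_years=10)
```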
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
import pandas as pd
import argparse
import torch.optim as optim
from pathlib import Path
import sys
sys.path.append("..")
from crop_yield_prediction import CLIMATE_VARS
from crop_yield_prediction.models.semi_transformer import SemiTransformer
from crop_yield_prediction.train_semi_transformer import train_attention
from crop_yield_prediction.utils import plot_predict
from crop_yield_prediction.utils import plot_predict_error
from crop_yield_prediction.utils import output_to_csv_simple
from crop_yield_prediction.train_semi_transformer import eval_test
def crop_yield_train_semi_transformer(args, data_dir, model_out_dir, result_out_dir, log_out_dir, start_year, end_year,
n_tsteps, train_years=None):
batch_size = 64
test_batch_size = 128
n_triplets_per_file = 1
epochs = 50
attention_nhead = 8
adam_lr = 0.001
adam_betas = (0.9, 0.999)
n_experiment = 2
neighborhood_radius = args.neighborhood_radius
distant_radius = args.distant_radius
weight_decay = args.weight_decay
tilenet_margin = args.tilenet_margin
tilenet_l2 = args.tilenet_l2
tilenet_ltn = args.tilenet_ltn
tilenet_zdim = args.tilenet_zdim
attention_layer = args.attention_layer
attention_dff = args.attention_dff
sentence_embedding = args.sentence_embedding
dropout = args.dropout
unsup_weight = args.unsup_weight
patience = args.patience if args.patience != 9999 else None
feature = args.feature
feature_len = args.feature_len
query_type = args.query_type
assert tilenet_zdim % attention_nhead == 0
params = '{}_nt{}_nr{}_dr{}_wd{}_mar{}_l2{}_ltn{}_zd{}_al{}_adff{}_se{}_dr{}_uw{}_es{}_{}_tyear{}_qt{}'.format(
start_year,
n_tsteps,
neighborhood_radius,
distant_radius,
weight_decay,
tilenet_margin, tilenet_l2,
tilenet_ltn, tilenet_zdim,
attention_layer, attention_dff,
sentence_embedding, dropout,
unsup_weight, patience, feature,
train_years, query_type)
os.makedirs(log_out_dir, exist_ok=True)
param_model_out_dir = '{}/{}'.format(model_out_dir, params)
os.makedirs(param_model_out_dir, exist_ok=True)
param_result_out_dir = '{}/{}'.format(result_out_dir, params)
os.makedirs(param_result_out_dir, exist_ok=True)
if feature == 'all':
X_dir = '{}/nr_{}'.format(data_dir, neighborhood_radius) if distant_radius is None else \
'{}/nr_{}_dr{}'.format(data_dir, neighborhood_radius, distant_radius)
else:
X_dir = '{}/nr_{}_{}'.format(data_dir, neighborhood_radius, feature) if distant_radius is None else \
'{}/nr_{}_dr{}_{}'.format(data_dir, neighborhood_radius, distant_radius, feature)
dim_y = pd.read_csv('{}/dim_y.csv'.format(data_dir))
dim_y = dim_y.astype({'state': int, 'county': int, 'year': int, 'value': float, 'lat': float, 'lon': float})
max_index = len(dim_y) - 1
results = dict()
for year in range(start_year, end_year + 1):
print('Predict year {}......'.format(year))
test_idx = (dim_y['year'] == year)
valid_idx = (dim_y['year'] == (year - 1))
if train_years is None:
train_idx = (dim_y['year'] < (year - 1))
else:
train_idx = (dim_y['year'] < (year - 1)) & (dim_y['year'] >= (year - 1 - train_years))
y_valid, y_train = np.array(dim_y.loc[valid_idx]['value']), np.array(dim_y.loc[train_idx]['value'])
y_test, dim_test = np.array(dim_y.loc[test_idx]['value']), np.array(dim_y.loc[test_idx][['state', 'county']])
test_indices = [i for i, x in enumerate(test_idx) if x]
valid_indices = [i for i, x in enumerate(valid_idx) if x]
train_indices = [i for i, x in enumerate(train_idx) if x]
# check if the indices are sequential
assert all(elem == 1 for elem in [y - x for x, y in zip(test_indices[:-1], test_indices[1:])])
assert all(elem == 1 for elem in [y - x for x, y in zip(valid_indices[:-1], valid_indices[1:])])
assert all(elem == 1 for elem in [y - x for x, y in zip(train_indices[:-1], train_indices[1:])])
print('Train size {}, valid size {}, test size {}'.format(y_train.shape[0], y_valid.shape[0], y_test.shape[0]))
test_corr_lis, test_r2_lis, test_rmse_lis = [], [], []
test_prediction_lis = []
for i in range(n_experiment):
print('Experiment {}'.format(i))
semi_transformer = SemiTransformer(
tn_in_channels=feature_len,
tn_z_dim=tilenet_zdim,
tn_warm_start_model=None,
sentence_embedding=sentence_embedding,
output_pred=True,
query_type=query_type,
attn_n_tsteps=n_tsteps,
d_word_vec=tilenet_zdim,
d_model=tilenet_zdim,
d_inner=attention_dff,
n_layers=attention_layer,
n_head=attention_nhead,
d_k=tilenet_zdim//attention_nhead,
d_v=tilenet_zdim//attention_nhead,
dropout=dropout,
apply_position_enc=True)
optimizer = optim.Adam(semi_transformer.parameters(), lr=adam_lr, betas=adam_betas, weight_decay=weight_decay)
trained_epochs = train_attention(model=semi_transformer,
X_dir=X_dir,
X_train_indices=(train_indices[0], train_indices[-1]),
y_train=y_train,
X_valid_indices=(valid_indices[0], valid_indices[-1]),
y_valid=y_valid,
X_test_indices=(test_indices[0], test_indices[-1]),
y_test=y_test,
n_tsteps=n_tsteps,
max_index=max_index,
n_triplets_per_file=n_triplets_per_file,
tilenet_margin=tilenet_margin,
tilenet_l2=tilenet_l2,
tilenet_ltn=tilenet_ltn,
unsup_weight=unsup_weight,
patience=patience,
optimizer=optimizer,
batch_size=batch_size,
test_batch_size=test_batch_size,
n_epochs=epochs,
out_dir=param_model_out_dir,
year=year,
exp_idx=i,
log_file='{}/{}.txt'.format(log_out_dir, params))
test_prediction, rmse, r2, corr = eval_test(X_dir,
X_test_indices=(test_indices[0], test_indices[-1]),
y_test=y_test,
n_tsteps=n_tsteps,
max_index=max_index,
n_triplets_per_file=n_triplets_per_file,
batch_size=test_batch_size,
model_dir=param_model_out_dir,
model=semi_transformer,
epochs=trained_epochs,
year=year,
exp_idx=i,
log_file='{}/{}.txt'.format(log_out_dir, params))
test_corr_lis.append(corr)
test_r2_lis.append(r2)
test_rmse_lis.append(rmse)
test_prediction_lis.append(test_prediction)
test_prediction = np.mean(np.asarray(test_prediction_lis), axis=0)
np.save('{}/{}.npy'.format(param_result_out_dir, year), test_prediction)
plot_predict(test_prediction, dim_test, Path('{}/pred_{}.html'.format(param_result_out_dir, year)))
plot_predict_error(test_prediction, y_test, dim_test, Path('{}/err_{}.html'.format(param_result_out_dir, year)))
results[year] = {'test_rmse': np.around(np.mean(test_rmse_lis), 3),
'test_r2': np.around(np.mean(test_r2_lis), 3),
'test_corr': np.around(np.mean(test_corr_lis), 3)}
output_to_csv_simple(results, param_result_out_dir)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Crop Yield Train Semi Transformer')
parser.add_argument('--neighborhood-radius', type=int, default=None, metavar='NR')
parser.add_argument('--distant-radius', type=int, default=None, metavar='DR')
parser.add_argument('--weight-decay', type=float, metavar='WDECAY')
parser.add_argument('--tilenet-margin', type=float, default=50.0, metavar='MARGIN')
parser.add_argument('--tilenet-l2', type=float, default=0.01, metavar='L2')
parser.add_argument('--tilenet-ltn', type=float, default=0.1, metavar='LTN')
parser.add_argument('--tilenet-zdim', type=int, default=512, metavar='ZDIM')
parser.add_argument('--attention-layer', type=int, default=2, metavar='ALAYER')
parser.add_argument('--attention-dff', type=int, default=2048, metavar='ADFF')
parser.add_argument('--sentence-embedding', type=str, default='simple_average', metavar='SEMD')
parser.add_argument('--query-type', type=str, default='fixed', metavar='QTYPE')
parser.add_argument('--dropout', type=float, default=0.1, metavar='DROPOUT')
parser.add_argument('--unsup-weight', type=float, default=0.2, metavar='UWEIGHT')
parser.add_argument('--patience', type=int, default=9999, metavar='PATIENCE')
parser.add_argument('--feature', type=str, default='all', metavar='FEATURE')
parser.add_argument('--feature-len', type=int, default=9, metavar='FEATURE_LEN')
parser.add_argument('--year', type=int, default=2014, metavar='YEAR')
parser.add_argument('--ntsteps', type=int, default=7, metavar='NTSTEPS', required=True)
parser.add_argument('--train-years', type=int, default=None, metavar='TRAINYEAR', required=True)
args = parser.parse_args()
crop_yield_train_semi_transformer(args,
data_dir='data/spatial_temporal/counties',
model_out_dir='results/spatial_temporal/models',
result_out_dir='results/spatial_temporal/results',
log_out_dir='results/spatial_temporal/prediction_logs',
start_year=args.year,
end_year=args.year,
n_tsteps=args.ntsteps,
train_years=args.train_years)
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_train_semi_transformer.py |
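Invocation sketch (editorial addition, not part of the original file); values are illustrative. Note that `--weight-decay` has no default in the parser above, so it should be passed explicitly.

```python
# python crop_yield_train_semi_transformer.py --ntsteps 7 --train-years 10 \
#     --neighborhood-radius 25 --distant-radius 100 --weight-decay 0.0 --year 2014
```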
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
import pandas as pd
import argparse
import torch.optim as optim
from pathlib import Path
import sys
sys.path.append("..")
from crop_yield_prediction.models.cnn_lstm import CnnLstm
from crop_yield_prediction.train_cnn_lstm import train_cnn_lstm
from crop_yield_prediction.utils import plot_predict
from crop_yield_prediction.utils import plot_predict_error
from crop_yield_prediction.utils import output_to_csv_simple
from crop_yield_prediction.train_cnn_lstm import eval_test
def crop_yield_train_cnn_lstm(args, data_dir, model_out_dir, result_out_dir, log_out_dir, start_year, end_year,
n_tsteps, train_years=None):
batch_size = 64
test_batch_size = 128
n_triplets_per_file = 1
epochs = 50
n_experiment = 2
patience = args.patience if args.patience != 9999 else None
feature = args.feature
feature_len = args.feature_len
tilenet_zdim = args.tilenet_zdim
lstm_inner = args.lstm_inner
params = '{}_nt{}_es{}_{}_tyear{}_zdim{}_din{}'.format(start_year, n_tsteps, patience, feature, train_years, tilenet_zdim, lstm_inner)
os.makedirs(log_out_dir, exist_ok=True)
param_model_out_dir = '{}/{}'.format(model_out_dir, params)
os.makedirs(param_model_out_dir, exist_ok=True)
param_result_out_dir = '{}/{}'.format(result_out_dir, params)
os.makedirs(param_result_out_dir, exist_ok=True)
if feature == 'all':
X_dir = '{}/nr_25_dr100'.format(data_dir)
else:
X_dir = '{}/nr_25_dr100_{}'.format(data_dir, feature)
dim_y = pd.read_csv('{}/dim_y.csv'.format(data_dir))
dim_y = dim_y.astype({'state': int, 'county': int, 'year': int, 'value': float, 'lat': float, 'lon': float})
max_index = len(dim_y) - 1
results = dict()
for year in range(start_year, end_year + 1):
print('Predict year {}......'.format(year))
test_idx = (dim_y['year'] == year)
valid_idx = (dim_y['year'] == (year - 1))
if train_years is None:
train_idx = (dim_y['year'] < (year - 1))
else:
train_idx = (dim_y['year'] < (year - 1)) & (dim_y['year'] >= (year - 1 - train_years))
y_valid, y_train = np.array(dim_y.loc[valid_idx]['value']), np.array(dim_y.loc[train_idx]['value'])
y_test, dim_test = np.array(dim_y.loc[test_idx]['value']), np.array(dim_y.loc[test_idx][['state', 'county']])
test_indices = [i for i, x in enumerate(test_idx) if x]
valid_indices = [i for i, x in enumerate(valid_idx) if x]
train_indices = [i for i, x in enumerate(train_idx) if x]
# check if the indices are sequential
assert all(elem == 1 for elem in [y - x for x, y in zip(test_indices[:-1], test_indices[1:])])
assert all(elem == 1 for elem in [y - x for x, y in zip(valid_indices[:-1], valid_indices[1:])])
assert all(elem == 1 for elem in [y - x for x, y in zip(train_indices[:-1], train_indices[1:])])
print('Train size {}, valid size {}, test size {}'.format(y_train.shape[0], y_valid.shape[0], y_test.shape[0]))
test_corr_lis, test_r2_lis, test_rmse_lis = [], [], []
test_prediction_lis = []
for i in range(n_experiment):
print('Experiment {}'.format(i))
cnn_lstm = CnnLstm(tn_in_channels=feature_len,
tn_z_dim=tilenet_zdim,
d_model=tilenet_zdim,
d_inner=lstm_inner)
optimizer = optim.Adam(cnn_lstm.parameters(), lr=0.001)
trained_epochs = train_cnn_lstm(model=cnn_lstm,
X_dir=X_dir,
X_train_indices=(train_indices[0], train_indices[-1]),
y_train=y_train,
X_valid_indices=(valid_indices[0], valid_indices[-1]),
y_valid=y_valid,
X_test_indices=(test_indices[0], test_indices[-1]),
y_test=y_test,
n_tsteps=n_tsteps,
max_index=max_index,
n_triplets_per_file=n_triplets_per_file,
patience=patience,
optimizer=optimizer,
batch_size=batch_size,
test_batch_size=test_batch_size,
n_epochs=epochs,
out_dir=param_model_out_dir,
year=year,
exp_idx=i,
log_file='{}/{}.txt'.format(log_out_dir, params))
test_prediction, rmse, r2, corr = eval_test(X_dir,
X_test_indices=(test_indices[0], test_indices[-1]),
y_test=y_test,
n_tsteps=n_tsteps,
max_index=max_index,
n_triplets_per_file=n_triplets_per_file,
batch_size=test_batch_size,
model_dir=param_model_out_dir,
model=cnn_lstm,
epochs=trained_epochs,
year=year,
exp_idx=i,
log_file='{}/{}.txt'.format(log_out_dir, params))
test_corr_lis.append(corr)
test_r2_lis.append(r2)
test_rmse_lis.append(rmse)
test_prediction_lis.append(test_prediction)
test_prediction = np.mean(np.asarray(test_prediction_lis), axis=0)
np.save('{}/{}.npy'.format(param_result_out_dir, year), test_prediction)
plot_predict(test_prediction, dim_test, Path('{}/pred_{}.html'.format(param_result_out_dir, year)))
plot_predict_error(test_prediction, y_test, dim_test, Path('{}/err_{}.html'.format(param_result_out_dir, year)))
results[year] = {'test_rmse': np.around(np.mean(test_rmse_lis), 3),
'test_r2': np.around(np.mean(test_r2_lis), 3),
'test_corr': np.around(np.mean(test_corr_lis), 3)}
output_to_csv_simple(results, param_result_out_dir)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Crop Yield Train CNN_LSTM')
parser.add_argument('--patience', type=int, default=9999, metavar='PATIENCE')
parser.add_argument('--feature', type=str, default='all', metavar='FEATURE')
parser.add_argument('--feature-len', type=int, default=9, metavar='FEATURE_LEN')
parser.add_argument('--year', type=int, default=2014, metavar='YEAR')
parser.add_argument('--ntsteps', type=int, default=7, metavar='NTSTEPS', required=True)
parser.add_argument('--train-years', type=int, default=None, metavar='TRAINYEAR', required=True)
parser.add_argument('--tilenet-zdim', type=int, default=256, metavar='ZDIM')
parser.add_argument('--lstm-inner', type=int, default=512, metavar='LSTM_INNER')
args = parser.parse_args()
crop_yield_train_cnn_lstm(args,
data_dir='data/spatial_temporal/counties',
model_out_dir='results/cnn_lstm/models',
result_out_dir='results/cnn_lstm/results',
log_out_dir='results/cnn_lstm/prediction_logs',
start_year=args.year,
end_year=args.year,
n_tsteps=args.ntsteps,
train_years=args.train_years)
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_train_cnn_lstm.py |
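Invocation sketch (editorial addition, not part of the original file); values are illustrative. The script expects preprocessed data under `data/spatial_temporal/counties/nr_25_dr100`.

```python
# python crop_yield_train_cnn_lstm.py --ntsteps 7 --train-years 10 --year 2014
```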
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
import pandas as pd
import argparse
import torch.optim as optim
from pathlib import Path
import sys
sys.path.append("..")
from crop_yield_prediction.models.c3d import C3D
from crop_yield_prediction.train_c3d import train_c3d
from crop_yield_prediction.utils import plot_predict
from crop_yield_prediction.utils import plot_predict_error
from crop_yield_prediction.utils import output_to_csv_simple
from crop_yield_prediction.train_c3d import eval_test
def crop_yield_train_c3d(args, data_dir, model_out_dir, result_out_dir, log_out_dir, start_year, end_year,
n_tsteps, train_years=None):
batch_size = 30
test_batch_size = 128
n_triplets_per_file = 1
epochs = 50
n_experiment = 2
patience = args.patience if args.patience != 9999 else None
feature = args.feature
feature_len = args.feature_len
params = '{}_nt{}_es{}_{}_tyear{}'.format(start_year, n_tsteps, patience, feature, train_years)
os.makedirs(log_out_dir, exist_ok=True)
param_model_out_dir = '{}/{}'.format(model_out_dir, params)
os.makedirs(param_model_out_dir, exist_ok=True)
param_result_out_dir = '{}/{}'.format(result_out_dir, params)
os.makedirs(param_result_out_dir, exist_ok=True)
if feature == 'all':
X_dir = '{}/nr_25_dr100'.format(data_dir)
else:
X_dir = '{}/nr_25_dr100_{}'.format(data_dir, feature)
dim_y = pd.read_csv('{}/dim_y.csv'.format(data_dir))
dim_y = dim_y.astype({'state': int, 'county': int, 'year': int, 'value': float, 'lat': float, 'lon': float})
max_index = len(dim_y) - 1
results = dict()
for year in range(start_year, end_year + 1):
print('Predict year {}......'.format(year))
test_idx = (dim_y['year'] == year)
valid_idx = (dim_y['year'] == (year - 1))
if train_years is None:
train_idx = (dim_y['year'] < (year - 1))
else:
train_idx = (dim_y['year'] < (year - 1)) & (dim_y['year'] >= (year - 1 - train_years))
y_valid, y_train = np.array(dim_y.loc[valid_idx]['value']), np.array(dim_y.loc[train_idx]['value'])
y_test, dim_test = np.array(dim_y.loc[test_idx]['value']), np.array(dim_y.loc[test_idx][['state', 'county']])
test_indices = [i for i, x in enumerate(test_idx) if x]
valid_indices = [i for i, x in enumerate(valid_idx) if x]
train_indices = [i for i, x in enumerate(train_idx) if x]
# check if the indices are sequential
assert all(elem == 1 for elem in [y - x for x, y in zip(test_indices[:-1], test_indices[1:])])
assert all(elem == 1 for elem in [y - x for x, y in zip(valid_indices[:-1], valid_indices[1:])])
assert all(elem == 1 for elem in [y - x for x, y in zip(train_indices[:-1], train_indices[1:])])
print('Train size {}, valid size {}, test size {}'.format(y_train.shape[0], y_valid.shape[0], y_test.shape[0]))
test_corr_lis, test_r2_lis, test_rmse_lis = [], [], []
test_prediction_lis = []
for i in range(n_experiment):
print('Experiment {}'.format(i))
c3d = C3D(in_channels=feature_len, n_tsteps=n_tsteps)
optimizer = optim.Adam(c3d.parameters(), lr=0.001)
trained_epochs = train_c3d(model=c3d,
X_dir=X_dir,
X_train_indices=(train_indices[0], train_indices[-1]),
y_train=y_train,
X_valid_indices=(valid_indices[0], valid_indices[-1]),
y_valid=y_valid,
X_test_indices=(test_indices[0], test_indices[-1]),
y_test=y_test,
n_tsteps=n_tsteps,
max_index=max_index,
n_triplets_per_file=n_triplets_per_file,
patience=patience,
optimizer=optimizer,
batch_size=batch_size,
test_batch_size=test_batch_size,
n_epochs=epochs,
out_dir=param_model_out_dir,
year=year,
exp_idx=i,
log_file='{}/{}.txt'.format(log_out_dir, params))
test_prediction, rmse, r2, corr = eval_test(X_dir,
X_test_indices=(test_indices[0], test_indices[-1]),
y_test=y_test,
n_tsteps=n_tsteps,
max_index=max_index,
n_triplets_per_file=n_triplets_per_file,
batch_size=test_batch_size,
model_dir=param_model_out_dir,
model=c3d,
epochs=trained_epochs,
year=year,
exp_idx=i,
log_file='{}/{}.txt'.format(log_out_dir, params))
test_corr_lis.append(corr)
test_r2_lis.append(r2)
test_rmse_lis.append(rmse)
test_prediction_lis.append(test_prediction)
test_prediction = np.mean(np.asarray(test_prediction_lis), axis=0)
np.save('{}/{}.npy'.format(param_result_out_dir, year), test_prediction)
plot_predict(test_prediction, dim_test, Path('{}/pred_{}.html'.format(param_result_out_dir, year)))
plot_predict_error(test_prediction, y_test, dim_test, Path('{}/err_{}.html'.format(param_result_out_dir, year)))
results[year] = {'test_rmse': np.around(np.mean(test_rmse_lis), 3),
'test_r2': np.around(np.mean(test_r2_lis), 3),
'test_corr': np.around(np.mean(test_corr_lis), 3)}
output_to_csv_simple(results, param_result_out_dir)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Crop Yield Train C3D')
parser.add_argument('--patience', type=int, default=9999, metavar='PATIENCE')
parser.add_argument('--feature', type=str, default='all', metavar='FEATURE')
parser.add_argument('--feature-len', type=int, default=9, metavar='FEATURE_LEN')
parser.add_argument('--year', type=int, default=2014, metavar='YEAR')
parser.add_argument('--ntsteps', type=int, default=7, metavar='NTSTEPS', required=True)
parser.add_argument('--train-years', type=int, default=None, metavar='TRAINYEAR', required=True)
args = parser.parse_args()
crop_yield_train_c3d(args,
data_dir='data/spatial_temporal/counties',
model_out_dir='results/c3d/models',
result_out_dir='results/c3d/results',
log_out_dir='results/c3d/prediction_logs',
start_year=args.year,
end_year=args.year,
n_tsteps=args.ntsteps,
train_years=args.train_years)
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_train_c3d.py |
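Invocation sketch (editorial addition, not part of the original file); values are illustrative and mirror the CNN-LSTM script above.

```python
# python crop_yield_train_c3d.py --ntsteps 7 --train-years 10 --year 2014
```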
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
import pandas as pd
import argparse
import torch.optim as optim
from pathlib import Path
import sys
sys.path.append("..")
from crop_yield_prediction import CLIMATE_VARS
from crop_yield_prediction.models.semi_transformer import SemiTransformer
from crop_yield_prediction.train_cross_location import train_attention
from crop_yield_prediction.utils import plot_predict
from crop_yield_prediction.utils import plot_predict_error
from crop_yield_prediction.utils import output_to_csv_simple
from crop_yield_prediction.train_cross_location import eval_test
def crop_yield_train_cross_location(args, data_dir, model_out_dir, result_out_dir, log_out_dir, start_year, end_year,
n_tsteps, train_years=None):
batch_size = 64
test_batch_size = 128
n_triplets_per_file = 1
epochs = 50
attention_nhead = 8
adam_lr = 0.001
adam_betas = (0.9, 0.999)
n_experiment = 2
neighborhood_radius = args.neighborhood_radius
distant_radius = args.distant_radius
weight_decay = args.weight_decay
tilenet_margin = args.tilenet_margin
tilenet_l2 = args.tilenet_l2
tilenet_ltn = args.tilenet_ltn
tilenet_zdim = args.tilenet_zdim
attention_layer = args.attention_layer
attention_dff = args.attention_dff
sentence_embedding = args.sentence_embedding
dropout = args.dropout
unsup_weight = args.unsup_weight
patience = args.patience if args.patience != 9999 else None
feature = args.feature
feature_len = args.feature_len
query_type = args.query_type
assert tilenet_zdim % attention_nhead == 0
if feature == 'all':
X_dir = '{}/nr_{}'.format(data_dir, neighborhood_radius) if distant_radius is None else \
'{}/nr_{}_dr{}'.format(data_dir, neighborhood_radius, distant_radius)
else:
X_dir = '{}/nr_{}_{}'.format(data_dir, neighborhood_radius, feature) if distant_radius is None else \
'{}/nr_{}_dr{}_{}'.format(data_dir, neighborhood_radius, distant_radius, feature)
dim_y = pd.read_csv('{}/dim_y.csv'.format(data_dir))
dim_y = dim_y.astype({'state': int, 'county': int, 'year': int, 'value': float, 'lat': float, 'lon': float})
max_index = len(dim_y) - 1
results = dict()
group_indices = {'1': (27, 38, 46, 55), '2': (31, 20, 19, 29), '3': (17, 18, 39, 21)}
# group_indices = {'1': (38, 46, 31, 20, 19), '2': (55, 17, 18, 39, 26)}
for year in range(start_year, end_year + 1):
for test_group in group_indices.keys():
print('Predict year {}......'.format(year))
test_idx = (dim_y['year'] == year) & (dim_y['state'].isin(group_indices[test_group]))
valid_idx = (dim_y['year'] == (year - 1)) & (dim_y['state'].isin(group_indices[test_group]))
for train_group in group_indices.keys():
params = 'train{}_test{}_{}_nt{}_nr{}_dr{}_wd{}_mar{}_l2{}_ltn{}_zd{}_al{}_adff{}_se{}_dr{}_uw{}_es{}_{}_tyear{}_qt{}'.format(
train_group,
test_group,
start_year,
n_tsteps,
neighborhood_radius,
distant_radius,
weight_decay,
tilenet_margin, tilenet_l2,
tilenet_ltn, tilenet_zdim,
attention_layer, attention_dff,
sentence_embedding, dropout,
unsup_weight, patience, feature,
train_years, query_type)
os.makedirs(log_out_dir, exist_ok=True)
param_model_out_dir = '{}/{}'.format(model_out_dir, params)
os.makedirs(param_model_out_dir, exist_ok=True)
param_result_out_dir = '{}/{}'.format(result_out_dir, params)
os.makedirs(param_result_out_dir, exist_ok=True)
if train_years is None:
train_idx = (dim_y['year'] < (year - 1)) & (dim_y['state'].isin(group_indices[train_group]))
else:
train_idx = (dim_y['year'] < (year - 1)) & (dim_y['year'] >= (year - 1 - train_years)) \
& (dim_y['state'].isin(group_indices[train_group]))
y_valid, y_train = np.array(dim_y.loc[valid_idx]['value']), np.array(dim_y.loc[train_idx]['value'])
y_test, dim_test = np.array(dim_y.loc[test_idx]['value']), np.array(
dim_y.loc[test_idx][['state', 'county']])
test_indices = [i for i, x in enumerate(test_idx) if x]
valid_indices = [i for i, x in enumerate(valid_idx) if x]
train_indices = [i for i, x in enumerate(train_idx) if x]
train_size, valid_size, test_size = y_train.shape[0], y_valid.shape[0], y_test.shape[0]
train_indices_dic = {local_index: global_index for local_index, global_index in
zip(range(train_size), train_indices)}
valid_indices_dic = {local_index: global_index for local_index, global_index in
zip(range(valid_size), valid_indices)}
test_indices_dic = {local_index: global_index for local_index, global_index in
zip(range(test_size), test_indices)}
print('Train size {}, valid size {}, test size {}'.format(train_size, valid_size, test_size))
test_corr_lis, test_r2_lis, test_rmse_lis = [], [], []
test_prediction_lis = []
for i in range(n_experiment):
print('Experiment {}'.format(i))
semi_transformer = SemiTransformer(
tn_in_channels=feature_len,
tn_z_dim=tilenet_zdim,
tn_warm_start_model=None,
sentence_embedding=sentence_embedding,
output_pred=True,
query_type=query_type,
attn_n_tsteps=n_tsteps,
d_word_vec=tilenet_zdim,
d_model=tilenet_zdim,
d_inner=attention_dff,
n_layers=attention_layer,
n_head=attention_nhead,
d_k=tilenet_zdim // attention_nhead,
d_v=tilenet_zdim // attention_nhead,
dropout=dropout,
apply_position_enc=True)
optimizer = optim.Adam(semi_transformer.parameters(), lr=adam_lr, betas=adam_betas,
weight_decay=weight_decay)
trained_epochs = train_attention(model=semi_transformer,
X_dir=X_dir,
X_train_indices_dic=train_indices_dic,
y_train=y_train,
X_valid_indices_dic=valid_indices_dic,
y_valid=y_valid,
X_test_indices_dic=test_indices_dic,
y_test=y_test,
n_tsteps=n_tsteps,
max_index=max_index,
n_triplets_per_file=n_triplets_per_file,
tilenet_margin=tilenet_margin,
tilenet_l2=tilenet_l2,
tilenet_ltn=tilenet_ltn,
unsup_weight=unsup_weight,
patience=patience,
optimizer=optimizer,
batch_size=batch_size,
test_batch_size=test_batch_size,
n_epochs=epochs,
out_dir=param_model_out_dir,
year=year,
exp_idx=i,
log_file='{}/{}.txt'.format(log_out_dir, params))
test_prediction, rmse, r2, corr = eval_test(X_dir,
X_test_indices_dic=test_indices_dic,
y_test=y_test,
n_tsteps=n_tsteps,
max_index=max_index,
n_triplets_per_file=n_triplets_per_file,
batch_size=test_batch_size,
model_dir=param_model_out_dir,
model=semi_transformer,
epochs=trained_epochs,
year=year,
exp_idx=i,
log_file='{}/{}.txt'.format(log_out_dir, params))
test_corr_lis.append(corr)
test_r2_lis.append(r2)
test_rmse_lis.append(rmse)
test_prediction_lis.append(test_prediction)
test_prediction = np.mean(np.asarray(test_prediction_lis), axis=0)
np.save('{}/{}.npy'.format(param_result_out_dir, year), test_prediction)
plot_predict(test_prediction, dim_test, Path('{}/pred_{}.html'.format(param_result_out_dir, year)))
plot_predict_error(test_prediction, y_test, dim_test,
Path('{}/err_{}.html'.format(param_result_out_dir, year)))
results[year] = {'test_rmse': np.around(np.mean(test_rmse_lis), 3),
'test_r2': np.around(np.mean(test_r2_lis), 3),
'test_corr': np.around(np.mean(test_corr_lis), 3)}
output_to_csv_simple(results, param_result_out_dir)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Crop Yield Train Semi Transformer')
parser.add_argument('--neighborhood-radius', type=int, default=None, metavar='NR')
parser.add_argument('--distant-radius', type=int, default=None, metavar='DR')
parser.add_argument('--weight-decay', type=float, metavar='WDECAY')
parser.add_argument('--tilenet-margin', type=float, default=50.0, metavar='MARGIN')
parser.add_argument('--tilenet-l2', type=float, default=0.01, metavar='L2')
parser.add_argument('--tilenet-ltn', type=float, default=0.1, metavar='LTN')
parser.add_argument('--tilenet-zdim', type=int, default=512, metavar='ZDIM')
parser.add_argument('--attention-layer', type=int, default=2, metavar='ALAYER')
parser.add_argument('--attention-dff', type=int, default=2048, metavar='ADFF')
parser.add_argument('--sentence-embedding', type=str, default='simple_average', metavar='SEMD')
parser.add_argument('--query-type', type=str, default='fixed', metavar='QTYPE')
parser.add_argument('--dropout', type=float, default=0.1, metavar='DROPOUT')
parser.add_argument('--unsup-weight', type=float, default=0.2, metavar='UWEIGHT')
parser.add_argument('--patience', type=int, default=9999, metavar='PATIENCE')
parser.add_argument('--feature', type=str, default='all', metavar='FEATURE')
parser.add_argument('--feature-len', type=int, default=9, metavar='FEATURE_LEN')
parser.add_argument('--year', type=int, default=2014, metavar='YEAR')
parser.add_argument('--ntsteps', type=int, default=7, metavar='NTSTEPS', required=True)
parser.add_argument('--train-years', type=int, default=None, metavar='TRAINYEAR', required=True)
args = parser.parse_args()
crop_yield_train_cross_location(args,
data_dir='data/spatial_temporal/counties',
model_out_dir='results/cross_location/models',
result_out_dir='results/cross_location/results',
log_out_dir='results/cross_location/prediction_logs',
start_year=args.year,
end_year=args.year,
n_tsteps=args.ntsteps,
train_years=args.train_years)
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_train_cross_location.py |
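Invocation sketch (editorial addition, not part of the original file); values are illustrative. As with the single-location script, `--weight-decay` has no default and should be supplied.

```python
# python crop_yield_train_cross_location.py --ntsteps 7 --train-years 10 \
#     --neighborhood-radius 25 --distant-radius 100 --weight-decay 0.0 --year 2014
```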
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from crop_yield_prediction import CLIMATE_VARS
import os
import numpy as np
def generate_feature_importance_data_exclude(in_dir, out_dir, exclude_group):
os.makedirs(out_dir, exist_ok=True)
include_indices = [i for i, x in enumerate(CLIMATE_VARS) if x not in exclude_group]
print(exclude_group, include_indices)
for f in os.listdir(in_dir):
if f.endswith('.npy'):
in_data = np.load('{}/{}'.format(in_dir, f))
out_data = in_data[:, :, :, include_indices, :, :]
# print(out_data.shape)
np.save('{}/{}'.format(out_dir, f), out_data)
if __name__ == '__main__':
# exclude_groups = [('ppt',), ('evi', 'ndvi'), ('elevation',), ('lst_day', 'lst_night'),
# ('clay', 'sand', 'silt')]
exclude_groups = [('ppt', 'elevation', 'lst_day', 'lst_night', 'clay', 'sand', 'silt')]
for eg in exclude_groups:
generate_feature_importance_data_exclude(in_dir='data/spatial_temporal/counties/nr_25_dr100',
out_dir='data/spatial_temporal/counties/nr_25_dr100_{}'.format('_'.join(eg)),
exclude_group=eg)
| Context-Aware-Representation-Crop-Yield-Prediction-main | generate_feature_importance_data.py |
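A sketch (editorial addition, not part of the original file) of the channel-exclusion indexing above on a synthetic 6-D array; the variable list, its ordering, and the array shape are illustrative assumptions.

```python
import numpy as np

CLIMATE_VARS_EXAMPLE = ['ppt', 'evi', 'ndvi', 'elevation', 'lst_day', 'lst_night', 'clay', 'sand', 'silt']
exclude_group = ('evi', 'ndvi')
include_indices = [i for i, x in enumerate(CLIMATE_VARS_EXAMPLE) if x not in exclude_group]

# (n_samples, n_quadruplet, n_timesteps, n_channels, H, W) -- shape assumed for illustration.
in_data = np.zeros((2, 4, 7, len(CLIMATE_VARS_EXAMPLE), 5, 5))
out_data = in_data[:, :, :, include_indices, :, :]
print(out_data.shape)  # (2, 4, 7, 7, 5, 5): the two excluded channels are dropped from axis 3
```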
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from data_preprocessing.sample_quadruplets import generate_training_for_counties
from data_preprocessing.postprocess import mask_non_major_states
from data_preprocessing.postprocess import generate_no_spatial_for_counties
from data_preprocessing.postprocess import obtain_channel_wise_mean_std
from data_preprocessing.sample_quadruplets import generate_training_for_pretrained
if __name__ == '__main__':
    MAJOR_STATES = [17, 18, 19, 20, 27, 29, 31, 38, 39, 46, 21, 55, 26]
# ['Illinois', 'Indiana', 'Iowa', 'Kansas', 'Minnesota', 'Missouri', 'Nebraska', 'North Dakota', 'Ohio',
# 'South Dakota', 'Kentucky', 'Wisconsin', 'Michigan']
mask_non_major_states('experiment_data/spatial_temporal/nc_files_unmasked',
'experiment_data/spatial_temporal/nc_files',
'processed_data/counties/lst/us_counties.nc',
MAJOR_STATES)
generate_no_spatial_for_counties(yield_data_dir='processed_data/crop_yield',
ppt_file='experiment_data/spatial_temporal/nc_files/2003.nc',
county_location_file='processed_data/counties/lst/us_counties_cro_cvm_locations.csv',
out_dir='experiment_data/no_spatial',
img_dir='experiment_data/spatial_temporal/nc_files',
croptype='soybeans',
start_month=3,
end_month=9,
start_index=1)
obtain_channel_wise_mean_std('experiment_data/spatial_temporal/nc_files')
for nr in [5, 50, 100, 1000, None]:
generate_training_for_counties(out_dir='experiment_data/spatial_temporal/counties',
img_dir='experiment_data/spatial_temporal/nc_files',
start_month=3, end_month=9, start_month_index=1, n_spatial_neighbor=1, n_distant=1,
img_timestep_quadruplets=
'experiment_data/spatial_temporal/counties/img_timestep_quadruplets_hard.csv',
img_size=50, neighborhood_radius=nr, distant_radius=None, prenorm=True)
generate_training_for_pretrained(out_dir='experiment_data/spatial_temporal/counties',
img_dir='experiment_data/spatial_temporal/nc_files',
n_quadruplets=100000,
start_year=2003, end_year=2012, start_month=3, end_month=9, start_month_index=1,
n_spatial_neighbor=1, n_distant=1,
img_size=50, neighborhood_radius=10, distant_radius=50, prenorm=True)
generate_training_for_pretrained(out_dir='experiment_data/spatial_temporal/counties',
img_dir='experiment_data/spatial_temporal/nc_files',
n_quadruplets=100000,
start_year=2003, end_year=2012, start_month=3, end_month=9, start_month_index=1,
n_spatial_neighbor=1, n_distant=1,
img_size=50, neighborhood_radius=25, distant_radius=100, prenorm=True)
| Context-Aware-Representation-Crop-Yield-Prediction-main | generate_experiment_data.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from crop_yield_prediction.utils import Logger
from crop_yield_prediction.models import *
import os
import sys
import argparse
def predict_for_no_spatial(train_years):
log_folder = 'results/no_spatial/prediction_logs'
if not os.path.exists(log_folder):
os.makedirs(log_folder)
sys.stdout = Logger('{}/nt{}_all_results_online_learning.txt'.format(log_folder, train_years))
predict_no_spatial('data/no_spatial/soybeans_3_9.csv', 2014, 2018, 9, train_years,
'crop_yield_no_spatial/results/all')
predict_no_spatial('data/no_spatial/soybeans_3_9.csv', 2014, 2018, 8, train_years,
'crop_yield_no_spatial/results/all')
predict_no_spatial('data/no_spatial/soybeans_3_9.csv', 2014, 2018, 7, train_years,
'crop_yield_no_spatial/results/all')
predict_no_spatial('data/no_spatial/soybeans_3_9.csv', 2014, 2018, 6, train_years,
'crop_yield_no_spatial/results/all')
predict_no_spatial('data/no_spatial/soybeans_3_9.csv', 2014, 2018, 5, train_years,
'crop_yield_no_spatial/results/all')
sys.stdout.close()
sys.stdout = sys.__stdout__
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--predict', required=True)
parser.add_argument('--train-years', type=int, default=None, metavar='TRAINYEAR', required=True)
args = parser.parse_args()
predict = args.predict
train_years = args.train_years
if predict == 'no_spatial':
predict_for_no_spatial(train_years)
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_no_spatial.py |
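Invocation sketch (editorial addition, not part of the original file); values are illustrative.

```python
# python crop_yield_no_spatial.py --predict no_spatial --train-years 10
```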
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from crop_yield_prediction.dataloader import cross_location_dataloader
import os
import time
from math import sqrt
from sklearn.metrics import r2_score, mean_squared_error
from scipy.stats.stats import pearsonr
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn.functional as F
def prep_data(batch_X, batch_y, cuda):
batch_X, batch_y = Variable(batch_X), Variable(batch_y)
if cuda:
batch_X, batch_y = batch_X.cuda(), batch_y.cuda()
return batch_X, batch_y
def train_epoch(model, train_dataloader, tilenet_margin, tilenet_l2, tilenet_ltn, unsup_weight, optimizer, cuda):
''' Epoch operation in training phase'''
model.train()
if cuda:
model.cuda()
n_batches = len(train_dataloader)
sum_loss_dic = {}
for loss_type in ['loss', 'loss_supervised', 'loss_unsupervised',
'l_n', 'l_d', 'l_nd', 'sn_loss', 'tn_loss', 'norm_loss']:
sum_loss_dic[loss_type] = 0
for batch_X, batch_y in train_dataloader:
batch_X, batch_y = prep_data(batch_X, batch_y, cuda)
# forward
optimizer.zero_grad()
emb_triplets, pred = model(batch_X, unsup_weight)
loss_func = torch.nn.MSELoss()
loss_supervised = loss_func(pred, batch_y)
if unsup_weight != 0:
loss_unsupervised, l_n, l_d, l_nd, sn_loss, tn_loss, norm_loss = triplet_loss(emb_triplets,
tilenet_margin, tilenet_l2, tilenet_ltn)
loss = (1 - unsup_weight) * loss_supervised + unsup_weight * loss_unsupervised
else:
loss = loss_supervised
loss.backward()
optimizer.step()
# note keeping
sum_loss_dic['loss'] += loss.item()
sum_loss_dic['loss_supervised'] += loss_supervised.item()
if unsup_weight != 0:
sum_loss_dic['loss_unsupervised'] += loss_unsupervised.item()
sum_loss_dic['l_n'] += l_n.item()
sum_loss_dic['l_d'] += l_d.item()
sum_loss_dic['l_nd'] += l_nd.item()
sum_loss_dic['sn_loss'] += sn_loss.item()
sum_loss_dic['tn_loss'] += tn_loss.item()
if tilenet_l2 != 0:
sum_loss_dic['norm_loss'] += norm_loss.item()
avg_loss_dic = {}
for loss_type in sum_loss_dic.keys():
avg_loss_dic[loss_type] = sum_loss_dic[loss_type] / n_batches
return avg_loss_dic
def cal_performance(prediction, y):
rmse = np.around(sqrt(mean_squared_error(y, prediction)), 3)
r2 = np.around(r2_score(y, prediction), 3)
corr = tuple(map(lambda x: np.around(x, 3), pearsonr(y, prediction)))[0]
return rmse, r2, corr
def triplet_loss(emb_triplets, margin, l2, ltn):
dim = emb_triplets.shape[-1]
z_a = emb_triplets[:, :, 0, :]
z_tn = emb_triplets[:, :, 1, :]
z_sn = emb_triplets[:, :, 2, :]
z_d = emb_triplets[:, :, 3, :]
# average over timesteps
l_n = torch.mean(torch.sqrt(((z_a - z_sn) ** 2).sum(dim=2)), dim=1)
l_d = - torch.mean(torch.sqrt(((z_a - z_d) ** 2).sum(dim=2)), dim=1)
sn_loss = F.relu(l_n + l_d + margin)
tn_loss = torch.mean(torch.sqrt(((z_a - z_tn) ** 2).sum(dim=2)), dim=1)
# average by #samples in mini-batch
l_n = torch.mean(l_n)
l_d = torch.mean(l_d)
l_nd = torch.mean(l_n + l_d)
sn_loss = torch.mean(sn_loss)
tn_loss = torch.mean(tn_loss)
loss = (1 - ltn) * sn_loss + ltn * tn_loss
norm_loss = 0
if l2 != 0:
z_a_norm = torch.sqrt((z_a ** 2).sum(dim=2))
z_sn_norm = torch.sqrt((z_sn ** 2).sum(dim=2))
z_d_norm = torch.sqrt((z_d ** 2).sum(dim=2))
z_tn_norm = torch.sqrt((z_tn ** 2).sum(dim=2))
norm_loss = torch.mean(z_a_norm + z_sn_norm + z_d_norm + z_tn_norm) / (dim ** 0.5)
loss += l2 * norm_loss
return loss, l_n, l_d, l_nd, sn_loss, tn_loss, norm_loss
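# Editorial note (not part of the original file): emb_triplets is expected to have shape
# (batch, n_tsteps, 4, z_dim), where index 0/1/2/3 on the third axis holds the anchor,
# temporal-neighbor, spatial-neighbor and distant embeddings respectively. For example,
# with margin=50.0, l2=0.01, ltn=0.1:
#     emb = torch.randn(8, 7, 4, 512)
#     loss, l_n, l_d, l_nd, sn_loss, tn_loss, norm_loss = triplet_loss(emb, 50.0, 0.01, 0.1)
# returns scalar tensors for each component.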
def eval_epoch(model, validation_dataloader, tilenet_margin, tilenet_l2, tilenet_ltn, unsup_weight, cuda):
''' Epoch operation in evaluation phase '''
model.eval()
if cuda:
model.cuda()
n_batches = len(validation_dataloader)
n_samples = len(validation_dataloader.dataset)
batch_size = validation_dataloader.batch_size
predictions = torch.zeros(n_samples)
# collect y as batch_y has been shuffled
y = torch.zeros(n_samples)
sum_loss_dic = {}
for loss_type in ['loss', 'loss_supervised', 'loss_unsupervised',
'l_n', 'l_d', 'l_nd', 'sn_loss', 'tn_loss', 'norm_loss']:
sum_loss_dic[loss_type] = 0
with torch.no_grad():
for i, (batch_X, batch_y) in enumerate(validation_dataloader):
batch_X, batch_y = prep_data(batch_X, batch_y, cuda)
# forward
emb_triplets, pred = model(batch_X, unsup_weight)
loss_func = torch.nn.MSELoss()
loss_supervised = loss_func(pred, batch_y)
if unsup_weight != 0:
loss_unsupervised, l_n, l_d, l_nd, sn_loss, tn_loss, norm_loss = triplet_loss(emb_triplets,
tilenet_margin, tilenet_l2, tilenet_ltn)
loss = (1 - unsup_weight) * loss_supervised + unsup_weight * loss_unsupervised
else:
loss = loss_supervised
start = i * batch_size
end = start + batch_size if i != n_batches - 1 else n_samples
predictions[start:end] = pred
y[start:end] = batch_y
sum_loss_dic['loss'] += loss.item()
sum_loss_dic['loss_supervised'] += loss_supervised.item()
if unsup_weight != 0:
sum_loss_dic['loss_unsupervised'] += loss_unsupervised.item()
sum_loss_dic['l_n'] += l_n.item()
sum_loss_dic['l_d'] += l_d.item()
sum_loss_dic['l_nd'] += l_nd.item()
sum_loss_dic['sn_loss'] += sn_loss.item()
sum_loss_dic['tn_loss'] += tn_loss.item()
if tilenet_l2 != 0:
sum_loss_dic['norm_loss'] += norm_loss.item()
if cuda:
predictions, y = predictions.cpu(), y.cpu()
predictions, y = predictions.data.numpy(), y.data.numpy()
rmse, r2, corr = cal_performance(predictions, y)
avg_loss_dic = {}
for loss_type in sum_loss_dic.keys():
avg_loss_dic[loss_type] = sum_loss_dic[loss_type] / n_batches
return avg_loss_dic, rmse, r2, corr
def eval_test(X_dir, X_test_indices_dic, y_test, n_tsteps, max_index, n_triplets_per_file, batch_size, model_dir, model, epochs, year,
exp_idx, log_file):
with open(log_file, 'a') as f:
print('Predict year {}'.format(year), file=f, flush=True)
print('Test size {}'.format(y_test.shape[0]), file=f, flush=True)
print('Experiment {}'.format(exp_idx), file=f, flush=True)
cuda = torch.cuda.is_available()
models = []
for epoch_i in range(epochs):
models.append('{}/{}_{}_epoch{}.tar'.format(model_dir, exp_idx, year, epoch_i))
best_model = '{}/{}_{}_best.tar'.format(model_dir, exp_idx, year)
models.append(best_model)
for model_file in models:
checkpoint = torch.load(model_file) if cuda else torch.load(model_file, map_location='cpu')
model.load_state_dict(checkpoint['model_state_dict'])
model.eval()
if cuda:
model.cuda()
test_dataloader = cross_location_dataloader(X_dir, X_test_indices_dic, y_test, n_tsteps,
max_index, n_triplets_per_file, batch_size, shuffle=False, num_workers=4)
n_batches = len(test_dataloader)
n_samples = len(y_test)
predictions = torch.zeros(n_samples)
with torch.no_grad():
for i, (batch_X, batch_y) in enumerate(test_dataloader):
batch_X, batch_y = prep_data(batch_X, batch_y, cuda)
# forward
_, pred = model(batch_X, unsup_weight=0)
start = i * batch_size
end = start + batch_size if i != n_batches - 1 else n_samples
predictions[start:end] = pred
if cuda:
predictions = predictions.cpu()
predictions = predictions.data.numpy()
rmse, r2, corr = cal_performance(predictions, y_test)
if 'epoch' in model_file:
print(' - {header:12} epoch: {epoch: 5}, rmse: {rmse: 8.3f}, r2: {r2: 8.3f}, corr: {corr: 8.3f}'.
format(header=f"({'Test'})", epoch=checkpoint['epoch'], rmse=rmse, r2=r2, corr=corr), file=f, flush=True)
else:
print(' - {header:12} best selected based on validation set, '
'rmse: {rmse: 8.3f}, r2: {r2: 8.3f}, corr: {corr: 8.3f}'.
format(header=f"({'Test'})", rmse=rmse, r2=r2, corr=corr), file=f, flush=True)
return predictions, rmse, r2, corr
def eval_test_best_only(test_dataloader, y_test, batch_size, model, epoch, log_file):
cuda = torch.cuda.is_available()
model.eval()
if cuda:
model.cuda()
n_batches = len(test_dataloader)
n_samples = len(y_test)
predictions = torch.zeros(n_samples)
with torch.no_grad():
for i, (batch_X, batch_y) in enumerate(test_dataloader):
batch_X, batch_y = prep_data(batch_X, batch_y, cuda)
# forward
_, pred = model(batch_X, unsup_weight=0)
start = i * batch_size
end = start + batch_size if i != n_batches - 1 else n_samples
predictions[start:end] = pred
if cuda:
predictions = predictions.cpu()
predictions = predictions.data.numpy()
rmse, r2, corr = cal_performance(predictions, y_test)
print(' - {header:12} epoch: {epoch: 5}, rmse: {rmse: 8.3f}, r2: {r2: 8.3f}, corr: {corr: 8.3f}'.
format(header=f"({'Test_Best'})", epoch=epoch, rmse=rmse, r2=r2, corr=corr), file=log_file, flush=True)
def train_attention(model, X_dir, X_train_indices_dic, y_train, X_valid_indices_dic, y_valid, X_test_indices_dic, y_test, n_tsteps,
max_index, n_triplets_per_file, tilenet_margin, tilenet_l2, tilenet_ltn, unsup_weight, patience,
optimizer, batch_size, test_batch_size, n_epochs, out_dir, year, exp_idx, log_file):
with open(log_file, 'a') as f:
print('Predict year {}......'.format(year), file=f, flush=True)
print('Train size {}, valid size {}'.format(y_train.shape[0], y_valid.shape[0]), file=f, flush=True)
print('Experiment {}'.format(exp_idx), file=f, flush=True)
cuda = torch.cuda.is_available()
train_dataloader = cross_location_dataloader(X_dir, X_train_indices_dic, y_train, n_tsteps,
max_index, n_triplets_per_file, batch_size, shuffle=True,
num_workers=4)
validation_dataloader = cross_location_dataloader(X_dir, X_valid_indices_dic, y_valid, n_tsteps,
max_index, n_triplets_per_file, batch_size, shuffle=False,
num_workers=4)
test_dataloader = cross_location_dataloader(X_dir, X_test_indices_dic, y_test, n_tsteps,
max_index, n_triplets_per_file, test_batch_size, shuffle=False,
num_workers=4)
valid_rmse_min = np.inf
if patience is not None:
epochs_without_improvement = 0
for epoch_i in range(n_epochs):
print('[ Epoch', epoch_i, ']', file=f, flush=True)
start = time.time()
train_loss = train_epoch(model, train_dataloader, tilenet_margin, tilenet_l2, tilenet_ltn, unsup_weight,
optimizer, cuda)
print(' - {header:12} avg loss: {loss: 8.3f}, supervised loss: {supervised_loss: 8.3f}, '
'unsupervised loss: {unsupervised_loss: 8.3f}, elapse: {elapse:3.3f} min'.
format(header=f"({'Training'})", loss=train_loss['loss'], supervised_loss=train_loss['loss_supervised'],
unsupervised_loss=train_loss['loss_unsupervised'],
elapse=(time.time() - start) / 60), file=f, flush=True)
# if epoch_i in [20, 40]:
# for param_group in optimizer.param_groups:
# param_group['lr'] /= 10
start = time.time()
valid_loss, valid_rmse, valid_r2, valid_corr = eval_epoch(model, validation_dataloader,
tilenet_margin, tilenet_l2, tilenet_ltn, unsup_weight,
cuda)
print(' - {header:12} loss: {loss: 8.3f}, supervised loss: {supervised_loss: 8.3f}, '
'unsupervised loss: {unsupervised_loss: 8.3f}, l_n loss: {l_n: 8.3f}, l_d loss: {l_d: 8.3f}, '
'l_nd loss: {l_nd: 8.3f}, sn_loss: {sn_loss: 8.3f}, tn_loss: {tn_loss: 8.3f}, norm_loss: {norm_loss: 8.3f}, '
'rmse: {rmse: 8.3f}, r2: {r2: 8.3f}, corr: {corr: 8.3f}, elapse: {elapse:3.3f} min'.
format(header=f"({'Validation'})", loss=valid_loss['loss'], supervised_loss=valid_loss['loss_supervised'],
unsupervised_loss=valid_loss['loss_unsupervised'], l_n=valid_loss['l_n'], l_d=valid_loss['l_d'],
l_nd=valid_loss['l_nd'], sn_loss=valid_loss['sn_loss'], tn_loss=valid_loss['tn_loss'], norm_loss=valid_loss['norm_loss'],
rmse=valid_rmse, r2=valid_r2, corr=valid_corr, elapse=(time.time() - start) / 60), file=f, flush=True)
checkpoint = {'epoch': epoch_i, 'model_state_dict': model.state_dict()}
torch.save(checkpoint, '{}/{}_{}_epoch{}.tar'.format(out_dir, exp_idx, year, epoch_i))
if valid_rmse < valid_rmse_min:
eval_test_best_only(test_dataloader, y_test, test_batch_size, model, epoch_i, f)
torch.save(checkpoint, '{}/{}_{}_best.tar'.format(out_dir, exp_idx, year))
print(' - [Info] The checkpoint file has been updated at epoch {}.'.format(epoch_i), file=f, flush=True)
valid_rmse_min = valid_rmse
if patience is not None:
epochs_without_improvement = 0
elif patience is not None:
epochs_without_improvement += 1
if epochs_without_improvement == patience:
                    print('Early stopping!', file=f, flush=True)
return epoch_i + 1
return n_epochs
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/train_cross_location.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
__all__ = ['CLIMATE_VARS', 'STATIC_CLIMATE_VARS', 'DYNAMIC_CLIMATE_VARS']
CLIMATE_VARS = ['ppt', 'evi', 'ndvi', 'elevation', 'lst_day', 'lst_night', 'clay', 'sand', 'silt']
STATIC_CLIMATE_VARS = ['elevation', 'clay', 'sand', 'silt']
DYNAMIC_CLIMATE_VARS = [x for x in CLIMATE_VARS if x not in STATIC_CLIMATE_VARS]
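# Descriptive note (added comment): given the two lists above, DYNAMIC_CLIMATE_VARS
# resolves to ['ppt', 'evi', 'ndvi', 'lst_day', 'lst_night'].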
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from crop_yield_prediction.dataloader import c3d_dataloader
import time
from math import sqrt
from sklearn.metrics import r2_score, mean_squared_error
from scipy.stats import pearsonr
import numpy as np
import torch
from torch.autograd import Variable
def prep_data(batch_X, batch_y, cuda):
batch_X, batch_y = Variable(batch_X), Variable(batch_y)
if cuda:
batch_X, batch_y = batch_X.cuda(), batch_y.cuda()
return batch_X, batch_y
def train_epoch(model, train_dataloader, optimizer, cuda):
''' Epoch operation in training phase'''
model.train()
if cuda:
model.cuda()
n_batches = len(train_dataloader)
sum_loss = 0
for batch_X, batch_y in train_dataloader:
batch_X, batch_y = prep_data(batch_X, batch_y, cuda)
# forward
optimizer.zero_grad()
pred = model(batch_X)
loss_func = torch.nn.MSELoss()
loss = loss_func(pred, batch_y)
loss.backward()
optimizer.step()
# note keeping
sum_loss += loss.item()
avg_loss = sum_loss / n_batches
return avg_loss
def cal_performance(prediction, y):
rmse = np.around(sqrt(mean_squared_error(y, prediction)), 3)
r2 = np.around(r2_score(y, prediction), 3)
corr = tuple(map(lambda x: np.around(x, 3), pearsonr(y, prediction)))[0]
return rmse, r2, corr
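# Usage sketch (illustrative only, not part of the original file): given 1-D numpy
# arrays of predicted and true yields, e.g. pred = np.array([41.0, 43.0]) and
# y = np.array([40.0, 42.5]), cal_performance(pred, y) returns (rmse, r2, corr),
# each rounded to 3 decimals, with corr being the Pearson correlation coefficient.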
def eval_epoch(model, validation_dataloader, cuda):
''' Epoch operation in evaluation phase '''
model.eval()
if cuda:
model.cuda()
n_batches = len(validation_dataloader)
n_samples = len(validation_dataloader.dataset)
batch_size = validation_dataloader.batch_size
predictions = torch.zeros(n_samples)
# collect y as batch_y has been shuffled
y = torch.zeros(n_samples)
sum_loss = 0
with torch.no_grad():
for i, (batch_X, batch_y) in enumerate(validation_dataloader):
batch_X, batch_y = prep_data(batch_X, batch_y, cuda)
# forward
pred = model(batch_X)
loss_func = torch.nn.MSELoss()
loss = loss_func(pred, batch_y)
start = i * batch_size
end = start + batch_size if i != n_batches - 1 else n_samples
predictions[start:end] = pred
y[start:end] = batch_y
sum_loss += loss.item()
if cuda:
predictions, y = predictions.cpu(), y.cpu()
predictions, y = predictions.data.numpy(), y.data.numpy()
rmse, r2, corr = cal_performance(predictions, y)
avg_loss = sum_loss / n_batches
return avg_loss, rmse, r2, corr
def eval_test(X_dir, X_test_indices, y_test, n_tsteps, max_index, n_triplets_per_file, batch_size, model_dir, model, epochs, year,
exp_idx, log_file):
with open(log_file, 'a') as f:
print('Predict year {}'.format(year), file=f, flush=True)
print('Test size {}'.format(y_test.shape[0]), file=f, flush=True)
print('Experiment {}'.format(exp_idx), file=f, flush=True)
cuda = torch.cuda.is_available()
models = []
for epoch_i in range(epochs):
models.append('{}/{}_{}_epoch{}.tar'.format(model_dir, exp_idx, year, epoch_i))
best_model = '{}/{}_{}_best.tar'.format(model_dir, exp_idx, year)
models.append(best_model)
for model_file in models:
checkpoint = torch.load(model_file) if cuda else torch.load(model_file, map_location='cpu')
model.load_state_dict(checkpoint['model_state_dict'])
model.eval()
if cuda:
model.cuda()
test_dataloader = c3d_dataloader(X_dir, X_test_indices[0], X_test_indices[1], y_test, n_tsteps,
max_index, n_triplets_per_file, batch_size, shuffle=False, num_workers=4)
n_batches = len(test_dataloader)
n_samples = len(y_test)
predictions = torch.zeros(n_samples)
with torch.no_grad():
for i, (batch_X, batch_y) in enumerate(test_dataloader):
batch_X, batch_y = prep_data(batch_X, batch_y, cuda)
# forward
pred = model(batch_X)
start = i * batch_size
end = start + batch_size if i != n_batches - 1 else n_samples
predictions[start:end] = pred
if cuda:
predictions = predictions.cpu()
predictions = predictions.data.numpy()
rmse, r2, corr = cal_performance(predictions, y_test)
if 'epoch' in model_file:
print(' - {header:12} epoch: {epoch: 5}, rmse: {rmse: 8.3f}, r2: {r2: 8.3f}, corr: {corr: 8.3f}'.
format(header=f"({'Test'})", epoch=checkpoint['epoch'], rmse=rmse, r2=r2, corr=corr), file=f, flush=True)
else:
print(' - {header:12} best selected based on validation set, '
'rmse: {rmse: 8.3f}, r2: {r2: 8.3f}, corr: {corr: 8.3f}'.
format(header=f"({'Test'})", rmse=rmse, r2=r2, corr=corr), file=f, flush=True)
return predictions, rmse, r2, corr
def eval_test_best_only(test_dataloader, y_test, batch_size, model, epoch, log_file):
cuda = torch.cuda.is_available()
model.eval()
if cuda:
model.cuda()
n_batches = len(test_dataloader)
n_samples = len(y_test)
predictions = torch.zeros(n_samples)
with torch.no_grad():
for i, (batch_X, batch_y) in enumerate(test_dataloader):
batch_X, batch_y = prep_data(batch_X, batch_y, cuda)
# forward
pred = model(batch_X)
start = i * batch_size
end = start + batch_size if i != n_batches - 1 else n_samples
predictions[start:end] = pred
if cuda:
predictions = predictions.cpu()
predictions = predictions.data.numpy()
rmse, r2, corr = cal_performance(predictions, y_test)
print(' - {header:12} epoch: {epoch: 5}, rmse: {rmse: 8.3f}, r2: {r2: 8.3f}, corr: {corr: 8.3f}'.
format(header=f"({'Test_Best'})", epoch=epoch, rmse=rmse, r2=r2, corr=corr), file=log_file, flush=True)
def train_c3d(model, X_dir, X_train_indices, y_train, X_valid_indices, y_valid, X_test_indices, y_test, n_tsteps,
max_index, n_triplets_per_file, patience, optimizer, batch_size, test_batch_size, n_epochs, out_dir, year,
exp_idx, log_file):
with open(log_file, 'a') as f:
print('Predict year {}......'.format(year), file=f, flush=True)
print('Train size {}, valid size {}'.format(y_train.shape[0], y_valid.shape[0]), file=f, flush=True)
print('Experiment {}'.format(exp_idx), file=f, flush=True)
cuda = torch.cuda.is_available()
train_dataloader = c3d_dataloader(X_dir, X_train_indices[0], X_train_indices[1], y_train, n_tsteps,
max_index, n_triplets_per_file, batch_size, shuffle=True, num_workers=4)
validation_dataloader = c3d_dataloader(X_dir, X_valid_indices[0], X_valid_indices[1], y_valid, n_tsteps,
max_index, n_triplets_per_file, batch_size, shuffle=False, num_workers=4)
test_dataloader = c3d_dataloader(X_dir, X_test_indices[0], X_test_indices[1], y_test, n_tsteps,
max_index, n_triplets_per_file, test_batch_size, shuffle=False, num_workers=4)
valid_rmse_min = np.inf
if patience is not None:
epochs_without_improvement = 0
for epoch_i in range(n_epochs):
print('[ Epoch', epoch_i, ']', file=f, flush=True)
start = time.time()
train_loss = train_epoch(model, train_dataloader, optimizer, cuda)
print(' - {header:12} avg loss: {loss: 8.3f}, elapse: {elapse:3.3f} min'.
format(header=f"({'Training'})", loss=train_loss,
elapse=(time.time() - start) / 60), file=f, flush=True)
# if epoch_i in [20, 40]:
# for param_group in optimizer.param_groups:
# param_group['lr'] /= 10
start = time.time()
valid_loss, valid_rmse, valid_r2, valid_corr = eval_epoch(model, validation_dataloader, cuda)
print(' - {header:12} loss: {loss: 8.3f}, rmse: {rmse: 8.3f}, r2: {r2: 8.3f}, corr: {corr: 8.3f}, '
'elapse: {elapse:3.3f} min'.
format(header=f"({'Validation'})", loss=valid_loss,
rmse=valid_rmse, r2=valid_r2, corr=valid_corr, elapse=(time.time() - start) / 60), file=f,
flush=True)
checkpoint = {'epoch': epoch_i, 'model_state_dict': model.state_dict()}
torch.save(checkpoint, '{}/{}_{}_epoch{}.tar'.format(out_dir, exp_idx, year, epoch_i))
if valid_rmse < valid_rmse_min:
eval_test_best_only(test_dataloader, y_test, test_batch_size, model, epoch_i, f)
torch.save(checkpoint, '{}/{}_{}_best.tar'.format(out_dir, exp_idx, year))
print(' - [Info] The checkpoint file has been updated at epoch {}.'.format(epoch_i), file=f, flush=True)
valid_rmse_min = valid_rmse
if patience is not None:
epochs_without_improvement = 0
elif patience is not None:
epochs_without_improvement += 1
if epochs_without_improvement == patience:
                    print('Early stopping!', file=f, flush=True)
return epoch_i + 1
return n_epochs
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/train_c3d.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from crop_yield_prediction.dataloader import semi_cropyield_dataloader
import os
import time
from math import sqrt
from sklearn.metrics import r2_score, mean_squared_error
from scipy.stats import pearsonr
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn.functional as F
def prep_data(batch_X, batch_y, cuda):
batch_X, batch_y = Variable(batch_X), Variable(batch_y)
if cuda:
batch_X, batch_y = batch_X.cuda(), batch_y.cuda()
return batch_X, batch_y
def train_epoch(model, train_dataloader, tilenet_margin, tilenet_l2, tilenet_ltn, unsup_weight, optimizer, cuda):
''' Epoch operation in training phase'''
model.train()
if cuda:
if torch.cuda.device_count() > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
model = torch.nn.DataParallel(model)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)
n_batches = len(train_dataloader)
sum_loss_dic = {}
for loss_type in ['loss', 'loss_supervised', 'loss_unsupervised',
'l_n', 'l_d', 'l_nd', 'sn_loss', 'tn_loss', 'norm_loss']:
sum_loss_dic[loss_type] = 0
for batch_X, batch_y in train_dataloader:
batch_X, batch_y = prep_data(batch_X, batch_y, cuda)
# forward
optimizer.zero_grad()
emb_triplets, pred = model(batch_X, unsup_weight)
loss_func = torch.nn.MSELoss()
loss_supervised = loss_func(pred, batch_y)
if unsup_weight != 0:
loss_unsupervised, l_n, l_d, l_nd, sn_loss, tn_loss, norm_loss = triplet_loss(emb_triplets,
tilenet_margin, tilenet_l2, tilenet_ltn)
loss = (1 - unsup_weight) * loss_supervised + unsup_weight * loss_unsupervised
else:
loss = loss_supervised
loss.backward()
optimizer.step()
# note keeping
sum_loss_dic['loss'] += loss.item()
sum_loss_dic['loss_supervised'] += loss_supervised.item()
if unsup_weight != 0:
sum_loss_dic['loss_unsupervised'] += loss_unsupervised.item()
sum_loss_dic['l_n'] += l_n.item()
sum_loss_dic['l_d'] += l_d.item()
sum_loss_dic['l_nd'] += l_nd.item()
sum_loss_dic['sn_loss'] += sn_loss.item()
sum_loss_dic['tn_loss'] += tn_loss.item()
if tilenet_l2 != 0:
sum_loss_dic['norm_loss'] += norm_loss.item()
avg_loss_dic = {}
for loss_type in sum_loss_dic.keys():
avg_loss_dic[loss_type] = sum_loss_dic[loss_type] / n_batches
return avg_loss_dic
def cal_performance(prediction, y):
rmse = np.around(sqrt(mean_squared_error(y, prediction)), 3)
r2 = np.around(r2_score(y, prediction), 3)
corr = tuple(map(lambda x: np.around(x, 3), pearsonr(y, prediction)))[0]
return rmse, r2, corr
def triplet_loss(emb_triplets, margin, l2, ltn):
dim = emb_triplets.shape[-1]
z_a = emb_triplets[:, :, 0, :]
z_tn = emb_triplets[:, :, 1, :]
z_sn = emb_triplets[:, :, 2, :]
z_d = emb_triplets[:, :, 3, :]
# average over timesteps
l_n = torch.mean(torch.sqrt(((z_a - z_sn) ** 2).sum(dim=2)), dim=1)
l_d = - torch.mean(torch.sqrt(((z_a - z_d) ** 2).sum(dim=2)), dim=1)
sn_loss = F.relu(l_n + l_d + margin)
tn_loss = torch.mean(torch.sqrt(((z_a - z_tn) ** 2).sum(dim=2)), dim=1)
# average by #samples in mini-batch
l_n = torch.mean(l_n)
l_d = torch.mean(l_d)
l_nd = torch.mean(l_n + l_d)
sn_loss = torch.mean(sn_loss)
tn_loss = torch.mean(tn_loss)
loss = (1 - ltn) * sn_loss + ltn * tn_loss
norm_loss = 0
if l2 != 0:
z_a_norm = torch.sqrt((z_a ** 2).sum(dim=2))
z_sn_norm = torch.sqrt((z_sn ** 2).sum(dim=2))
z_d_norm = torch.sqrt((z_d ** 2).sum(dim=2))
z_tn_norm = torch.sqrt((z_tn ** 2).sum(dim=2))
norm_loss = torch.mean(z_a_norm + z_sn_norm + z_d_norm + z_tn_norm) / (dim ** 0.5)
loss += l2 * norm_loss
return loss, l_n, l_d, l_nd, sn_loss, tn_loss, norm_loss
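# Shape note (descriptive comment, inferred from the indexing above): emb_triplets is
# expected to have shape (batch, timesteps, 4, embedding_dim), where the third axis
# holds, in order, the anchor tile, its temporal neighbor, its spatial neighbor, and a
# distant tile. The returned loss mixes the spatial-neighbor triplet term and the
# temporal-neighbor term weighted by ltn, plus an optional l2 penalty on embedding norms.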
def eval_epoch(model, validation_dataloader, tilenet_margin, tilenet_l2, tilenet_ltn, unsup_weight, cuda):
''' Epoch operation in evaluation phase '''
model.eval()
if cuda:
if torch.cuda.device_count() > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
model = torch.nn.DataParallel(model)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)
n_batches = len(validation_dataloader)
n_samples = len(validation_dataloader.dataset)
batch_size = validation_dataloader.batch_size
predictions = torch.zeros(n_samples)
# collect y as batch_y has been shuffled
y = torch.zeros(n_samples)
sum_loss_dic = {}
for loss_type in ['loss', 'loss_supervised', 'loss_unsupervised',
'l_n', 'l_d', 'l_nd', 'sn_loss', 'tn_loss', 'norm_loss']:
sum_loss_dic[loss_type] = 0
with torch.no_grad():
for i, (batch_X, batch_y) in enumerate(validation_dataloader):
batch_X, batch_y = prep_data(batch_X, batch_y, cuda)
# forward
emb_triplets, pred = model(batch_X, unsup_weight)
loss_func = torch.nn.MSELoss()
loss_supervised = loss_func(pred, batch_y)
if unsup_weight != 0:
loss_unsupervised, l_n, l_d, l_nd, sn_loss, tn_loss, norm_loss = triplet_loss(emb_triplets,
tilenet_margin, tilenet_l2, tilenet_ltn)
loss = (1 - unsup_weight) * loss_supervised + unsup_weight * loss_unsupervised
else:
loss = loss_supervised
start = i * batch_size
end = start + batch_size if i != n_batches - 1 else n_samples
predictions[start:end] = pred
y[start:end] = batch_y
sum_loss_dic['loss'] += loss.item()
sum_loss_dic['loss_supervised'] += loss_supervised.item()
if unsup_weight != 0:
sum_loss_dic['loss_unsupervised'] += loss_unsupervised.item()
sum_loss_dic['l_n'] += l_n.item()
sum_loss_dic['l_d'] += l_d.item()
sum_loss_dic['l_nd'] += l_nd.item()
sum_loss_dic['sn_loss'] += sn_loss.item()
sum_loss_dic['tn_loss'] += tn_loss.item()
if tilenet_l2 != 0:
sum_loss_dic['norm_loss'] += norm_loss.item()
if cuda:
predictions, y = predictions.cpu(), y.cpu()
predictions, y = predictions.data.numpy(), y.data.numpy()
rmse, r2, corr = cal_performance(predictions, y)
avg_loss_dic = {}
for loss_type in sum_loss_dic.keys():
avg_loss_dic[loss_type] = sum_loss_dic[loss_type] / n_batches
return avg_loss_dic, rmse, r2, corr
def eval_test(X_dir, X_test_indices, y_test, n_tsteps, max_index, n_triplets_per_file, batch_size, model_dir, model, epochs, year,
exp_idx, log_file):
with open(log_file, 'a') as f:
print('Predict year {}'.format(year), file=f, flush=True)
print('Test size {}'.format(y_test.shape[0]), file=f, flush=True)
print('Experiment {}'.format(exp_idx), file=f, flush=True)
cuda = torch.cuda.is_available()
models = []
for epoch_i in range(epochs):
models.append('{}/{}_{}_epoch{}.tar'.format(model_dir, exp_idx, year, epoch_i))
best_model = '{}/{}_{}_best.tar'.format(model_dir, exp_idx, year)
models.append(best_model)
for model_file in models:
if cuda:
if torch.cuda.device_count() > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
model = torch.nn.DataParallel(model)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)
checkpoint = torch.load(model_file) if cuda else torch.load(model_file, map_location='cpu')
model.load_state_dict(checkpoint['model_state_dict'])
model.eval()
if cuda:
model.cuda()
test_dataloader = semi_cropyield_dataloader(X_dir, X_test_indices[0], X_test_indices[1], y_test, n_tsteps,
max_index, n_triplets_per_file, batch_size, shuffle=False, num_workers=4)
n_batches = len(test_dataloader)
n_samples = len(y_test)
predictions = torch.zeros(n_samples)
with torch.no_grad():
for i, (batch_X, batch_y) in enumerate(test_dataloader):
batch_X, batch_y = prep_data(batch_X, batch_y, cuda)
# forward
_, pred = model(batch_X, unsup_weight=0)
start = i * batch_size
end = start + batch_size if i != n_batches - 1 else n_samples
predictions[start:end] = pred
if cuda:
predictions = predictions.cpu()
predictions = predictions.data.numpy()
rmse, r2, corr = cal_performance(predictions, y_test)
if 'epoch' in model_file:
print(' - {header:12} epoch: {epoch: 5}, rmse: {rmse: 8.3f}, r2: {r2: 8.3f}, corr: {corr: 8.3f}'.
format(header=f"({'Test'})", epoch=checkpoint['epoch'], rmse=rmse, r2=r2, corr=corr), file=f, flush=True)
else:
print(' - {header:12} best selected based on validation set, '
'rmse: {rmse: 8.3f}, r2: {r2: 8.3f}, corr: {corr: 8.3f}'.
format(header=f"({'Test'})", rmse=rmse, r2=r2, corr=corr), file=f, flush=True)
return predictions, rmse, r2, corr
def eval_test_best_only(test_dataloader, y_test, batch_size, model, epoch, log_file):
cuda = torch.cuda.is_available()
model.eval()
if cuda:
if torch.cuda.device_count() > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
model = torch.nn.DataParallel(model)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)
n_batches = len(test_dataloader)
n_samples = len(y_test)
predictions = torch.zeros(n_samples)
with torch.no_grad():
for i, (batch_X, batch_y) in enumerate(test_dataloader):
batch_X, batch_y = prep_data(batch_X, batch_y, cuda)
# forward
_, pred = model(batch_X, unsup_weight=0)
start = i * batch_size
end = start + batch_size if i != n_batches - 1 else n_samples
predictions[start:end] = pred
if cuda:
predictions = predictions.cpu()
predictions = predictions.data.numpy()
rmse, r2, corr = cal_performance(predictions, y_test)
print(' - {header:12} epoch: {epoch: 5}, rmse: {rmse: 8.3f}, r2: {r2: 8.3f}, corr: {corr: 8.3f}'.
format(header=f"({'Test_Best'})", epoch=epoch, rmse=rmse, r2=r2, corr=corr), file=log_file, flush=True)
def train_attention(model, X_dir, X_train_indices, y_train, X_valid_indices, y_valid, X_test_indices, y_test, n_tsteps,
max_index, n_triplets_per_file, tilenet_margin, tilenet_l2, tilenet_ltn, unsup_weight, patience,
optimizer, batch_size, test_batch_size, n_epochs, out_dir, year, exp_idx, log_file):
with open(log_file, 'a') as f:
print('Predict year {}......'.format(year), file=f, flush=True)
print('Train size {}, valid size {}'.format(y_train.shape[0], y_valid.shape[0]), file=f, flush=True)
print('Experiment {}'.format(exp_idx), file=f, flush=True)
cuda = torch.cuda.is_available()
train_dataloader = semi_cropyield_dataloader(X_dir, X_train_indices[0], X_train_indices[1], y_train, n_tsteps,
max_index, n_triplets_per_file, batch_size, shuffle=True,
num_workers=4)
validation_dataloader = semi_cropyield_dataloader(X_dir, X_valid_indices[0], X_valid_indices[1], y_valid, n_tsteps,
max_index, n_triplets_per_file, batch_size, shuffle=False,
num_workers=4)
test_dataloader = semi_cropyield_dataloader(X_dir, X_test_indices[0], X_test_indices[1], y_test, n_tsteps,
max_index, n_triplets_per_file, test_batch_size, shuffle=False,
num_workers=4)
valid_rmse_min = np.inf
if patience is not None:
epochs_without_improvement = 0
for epoch_i in range(n_epochs):
print('[ Epoch', epoch_i, ']', file=f, flush=True)
start = time.time()
train_loss = train_epoch(model, train_dataloader, tilenet_margin, tilenet_l2, tilenet_ltn, unsup_weight,
optimizer, cuda)
print(' - {header:12} avg loss: {loss: 8.3f}, supervised loss: {supervised_loss: 8.3f}, '
'unsupervised loss: {unsupervised_loss: 8.3f}, elapse: {elapse:3.3f} min'.
format(header=f"({'Training'})", loss=train_loss['loss'], supervised_loss=train_loss['loss_supervised'],
unsupervised_loss=train_loss['loss_unsupervised'],
elapse=(time.time() - start) / 60), file=f, flush=True)
# if epoch_i in [20, 40]:
# for param_group in optimizer.param_groups:
# param_group['lr'] /= 10
start = time.time()
valid_loss, valid_rmse, valid_r2, valid_corr = eval_epoch(model, validation_dataloader,
tilenet_margin, tilenet_l2, tilenet_ltn, unsup_weight,
cuda)
print(' - {header:12} loss: {loss: 8.3f}, supervised loss: {supervised_loss: 8.3f}, '
'unsupervised loss: {unsupervised_loss: 8.3f}, l_n loss: {l_n: 8.3f}, l_d loss: {l_d: 8.3f}, '
'l_nd loss: {l_nd: 8.3f}, sn_loss: {sn_loss: 8.3f}, tn_loss: {tn_loss: 8.3f}, norm_loss: {norm_loss: 8.3f}, '
'rmse: {rmse: 8.3f}, r2: {r2: 8.3f}, corr: {corr: 8.3f}, elapse: {elapse:3.3f} min'.
format(header=f"({'Validation'})", loss=valid_loss['loss'], supervised_loss=valid_loss['loss_supervised'],
unsupervised_loss=valid_loss['loss_unsupervised'], l_n=valid_loss['l_n'], l_d=valid_loss['l_d'],
l_nd=valid_loss['l_nd'], sn_loss=valid_loss['sn_loss'], tn_loss=valid_loss['tn_loss'], norm_loss=valid_loss['norm_loss'],
rmse=valid_rmse, r2=valid_r2, corr=valid_corr, elapse=(time.time() - start) / 60), file=f, flush=True)
checkpoint = {'epoch': epoch_i, 'model_state_dict': model.state_dict()}
torch.save(checkpoint, '{}/{}_{}_epoch{}.tar'.format(out_dir, exp_idx, year, epoch_i))
if valid_rmse < valid_rmse_min:
eval_test_best_only(test_dataloader, y_test, test_batch_size, model, epoch_i, f)
torch.save(checkpoint, '{}/{}_{}_best.tar'.format(out_dir, exp_idx, year))
print(' - [Info] The checkpoint file has been updated at epoch {}.'.format(epoch_i), file=f, flush=True)
valid_rmse_min = valid_rmse
if patience is not None:
epochs_without_improvement = 0
elif patience is not None:
epochs_without_improvement += 1
if epochs_without_improvement == patience:
                    print('Early stopping!', file=f, flush=True)
return epoch_i + 1
return n_epochs
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/train_semi_transformer.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from crop_yield_prediction.dataloader import cnn_lstm_dataloader
import time
from math import sqrt
from sklearn.metrics import r2_score, mean_squared_error
from scipy.stats import pearsonr
import numpy as np
import torch
from torch.autograd import Variable
def prep_data(batch_X, batch_y, cuda):
batch_X, batch_y = Variable(batch_X), Variable(batch_y)
if cuda:
batch_X, batch_y = batch_X.cuda(), batch_y.cuda()
return batch_X, batch_y
def train_epoch(model, train_dataloader, optimizer, cuda):
''' Epoch operation in training phase'''
model.train()
if cuda:
model.cuda()
n_batches = len(train_dataloader)
sum_loss = 0
for batch_X, batch_y in train_dataloader:
batch_X, batch_y = prep_data(batch_X, batch_y, cuda)
# forward
optimizer.zero_grad()
pred = model(batch_X)
loss_func = torch.nn.MSELoss()
loss = loss_func(pred, batch_y)
loss.backward()
optimizer.step()
# note keeping
sum_loss += loss.item()
avg_loss = sum_loss / n_batches
return avg_loss
def cal_performance(prediction, y):
rmse = np.around(sqrt(mean_squared_error(y, prediction)), 3)
r2 = np.around(r2_score(y, prediction), 3)
corr = tuple(map(lambda x: np.around(x, 3), pearsonr(y, prediction)))[0]
return rmse, r2, corr
def eval_epoch(model, validation_dataloader, cuda):
''' Epoch operation in evaluation phase '''
model.eval()
if cuda:
model.cuda()
n_batches = len(validation_dataloader)
n_samples = len(validation_dataloader.dataset)
batch_size = validation_dataloader.batch_size
predictions = torch.zeros(n_samples)
# collect y as batch_y has been shuffled
y = torch.zeros(n_samples)
sum_loss = 0
with torch.no_grad():
for i, (batch_X, batch_y) in enumerate(validation_dataloader):
batch_X, batch_y = prep_data(batch_X, batch_y, cuda)
# forward
pred = model(batch_X)
loss_func = torch.nn.MSELoss()
loss = loss_func(pred, batch_y)
start = i * batch_size
end = start + batch_size if i != n_batches - 1 else n_samples
predictions[start:end] = pred
y[start:end] = batch_y
sum_loss += loss.item()
if cuda:
predictions, y = predictions.cpu(), y.cpu()
predictions, y = predictions.data.numpy(), y.data.numpy()
rmse, r2, corr = cal_performance(predictions, y)
avg_loss = sum_loss / n_batches
return avg_loss, rmse, r2, corr
def eval_test(X_dir, X_test_indices, y_test, n_tsteps, max_index, n_triplets_per_file, batch_size, model_dir, model, epochs, year,
exp_idx, log_file):
with open(log_file, 'a') as f:
print('Predict year {}'.format(year), file=f, flush=True)
print('Test size {}'.format(y_test.shape[0]), file=f, flush=True)
print('Experiment {}'.format(exp_idx), file=f, flush=True)
cuda = torch.cuda.is_available()
models = []
for epoch_i in range(epochs):
models.append('{}/{}_{}_epoch{}.tar'.format(model_dir, exp_idx, year, epoch_i))
best_model = '{}/{}_{}_best.tar'.format(model_dir, exp_idx, year)
models.append(best_model)
for model_file in models:
checkpoint = torch.load(model_file) if cuda else torch.load(model_file, map_location='cpu')
model.load_state_dict(checkpoint['model_state_dict'])
model.eval()
if cuda:
model.cuda()
test_dataloader = cnn_lstm_dataloader(X_dir, X_test_indices[0], X_test_indices[1], y_test, n_tsteps,
max_index, n_triplets_per_file, batch_size, shuffle=False, num_workers=4)
n_batches = len(test_dataloader)
n_samples = len(y_test)
predictions = torch.zeros(n_samples)
with torch.no_grad():
for i, (batch_X, batch_y) in enumerate(test_dataloader):
batch_X, batch_y = prep_data(batch_X, batch_y, cuda)
# forward
pred = model(batch_X)
start = i * batch_size
end = start + batch_size if i != n_batches - 1 else n_samples
predictions[start:end] = pred
if cuda:
predictions = predictions.cpu()
predictions = predictions.data.numpy()
rmse, r2, corr = cal_performance(predictions, y_test)
if 'epoch' in model_file:
print(' - {header:12} epoch: {epoch: 5}, rmse: {rmse: 8.3f}, r2: {r2: 8.3f}, corr: {corr: 8.3f}'.
format(header=f"({'Test'})", epoch=checkpoint['epoch'], rmse=rmse, r2=r2, corr=corr), file=f, flush=True)
else:
print(' - {header:12} best selected based on validation set, '
'rmse: {rmse: 8.3f}, r2: {r2: 8.3f}, corr: {corr: 8.3f}'.
format(header=f"({'Test'})", rmse=rmse, r2=r2, corr=corr), file=f, flush=True)
return predictions, rmse, r2, corr
def eval_test_best_only(test_dataloader, y_test, batch_size, model, epoch, log_file):
cuda = torch.cuda.is_available()
model.eval()
if cuda:
model.cuda()
n_batches = len(test_dataloader)
n_samples = len(y_test)
predictions = torch.zeros(n_samples)
with torch.no_grad():
for i, (batch_X, batch_y) in enumerate(test_dataloader):
batch_X, batch_y = prep_data(batch_X, batch_y, cuda)
# forward
pred = model(batch_X)
start = i * batch_size
end = start + batch_size if i != n_batches - 1 else n_samples
predictions[start:end] = pred
if cuda:
predictions = predictions.cpu()
predictions = predictions.data.numpy()
rmse, r2, corr = cal_performance(predictions, y_test)
print(' - {header:12} epoch: {epoch: 5}, rmse: {rmse: 8.3f}, r2: {r2: 8.3f}, corr: {corr: 8.3f}'.
format(header=f"({'Test_Best'})", epoch=epoch, rmse=rmse, r2=r2, corr=corr), file=log_file, flush=True)
def train_cnn_lstm(model, X_dir, X_train_indices, y_train, X_valid_indices, y_valid, X_test_indices, y_test, n_tsteps,
max_index, n_triplets_per_file, patience, optimizer, batch_size, test_batch_size, n_epochs, out_dir, year,
exp_idx, log_file):
with open(log_file, 'a') as f:
print('Predict year {}......'.format(year), file=f, flush=True)
print('Train size {}, valid size {}'.format(y_train.shape[0], y_valid.shape[0]), file=f, flush=True)
print('Experiment {}'.format(exp_idx), file=f, flush=True)
cuda = torch.cuda.is_available()
train_dataloader = cnn_lstm_dataloader(X_dir, X_train_indices[0], X_train_indices[1], y_train, n_tsteps,
max_index, n_triplets_per_file, batch_size, shuffle=True, num_workers=4)
validation_dataloader = cnn_lstm_dataloader(X_dir, X_valid_indices[0], X_valid_indices[1], y_valid, n_tsteps,
max_index, n_triplets_per_file, batch_size, shuffle=False, num_workers=4)
test_dataloader = cnn_lstm_dataloader(X_dir, X_test_indices[0], X_test_indices[1], y_test, n_tsteps,
max_index, n_triplets_per_file, test_batch_size, shuffle=False, num_workers=4)
valid_rmse_min = np.inf
if patience is not None:
epochs_without_improvement = 0
for epoch_i in range(n_epochs):
print('[ Epoch', epoch_i, ']', file=f, flush=True)
start = time.time()
train_loss = train_epoch(model, train_dataloader, optimizer, cuda)
print(' - {header:12} avg loss: {loss: 8.3f}, elapse: {elapse:3.3f} min'.
format(header=f"({'Training'})", loss=train_loss,
elapse=(time.time() - start) / 60), file=f, flush=True)
# if epoch_i in [20, 40]:
# for param_group in optimizer.param_groups:
# param_group['lr'] /= 10
start = time.time()
valid_loss, valid_rmse, valid_r2, valid_corr = eval_epoch(model, validation_dataloader, cuda)
print(' - {header:12} loss: {loss: 8.3f}, rmse: {rmse: 8.3f}, r2: {r2: 8.3f}, corr: {corr: 8.3f}, '
'elapse: {elapse:3.3f} min'.
format(header=f"({'Validation'})", loss=valid_loss,
rmse=valid_rmse, r2=valid_r2, corr=valid_corr, elapse=(time.time() - start) / 60), file=f,
flush=True)
checkpoint = {'epoch': epoch_i, 'model_state_dict': model.state_dict()}
torch.save(checkpoint, '{}/{}_{}_epoch{}.tar'.format(out_dir, exp_idx, year, epoch_i))
if valid_rmse < valid_rmse_min:
eval_test_best_only(test_dataloader, y_test, test_batch_size, model, epoch_i, f)
torch.save(checkpoint, '{}/{}_{}_best.tar'.format(out_dir, exp_idx, year))
print(' - [Info] The checkpoint file has been updated at epoch {}.'.format(epoch_i), file=f, flush=True)
valid_rmse_min = valid_rmse
if patience is not None:
epochs_without_improvement = 0
elif patience is not None:
epochs_without_improvement += 1
if epochs_without_improvement == patience:
                    print('Early stopping!', file=f, flush=True)
return epoch_i + 1
return n_epochs
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/train_cnn_lstm.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import matplotlib.pyplot as plt
from collections import defaultdict
def plot_loss(params):
out_dir = '../../results/spatial_temporal/plots/{}'.format(params[:-4])
os.makedirs(out_dir, exist_ok=True)
prediction_log = '../../results/spatial_temporal/prediction_logs/{}'.format(params)
train_epochs_dic = defaultdict(lambda: defaultdict(list))
train_loss_dic, train_super_loss_dic, train_unsuper_loss_dic = (defaultdict(lambda: defaultdict(list)) for _ in range(3))
valid_loss_dic, valid_super_loss_dic, valid_unsuper_loss_dic = (defaultdict(lambda: defaultdict(list)) for _ in range(3))
valid_l_n_loss_dic, valid_l_d_loss_dic, valid_l_nd_loss_dic, valid_sn_loss_dic, valid_tn_loss_dic, valid_norm_loss_dic = \
(defaultdict(lambda: defaultdict(list)) for _ in range(6))
valid_rmse_dic, valid_r2_dic, valid_corr_dic = (defaultdict(lambda: defaultdict(list)) for _ in range(3))
test_epochs_dic = defaultdict(lambda: defaultdict(list))
test_rmse_dic, test_r2_dic, test_corr_dic = (defaultdict(lambda: defaultdict(list)) for _ in range(3))
exp = 0
year = 0
with open(prediction_log) as f:
content = f.readlines()
for line in content:
line = line.strip()
if line.startswith('Predict'):
year = int(line.split()[2][:4])
if line.startswith('Experiment'):
exp = int(line.split()[1])
if 'Epoch' in line:
train_epochs_dic[year][exp].append(int(line.split()[2]))
if 'Training' in line:
ws = line.split()
train_loss_dic[year][exp].append(float(ws[4][:-1]))
train_super_loss_dic[year][exp].append(float(ws[7][:-1]))
train_unsuper_loss_dic[year][exp].append(float(ws[10][:-1]))
if 'Validation' in line:
ws = line.split()
valid_loss_dic[year][exp].append(float(ws[3][:-1]))
valid_super_loss_dic[year][exp].append(float(ws[6][:-1]))
valid_unsuper_loss_dic[year][exp].append(float(ws[9][:-1]))
valid_l_n_loss_dic[year][exp].append(float(ws[12][:-1]))
valid_l_d_loss_dic[year][exp].append(float(ws[15][:-1]))
valid_l_nd_loss_dic[year][exp].append(float(ws[18][:-1]))
valid_sn_loss_dic[year][exp].append(float(ws[20][:-1]))
valid_tn_loss_dic[year][exp].append(float(ws[22][:-1]))
valid_norm_loss_dic[year][exp].append(float(ws[24][:-1]))
valid_rmse_dic[year][exp].append(float(ws[26][:-1]))
valid_r2_dic[year][exp].append(float(ws[28][:-1]))
valid_corr_dic[year][exp].append(float(ws[30][:-1]))
if '(Test)' in line and 'epoch' in line:
ws = line.split()
test_epochs_dic[year][exp].append(int(ws[3][:-1]))
test_rmse_dic[year][exp].append(float(ws[5][:-1]))
test_r2_dic[year][exp].append(float(ws[7][:-1]))
test_corr_dic[year][exp].append(float(ws[9]))
for year in train_epochs_dic.keys():
n_exps = len(train_epochs_dic[year])
for i in range(n_exps):
# assert train_epochs_dic[year][i] == test_epochs_dic[year][i], params
plt.plot(train_epochs_dic[year][i], train_loss_dic[year][i], label='Training')
plt.plot(train_epochs_dic[year][i], valid_loss_dic[year][i], label='Validation')
plt.title(params, fontsize=8)
plt.grid(True)
plt.legend()
plt.savefig('{}/{}_{}_total_loss.jpg'.format(out_dir, year, i), dpi=300)
plt.close()
plt.plot(train_epochs_dic[year][i], train_super_loss_dic[year][i], label='Training')
plt.plot(train_epochs_dic[year][i], valid_super_loss_dic[year][i], label='Validation')
plt.title(params, fontsize=8)
plt.grid(True)
plt.legend()
plt.savefig('{}/{}_{}_supervised_loss.jpg'.format(out_dir, year, i), dpi=300)
plt.close()
plt.plot(train_epochs_dic[year][i], train_unsuper_loss_dic[year][i], label='Training')
plt.plot(train_epochs_dic[year][i], valid_unsuper_loss_dic[year][i], label='Validation')
plt.title(params, fontsize=8)
plt.grid(True)
plt.legend()
plt.savefig('{}/{}_{}_unsupervised_loss.jpg'.format(out_dir, year, i), dpi=300)
plt.close()
# valid_l_n_loss, valid_l_d_loss, valid_l_nd_loss, valid_sn_loss, valid_tn_loss, valid_norm_loss
plt.plot(train_epochs_dic[year][i], valid_l_n_loss_dic[year][i], label='l_n_loss')
plt.plot(train_epochs_dic[year][i], valid_l_d_loss_dic[year][i], label='l_d_loss')
plt.plot(train_epochs_dic[year][i], valid_l_nd_loss_dic[year][i], label='l_nd_loss')
plt.plot(train_epochs_dic[year][i], valid_sn_loss_dic[year][i], label='spatial_neighbor_loss')
plt.plot(train_epochs_dic[year][i], valid_tn_loss_dic[year][i], label='temporal_neighbor_loss')
plt.plot(train_epochs_dic[year][i], valid_norm_loss_dic[year][i], label='l2_norm_loss')
plt.title(params, fontsize=8)
plt.grid(True)
plt.legend()
plt.savefig('{}/{}_{}_validation_various_losses.jpg'.format(out_dir, year, i), dpi=300)
plt.close()
plt.plot(train_epochs_dic[year][i], valid_rmse_dic[year][i], label='Validation')
plt.plot(test_epochs_dic[year][i], test_rmse_dic[year][i], label='Test')
plt.title(params, fontsize=8)
plt.grid(True)
plt.legend()
plt.savefig('{}/{}_{}_rmse.jpg'.format(out_dir, year, i), dpi=300)
plt.close()
plt.plot(train_epochs_dic[year][i], valid_r2_dic[year][i], label='Validation')
plt.plot(test_epochs_dic[year][i], test_r2_dic[year][i], label='Test')
plt.title(params, fontsize=8)
plt.grid(True)
plt.legend()
plt.savefig('{}/{}_{}_r2.jpg'.format(out_dir, year, i), dpi=300)
plt.close()
plt.plot(train_epochs_dic[year][i], valid_corr_dic[year][i], label='Validation')
plt.plot(test_epochs_dic[year][i], test_corr_dic[year][i], label='Test')
plt.title(params, fontsize=8)
plt.grid(True)
plt.legend()
plt.savefig('{}/{}_{}_corr.jpg'.format(out_dir, year, i), dpi=300)
plt.close()
if __name__ == '__main__':
for prediction_log in os.listdir('../../results/spatial_temporal/prediction_logs'):
if prediction_log.endswith('.txt'):
print(prediction_log)
plot_loss(prediction_log)
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/plot/plot_loss.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from bs4 import BeautifulSoup
from pathlib import Path
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
from collections import defaultdict
import numpy as np
import seaborn as sns
# colors = sns.color_palette("RdYlBu", 10).as_hex()
colors = ['#cdeaf3', '#9bcce2', '#fff1aa', '#fece7f', '#fa9b58', '#ee613e', '#d22b27']
SOYBEAN_QUANTILES = {0.05: 20.0, 0.2: 29.5, 0.4: 36.8, 0.6: 43.0, 0.8: 49.3, 0.95: 56.8, 0.0: 0.7, 1.0: 82.3}
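# Descriptive note (added comment): these quantile breakpoints are used below as the
# color bin boundaries for the county-level yield map and as the colorbar tick values.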
def crop_yield_plot(data_dict, savepath, quantiles=SOYBEAN_QUANTILES):
"""
For the most part, reformatting of
https://github.com/JiaxuanYou/crop_yield_prediction/blob/master/6%20result_analysis/yield_map.py
"""
# load the svg file
svg = Path('data/counties.svg').open('r').read()
# Load into Beautiful Soup
soup = BeautifulSoup(svg, features="html.parser")
# Find counties
paths = soup.findAll('path')
path_style = 'font-size:12px;fill-rule:nonzero;stroke:#FFFFFF;stroke-opacity:1;stroke-width:0.1' \
';stroke-miterlimit:4;stroke-dasharray:none;stroke-linecap:butt;marker-start' \
':none;stroke-linejoin:bevel;fill:'
for p in paths:
if p['id'] not in ["State_Lines", "separator"]:
try:
rate = data_dict[p['id']]
except KeyError:
continue
if rate > quantiles[0.95]:
color_class = 6
elif rate > quantiles[0.8]:
color_class = 5
elif rate > quantiles[0.6]:
color_class = 4
elif rate > quantiles[0.4]:
color_class = 3
elif rate > quantiles[0.2]:
color_class = 2
elif rate > quantiles[0.05]:
color_class = 1
else:
color_class = 0
color = colors[color_class]
p['style'] = path_style + color
soup = soup.prettify()
with savepath.open('w') as f:
f.write(soup)
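# Descriptive note (not in the original source): data_dict is keyed by 5-digit county
# FIPS strings (2-digit state code + 3-digit county code, matching the path ids in
# counties.svg), and each value is the yield used to pick a color bin above.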
def save_colorbar(savedir, quantiles=SOYBEAN_QUANTILES):
"""
For the most part, reformatting of
https://github.com/JiaxuanYou/crop_yield_prediction/blob/master/6%20result_analysis/yield_map.py
"""
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.02, 0.8])
cmap = mpl.colors.ListedColormap(colors[1:-1])
cmap.set_over(colors[-1])
cmap.set_under(colors[0])
bounds = [quantiles[x] for x in [0.05, 0.2, 0.4, 0.6, 0.8, 0.95]]
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
cb = mpl.colorbar.ColorbarBase(ax, cmap=cmap,
norm=norm,
# to use 'extend', you must
# specify two extra boundaries:
boundaries=[quantiles[0.0]] + bounds + [quantiles[1.0]],
extend='both',
ticks=bounds, # optional
spacing='proportional',
orientation='vertical')
plt.savefig('{}/colorbar.jpg'.format(savedir), dpi=300, bbox_inches='tight')
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/plot/plot_crop_yield.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .plot_crop_yield import crop_yield_plot
from .plot_crop_yield_prediction_error import crop_yield_prediction_error_plot
__all__ = ['crop_yield_plot',
'crop_yield_prediction_error_plot']
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/plot/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from bs4 import BeautifulSoup
from pathlib import Path
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
from collections import defaultdict
import numpy as np
import seaborn as sns
# colors = sns.color_palette("RdYlBu", 10).as_hex()
colors = ["#b2182b", "#d6604d", "#f4a582", "#fddbc7", "#d1e5f0", "#92c5de", "#4393c3", "#2166ac"]
def crop_yield_prediction_error_plot(data_dict, savepath):
"""
For the most part, reformatting of
https://github.com/JiaxuanYou/crop_yield_prediction/blob/master/6%20result_analysis/yield_map.py
"""
# load the svg file
svg = Path('data/counties.svg').open('r').read()
# Load into Beautiful Soup
soup = BeautifulSoup(svg, features="html.parser")
# Find counties
paths = soup.findAll('path')
path_style = 'font-size:12px;fill-rule:nonzero;stroke:#FFFFFF;stroke-opacity:1;stroke-width:0.1' \
';stroke-miterlimit:4;stroke-dasharray:none;stroke-linecap:butt;marker-start' \
':none;stroke-linejoin:bevel;fill:'
for p in paths:
if p['id'] not in ["State_Lines", "separator"]:
try:
rate = data_dict[p['id']]
except KeyError:
continue
if rate > 15:
color_class = 7
elif rate > 10:
color_class = 6
elif rate > 5:
color_class = 5
elif rate > 0:
color_class = 4
elif rate > -5:
color_class = 3
elif rate > -10:
color_class = 2
elif rate > -15:
color_class = 1
else:
color_class = 0
color = colors[color_class]
p['style'] = path_style + color
soup = soup.prettify()
with savepath.open('w') as f:
f.write(soup)
def save_colorbar(savedir):
"""
For the most part, reformatting of
https://github.com/JiaxuanYou/crop_yield_prediction/blob/master/6%20result_analysis/yield_map.py
"""
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.02, 0.8])
cmap = mpl.colors.ListedColormap(colors[1:-1])
cmap.set_over(colors[-1])
cmap.set_under(colors[0])
bounds = [-15, -10, -5, 0, 5, 10, 15]
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
cb = mpl.colorbar.ColorbarBase(ax, cmap=cmap,
norm=norm,
# to use 'extend', you must
# specify two extra boundaries:
boundaries=[-20] + bounds + [20],
extend='both',
ticks=bounds, # optional
spacing='proportional',
orientation='vertical')
plt.savefig('{}/colorbar.jpg'.format(savedir), dpi=300, bbox_inches='tight')
def process_yield_data():
important_columns = ['Year', 'State ANSI', 'County ANSI', 'Value']
yield_data = pd.read_csv('../../processed_data/crop_yield/yield_data.csv').dropna(
subset=important_columns, how='any')[['Year', 'State ANSI', 'County ANSI', 'Value']]
yield_data.columns = ['Year', 'State', 'County', 'Value']
yield_per_year_dic = defaultdict(dict)
for yd in yield_data.itertuples():
year, state, county, value = yd.Year, yd.State, int(yd.County), yd.Value
state = str(state).zfill(2)
county = str(county).zfill(3)
yield_per_year_dic[year][state+county] = value
return yield_per_year_dic
if __name__ == '__main__':
yield_data = process_yield_data()
for year in range(2003, 2017):
crop_yield_prediction_error_plot(yield_data[year], Path('../../processed_data/crop_yield/plots/{}_yield.html'.format(year)))
values = np.array(list(yield_data[year].values()))
print(year, np.percentile(values, 0), np.percentile(values, 25), np.percentile(values, 50),
np.percentile(values, 75), np.percentile(values, 100))
save_colorbar('../../processed_data/crop_yield/plots')
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/plot/plot_crop_yield_prediction_error.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from math import sqrt
from sklearn.metrics import r2_score, mean_squared_error
from scipy.stats import pearsonr
import numpy as np
import pandas as pd
from crop_yield_prediction.plot import crop_yield_plot
from crop_yield_prediction.plot import crop_yield_prediction_error_plot
def get_statistics(y, prediction, valid):
corr = tuple(map(lambda x: np.around(x, 3), pearsonr(y, prediction)))
r2 = np.around(r2_score(y, prediction), 3)
rmse = np.around(sqrt(mean_squared_error(y, prediction)), 3)
if valid:
print('Validation - Pearson correlation: {}, R2: {}, RMSE: {}'.format(corr, r2, rmse))
else:
print('Test - Pearson correlation: {}, R2: {}, RMSE: {}'.format(corr, r2, rmse))
return corr, r2, rmse
def get_latest_model_dir(model_dir):
latest_folder = sorted([x for x in os.listdir(model_dir) if x.startswith('log')], key=lambda x: int(x[3:]))[-1]
return os.path.join(model_dir, latest_folder)
def get_latest_model(model_dir, cv=None):
log_folders = sorted([x for x in os.listdir(model_dir) if x.startswith('log')], key=lambda x: int(x[3:]))[-1]
check_dir = os.path.join(model_dir, log_folders) if cv is None else os.path.join(model_dir, log_folders, cv)
latest_model = sorted([x for x in os.listdir(check_dir) if x.endswith('.tar')],
key=lambda x: int(x.split('.')[0][13:]))[-1]
return os.path.join(check_dir, latest_model)
def get_latest_models_cvs(model_dir, cvs):
log_folders = sorted([x for x in os.listdir(model_dir) if x.startswith('log')], key=lambda x: int(x[3:]))[-1]
latest_models = []
for cv in cvs:
check_dir = os.path.join(model_dir, log_folders, cv)
latest_model = sorted([x for x in os.listdir(check_dir) if x.endswith('.tar')],
key=lambda x: int(x.split('.')[0][13:]))[-1]
latest_models.append(os.path.join(check_dir, latest_model))
return latest_models
def plot_predict(prediction, dim, savepath):
pred_dict = {}
for idx, pred in zip(dim, prediction):
state, county = idx
state = str(int(state)).zfill(2)
county = str(int(county)).zfill(3)
pred_dict[state + county] = pred
crop_yield_plot(pred_dict, savepath)
def plot_predict_error(prediction, real_values, dim, savepath):
test_pred_error = prediction - real_values
pred_dict = {}
for idx, err in zip(dim, test_pred_error):
state, county = idx
state = str(int(state)).zfill(2)
county = str(int(county)).zfill(3)
pred_dict[state + county] = err
crop_yield_prediction_error_plot(pred_dict, savepath)
def output_to_csv_no_spatial(results_dic, out_dir):
if not os.path.exists(out_dir):
os.makedirs(out_dir)
years = sorted(results_dic.keys())
model_types = sorted(results_dic[years[0]].keys())
for dt in ['valid', 'test']:
data = []
for year in years:
year_data, columns = [], []
for st in ['corr', 'r2', 'rmse']:
for mt in model_types:
year_data.append(results_dic[year][mt]['{}_{}'.format(dt, st)])
columns.append('{}_{}'.format(mt, '{}_{}'.format(dt, st)))
data.append(year_data)
data = pd.DataFrame(data, columns=columns, index=years)
data.to_csv('{}/{}.csv'.format(out_dir, dt))
def output_to_csv_complex(results_dic, out_dir):
years = sorted(results_dic.keys())
model_types = sorted(results_dic[years[0]].keys())
for dt in ['train', 'test']:
data = []
for year in years:
year_data, columns = [], []
for st in ['corr', 'r2', 'rmse']:
for mt in model_types:
year_data.append(results_dic[year][mt]['{}_{}'.format(dt, st)])
columns.append('{}_{}'.format(mt, '{}_{}'.format(dt, st)))
data.append(year_data)
data = pd.DataFrame(data, columns=columns, index=years)
data.to_csv('{}/{}.csv'.format(out_dir, dt))
def output_to_csv_simple(results_dic, out_dir):
years = sorted(results_dic.keys())
data = []
for year in years:
year_data, columns = [], []
for st in ['corr', 'r2', 'rmse']:
year_data.append(results_dic[year]['test_'+st])
columns.append(st)
data.append(year_data)
data = pd.DataFrame(data, columns=columns, index=years)
data.to_csv('{}/test.csv'.format(out_dir))
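# Illustrative usage sketch (not part of the original module); the arrays below are
# made-up values that only demonstrate the expected call signature of get_statistics.
if __name__ == '__main__':
    y_true = np.array([40.0, 42.5, 38.0, 45.0])
    y_pred = np.array([41.0, 41.5, 39.0, 44.0])
    corr, r2, rmse = get_statistics(y_true, y_pred, valid=False)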
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/utils/train_utils.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .timing import timeit, timenow
from .logger import Logger
from .train_utils import get_statistics
from .train_utils import get_latest_model_dir
from .train_utils import get_latest_model
from .train_utils import get_latest_models_cvs
from .train_utils import plot_predict
from .train_utils import plot_predict_error
from .train_utils import output_to_csv_no_spatial
from .train_utils import output_to_csv_complex
from .train_utils import output_to_csv_simple
__all__ = ['timeit', 'timenow',
'Logger',
'get_statistics', 'get_latest_model_dir', 'get_latest_model', 'get_latest_models_cvs',
'plot_predict', 'plot_predict_error',
'output_to_csv_no_spatial', 'output_to_csv_complex', 'output_to_csv_simple']
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/utils/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
class Logger(object):
def __init__(self, filename="Default.log"):
self.terminal = sys.stdout
self.log = open(filename, "a+")
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def close(self):
self.log.close()
def flush(self):
pass
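# Illustrative usage sketch (not part of the original module); 'example_run.log' is a
# placeholder filename. Logger tees everything written to stdout into the log file.
if __name__ == '__main__':
    sys.stdout = Logger('example_run.log')
    print('This line is written to both the terminal and example_run.log')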
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/utils/logger.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# https://stackoverflow.com/questions/1557571/how-do-i-get-time-of-a-python-programs-execution
import atexit
from time import time, perf_counter
from time import strftime, localtime
import functools
def _secondsToStr(t):
return "%d:%02d:%02d.%03d" % \
functools.reduce(lambda ll,b : divmod(ll[0],b) + ll[1:], [(t*1000,),1000,60,60])
def _log(s, elapsed=None):
line = "=" * 40
print(line)
print(s)
print(strftime("%Y-%m-%d %H:%M:%S", localtime()))
if elapsed:
print("Elapsed time:", elapsed)
print(line)
print()
def _endlog(start):
end = time()
elapsed = end-start
_log("End Program", _secondsToStr(elapsed))
def timenow():
    print(strftime("%Y-%m-%d %H:%M:%S", localtime()), _secondsToStr(perf_counter()))
def timeit():
start = time()
atexit.register(_endlog, start)
_log("Start Program")
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/utils/timing.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .no_spatial import predict_no_spatial
__all__ = ['predict_no_spatial']
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/models/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pandas as pd
import numpy as np
import os
import sys
from pathlib import Path
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import Ridge
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import PredefinedSplit
from scipy.stats import randint
from scipy.stats import uniform
sys.path.append("..")
from crop_yield_prediction.utils import Logger
from crop_yield_prediction.utils import plot_predict
from crop_yield_prediction.utils import plot_predict_error
from crop_yield_prediction.utils import get_statistics
from crop_yield_prediction.utils import output_to_csv_no_spatial
def _train_tuned_ridge_regression(train_valid, train, valid, test, predefined_split):
print('Tuned Ridge Regression')
X_scaler = StandardScaler()
X_scaler.fit(train['X'])
X_train_valid = X_scaler.transform(train_valid['X'])
X_valid = X_scaler.transform(valid['X'])
X_test = X_scaler.transform(test['X'])
regr = Ridge()
params = {'alpha': [10000, 5000, 1000, 500, 100, 75, 50, 25, 10, 1.0, 0.0001, 0]}
regr_search = GridSearchCV(regr, params, cv=predefined_split)
regr_search.fit(X_train_valid, train_valid['y'])
# print(regr_search.best_params_)
valid_prediction = regr_search.predict(X_valid)
test_prediction = regr_search.predict(X_test)
valid_corr, valid_r2, valid_rmse = get_statistics(valid['y'], valid_prediction, valid=True)
test_corr, test_r2, test_rmse = get_statistics(test['y'], test_prediction, valid=False)
return {'valid_corr': valid_corr[0], 'valid_r2': valid_r2, 'valid_rmse': valid_rmse,
'test_corr': test_corr[0], 'test_r2': test_r2, 'test_rmse': test_rmse}, test_prediction
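# Illustrative sketch (not from the original file): predefined_split is expected to be a
# sklearn PredefinedSplit built so that GridSearchCV validates only on the validation rows
# inside train_valid. Assuming train_valid stacks the train rows first and the valid rows
# second, one common construction is:
#   test_fold = np.concatenate([np.full(len(train['y']), -1), np.zeros(len(valid['y']))])
#   predefined_split = PredefinedSplit(test_fold=test_fold)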
def _train_tuned_random_forest(train_valid, train, valid, test, predefined_split):
print('Tuned RandomForest')
valid_corr_lis, valid_r2_lis, valid_rmse_lis = [], [], []
test_corr_lis, test_r2_lis, test_rmse_lis = [], [], []
test_predictions = []
for i in range(2):
n_features = train_valid['X'].shape[1]
max_features = randint(1, n_features + 1)
min_samples_split = randint(2, 51)
min_samples_leaf = randint(1, 51)
min_weight_fraction_leaf = uniform(0.0, 0.5)
max_leaf_nodes = randint(10, 1001)
params = {'max_features': max_features,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf,
'min_weight_fraction_leaf': min_weight_fraction_leaf,
'max_leaf_nodes': max_leaf_nodes}
rf_search = RandomizedSearchCV(estimator=RandomForestRegressor(n_estimators=100, n_jobs=-1),
param_distributions=params,
n_iter=100,
cv=predefined_split,
n_jobs=-1)
rf_search.fit(train_valid['X'], train_valid['y'])
# print(rf_search.best_params_)
valid_prediction = rf_search.predict(valid['X'])
test_prediction = rf_search.predict(test['X'])
valid_corr, valid_r2, valid_rmse = get_statistics(valid['y'], valid_prediction, valid=True)
test_corr, test_r2, test_rmse = get_statistics(test['y'], test_prediction, valid=False)
test_predictions.append(np.asarray(test_prediction))
valid_corr_lis.append(valid_corr)
valid_r2_lis.append(valid_r2)
valid_rmse_lis.append(valid_rmse)
test_corr_lis.append(test_corr)
test_r2_lis.append(test_r2)
test_rmse_lis.append(test_rmse)
return {'valid_corr': np.around(np.mean([x[0] for x in valid_corr_lis]), 3),
'valid_r2': np.around(np.mean(valid_r2_lis), 3),
'valid_rmse': np.around(np.mean(valid_rmse_lis), 3),
'test_corr': np.around(np.mean([x[0] for x in test_corr_lis]), 3),
'test_r2': np.around(np.mean(test_r2_lis), 3),
'test_rmse': np.around(np.mean(test_rmse_lis), 3)}, np.mean(np.asarray(test_predictions), axis=0)
def _train_tuned_neural_network(train_valid, train, valid, test, predefined_split):
print('Tuned neural network')
X_scaler = StandardScaler()
X_scaler.fit(train['X'])
X_train_valid = X_scaler.transform(train_valid['X'])
X_valid = X_scaler.transform(valid['X'])
X_test = X_scaler.transform(test['X'])
valid_corr_lis, valid_r2_lis, valid_rmse_lis = [], [], []
test_corr_lis, test_r2_lis, test_rmse_lis = [], [], []
test_predictions = []
for i in range(2):
params = {'hidden_layer_sizes': [(256, 256), (256, )],
'learning_rate': ['adaptive', 'constant'],
'learning_rate_init': [0.01, 0.001],
'alpha': [0.0001, 0.001, 0.05, 0.1],
'activation': ['tanh', 'relu']
}
mlp_search = GridSearchCV(estimator=MLPRegressor(max_iter=500),
param_grid=params,
cv=predefined_split,
n_jobs=-1)
mlp_search.fit(X_train_valid, train_valid['y'])
# print(mlp_search.best_params_)
valid_prediction = mlp_search.predict(X_valid)
test_prediction = mlp_search.predict(X_test)
valid_corr, valid_r2, valid_rmse = get_statistics(valid['y'], valid_prediction, valid=True)
test_corr, test_r2, test_rmse = get_statistics(test['y'], test_prediction, valid=False)
test_predictions.append(np.asarray(test_prediction))
valid_corr_lis.append(valid_corr)
valid_r2_lis.append(valid_r2)
valid_rmse_lis.append(valid_rmse)
test_corr_lis.append(test_corr)
test_r2_lis.append(test_r2)
test_rmse_lis.append(test_rmse)
return {'valid_corr': np.around(np.mean([x[0] for x in valid_corr_lis]), 3),
'valid_r2': np.around(np.mean(valid_r2_lis), 3),
'valid_rmse': np.around(np.mean(valid_rmse_lis), 3),
'test_corr': np.around(np.mean([x[0] for x in test_corr_lis]), 3),
'test_r2': np.around(np.mean(test_r2_lis), 3),
'test_rmse': np.around(np.mean(test_rmse_lis), 3)}, np.mean(np.asarray(test_predictions), axis=0)
def _predict_crop_yield(train_valid_df, train_df, valid_df, test_df, predefined_split, out_dir, year, end_month):
all_features = [x for x in train_valid_df.columns.values
if x not in ['year', 'state', 'county', 'yield'] and not x.endswith('mean')]
features = []
for f in all_features:
if f[-1].isdigit():
if int(f[-1]) <= end_month:
features.append(f)
else:
features.append(f)
print(features)
dims = ['state', 'county']
train_valid = {'y': np.array(train_valid_df['yield']), 'X': np.array(train_valid_df[features])}
train = {'y': np.array(train_df['yield']), 'X': np.array(train_df[features])}
valid = {'y': np.array(valid_df['yield']), 'X': np.array(valid_df[features])}
test = {'y': np.array(test_df['yield']), 'X': np.array(test_df[features])}
dim_test = np.array(test_df[dims])
tuned_rr, tuned_rr_test_prediction = _train_tuned_ridge_regression(train_valid, train, valid, test, predefined_split)
tuned_rf, tuned_rf_test_prediction = _train_tuned_random_forest(train_valid, train, valid, test, predefined_split)
tuned_nn, tuned_nn_test_prediction = _train_tuned_neural_network(train_valid, train, valid, test, predefined_split)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
for prediction, name in zip([tuned_rr_test_prediction,
tuned_rf_test_prediction,
tuned_nn_test_prediction],
['tuned_rr', 'tuned_rf', 'tuned_nn']):
np.save('{}/{}_{}.npy'.format(out_dir, name, year), prediction)
plot_predict(prediction, dim_test, Path('{}/pred_{}_{}.html'.format(out_dir, name, year)))
plot_predict_error(prediction, test['y'], dim_test, Path('{}/err_{}_{}.html'.format(out_dir, name, year)))
return {'tuned_rr': tuned_rr, 'tuned_rf': tuned_rf, 'tuned_nn': tuned_nn}
def predict_no_spatial(csv_file, start_year, end_year, end_month, train_years, out_dir):
print('Predict for {}...........'.format(csv_file))
data = pd.read_csv(csv_file)
results = {}
for year in range(start_year, end_year+1):
print('Predict year {}.....'.format(year))
train_valid_data = data.loc[(data['year'] < year) & (data['year'] >= (year - (train_years + 1)))]
train_data = data.loc[(data['year'] < (year - 1)) & (data['year'] >= (year - (train_years + 1)))]
valid_data = data.loc[data['year'] == year - 1]
test_data = data.loc[data['year'] == year]
print('Train size {}, validate size {}, test size {}'.format(len(train_data), len(valid_data), len(test_data)))
valid_index = train_valid_data['year'] == year - 1
valid_fold = [0 if x else -1 for x in valid_index]
predefined_split = PredefinedSplit(test_fold=valid_fold)
year_results = _predict_crop_yield(train_valid_data, train_data, valid_data, test_data,
predefined_split, '{}/nt{}_end_month{}'.format(out_dir, train_years, end_month), year, end_month)
results[year] = year_results
output_to_csv_no_spatial(results, '{}/nt{}_end_month{}'.format(out_dir, train_years, end_month))
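# Illustrative sketch (not part of the original pipeline): how the valid_fold
# list built in predict_no_spatial() maps onto sklearn's PredefinedSplit. Rows
# marked -1 always stay in the training split; rows marked 0 form the single
# validation fold used by GridSearchCV / RandomizedSearchCV above. The toy
# years below are hypothetical example values.
if __name__ == '__main__':
    toy_years = np.array([2011, 2011, 2012, 2012, 2013, 2013])
    toy_fold = [0 if y == 2013 else -1 for y in toy_years]
    toy_split = PredefinedSplit(test_fold=toy_fold)
    for train_idx, valid_idx in toy_split.split():
        print('train indices:', train_idx)  # rows with fold == -1
        print('valid indices:', valid_idx)  # rows with fold == 0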
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/models/no_spatial.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from crop_yield_prediction.models.semi_transformer.TileNet import make_tilenet
import torch
import torch.nn as nn
class CnnLstm(nn.Module):
    ''' A TileNet CNN encoder followed by an LSTM over the per-timestep tile embeddings. '''
def __init__(self, tn_in_channels, tn_z_dim, d_model=512, d_inner=2048):
super().__init__()
self.tilenet = make_tilenet(tn_in_channels, tn_z_dim)
self.encoder = nn.LSTM(d_model, d_inner, batch_first=True)
self.predict_proj = nn.Linear(d_inner, 1)
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, x):
"""
        Input x: (n_batches, n_tsteps, n_var, img_height, img_width)
"""
n_batches, n_tsteps, n_vars, img_size = x.shape[:-1]
x = x.view(n_batches * n_tsteps, n_vars, img_size, img_size)
emb_x = self.tilenet(x)
emb_x = emb_x.view(n_batches, n_tsteps, -1)
enc_output, *_ = self.encoder(emb_x)
enc_output = enc_output[:, -1, :]
pred = torch.squeeze(self.predict_proj(enc_output))
return pred
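# Illustrative sketch (not part of the training code): forward a random batch
# through CnnLstm to check tensor shapes. The batch size, 7 timesteps and the
# 50x50 tile size are assumed example values based on shape comments elsewhere
# in this repository; tn_z_dim must equal the LSTM input size d_model (512),
# and make_tilenet is assumed to return tn_z_dim-dimensional embeddings.
if __name__ == '__main__':
    model = CnnLstm(tn_in_channels=9, tn_z_dim=512)
    x = torch.randn(2, 7, 9, 50, 50)  # (batch, tsteps, vars, height, width)
    pred = model(x)
    print(pred.shape)  # expected: torch.Size([2])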
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/models/cnn_lstm/cnn_lstm.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Based on transformer code from https://github.com/jadore801120/attention-is-all-you-need-pytorch
from crop_yield_prediction.models.cnn_lstm.cnn_lstm import CnnLstm
__all__ = ['CnnLstm']
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/models/cnn_lstm/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Adapt code from https://github.com/gabrieltseng/pycrop-yield-prediction
from crop_yield_prediction import CLIMATE_VARS
import pandas as pd
import numpy as np
MAX_BIN_VAL = {'ppt': 179.812, 'evi': 0.631, 'ndvi': 0.850, 'elevation': 961.420, 'lst_day': 309.100,
'lst_night': 293.400, 'clay': 47.0, 'sand': 91.0, 'silt': 70.0}
MIN_BIN_VAL = {'ppt': 11.045, 'evi': 0.084, 'ndvi': 0.138, 'elevation': 175.0, 'lst_day': 269.640,
'lst_night': 261.340, 'clay': 4.0, 'sand': 13.0, 'silt': 10.0}
def _calculate_histogram(image, num_bins=32):
"""
Input image shape: (n_variables, n_timesteps, 50, 50)
"""
hist = []
n_variables, n_timesteps = image.shape[:2]
for var_idx in range(n_variables):
bin_seq = np.linspace(MIN_BIN_VAL[CLIMATE_VARS[var_idx]], MAX_BIN_VAL[CLIMATE_VARS[var_idx]], num_bins + 1)
im = image[var_idx]
imhist = []
for ts_idx in range(n_timesteps):
density, _ = np.histogram(im[ts_idx, :, :], bin_seq, density=False)
# max() prevents divide by 0
imhist.append(density / max(1, density.sum()))
hist.append(np.stack(imhist))
# [bands, times, bins]
hist = np.stack(hist)
return hist
def get_features_for_deep_gaussian():
output_images = []
yields = []
years = []
locations = []
state_county_info = []
yield_data = pd.read_csv('data/deep_gaussian/deep_gaussian_dim_y.csv')[['state', 'county', 'year', 'value', 'lat', 'lon']]
yield_data.columns = ['state', 'county', 'year', 'value', 'lat', 'lon']
for idx, yield_data in enumerate(yield_data.itertuples()):
year, county, state = yield_data.year, yield_data.county, yield_data.state
# [1, n_timesteps, 1+n_temporal_neighbor+n_spatial_neighbor+n_distant, n_variables, 50, 50]
image = np.load('data/deep_gaussian/nr_25/{}.npy'.format(idx))
# get anchor image from shape (1, n_timesteps, 4, n_variables, 50, 50)
# to shape (n_timestep, n_variables, 50, 50)
image = image[0, :, 0, :, :, :]
# shape (n_variables, n_timesteps, 50, 50)
image = np.swapaxes(image, 0, 1)
image = _calculate_histogram(image, num_bins=32)
output_images.append(image)
yields.append(yield_data.value)
years.append(year)
lat, lon = yield_data.lat, yield_data.lon
locations.append(np.array([lon, lat]))
state_county_info.append(np.array([int(state), int(county)]))
# print(f'County: {int(county)}, State: {state}, Year: {year}, Output shape: {image.shape}')
np.savez('data/deep_gaussian/data.npz',
output_image=np.stack(output_images), output_yield=np.array(yields),
output_year=np.array(years), output_locations=np.stack(locations),
output_index=np.stack(state_county_info))
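# Illustrative sketch (not part of the pipeline): the normalized histogram
# computed in _calculate_histogram() for a single variable and timestep.
# The random 50x50 pixel block is purely hypothetical; the 'ndvi' bin edges
# come from MIN_BIN_VAL / MAX_BIN_VAL above.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    pixels = rng.uniform(MIN_BIN_VAL['ndvi'], MAX_BIN_VAL['ndvi'], size=(50, 50))
    bin_seq = np.linspace(MIN_BIN_VAL['ndvi'], MAX_BIN_VAL['ndvi'], 32 + 1)
    density, _ = np.histogram(pixels, bin_seq, density=False)
    hist = density / max(1, density.sum())
    print(hist.shape, hist.sum())  # (32,) and ~1.0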
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/models/deep_gaussian_process/feature_engineering.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Adapt code from https://github.com/gabrieltseng/pycrop-yield-prediction
import torch
from torch import nn
import torch.nn.functional as F
from pathlib import Path
from .base import ModelBase
class ConvModel(ModelBase):
"""
A PyTorch replica of the CNN structured model from the original paper. Note that
this class assumes feature_engineering was run with channels_first=True
Parameters
----------
in_channels: int, default=9
Number of channels in the input data. Default taken from the number of bands in the
MOD09A1 + the number of bands in the MYD11A2 datasets
dropout: float, default=0.5
Default taken from the original paper
dense_features: list, or None, default=None.
output feature size of the Linear layers. If None, default values will be taken from the paper.
The length of the list defines how many linear layers are used.
    time: int, default=7
        The number of timesteps being used. This is necessary to pass in the initializer since it will
        affect the size of the first dense layer, which is the flattened output of the conv layers
    savedir: pathlib Path, default=Path('results/deep_gaussian/models')
The directory into which the models should be saved.
device: torch.device
Device to run model on. By default, checks for a GPU. If none exists, uses
the CPU
"""
def __init__(self, in_channels=9, dropout=0.5, dense_features=None, time=7,
savedir=Path('results/deep_gaussian/models'), use_gp=True, sigma=1, r_loc=0.5, r_year=1.5,
sigma_e=0.01, sigma_b=0.01,
device=torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')):
# save values for reinitialization
self.in_channels = in_channels
self.dropout = dropout
self.dense_features = dense_features
self.time = time
model = ConvNet(in_channels=in_channels, dropout=dropout,
dense_features=dense_features, time=time)
if dense_features is None:
num_dense_layers = 2
else:
num_dense_layers = len(dense_features)
model_weight = f'dense_layers.{num_dense_layers - 1}.weight'
model_bias = f'dense_layers.{num_dense_layers - 1}.bias'
super().__init__(model, model_weight, model_bias, 'cnn', savedir, use_gp, sigma, r_loc,
r_year, sigma_e, sigma_b, device)
def reinitialize_model(self, time=None):
# the only thing which changes here is the time value, since this affects the
# size of the first dense layer.
if time is None:
time = self.time
model = ConvNet(in_channels=self.in_channels, dropout=self.dropout,
dense_features=self.dense_features, time=time)
if self.device.type != 'cpu':
model = model.cuda()
self.model = model
class ConvNet(nn.Module):
"""
A crop yield conv net.
For a description of the parameters, see the ConvModel class.
Only handles strides of 1 and 2
"""
def __init__(self, in_channels=9, dropout=0.5, dense_features=None, time=32):
super().__init__()
# values taken from the paper
in_out_channels_list = [in_channels, 128, 256, 256, 512, 512, 512]
stride_list = [None, 1, 2, 1, 2, 1, 2]
# Figure out the size of the final flattened conv layer, which
# is dependent on the input size
num_divisors = sum([1 if i == 2 else 0 for i in stride_list])
for i in range(num_divisors):
if time % 2 != 0:
time += 1
time /= 2
if dense_features is None:
dense_features = [2048, 1]
dense_features.insert(0, int(in_out_channels_list[-1] * time * 4))
assert len(stride_list) == len(in_out_channels_list), \
"Stride list and out channels list must be the same length!"
self.convblocks = nn.ModuleList([
ConvBlock(in_channels=in_out_channels_list[i-1],
out_channels=in_out_channels_list[i],
kernel_size=3, stride=stride_list[i],
dropout=dropout) for
i in range(1, len(stride_list))
])
self.dense_layers = nn.ModuleList([
nn.Linear(in_features=dense_features[i-1],
out_features=dense_features[i]) for
i in range(1, len(dense_features))
])
self.initialize_weights()
def initialize_weights(self):
for convblock in self.convblocks:
nn.init.kaiming_uniform_(convblock.conv.weight.data)
# http://cs231n.github.io/neural-networks-2/#init
# see: Initializing the biases
nn.init.constant_(convblock.conv.bias.data, 0)
for dense_layer in self.dense_layers:
nn.init.kaiming_uniform_(dense_layer.weight.data)
nn.init.constant_(dense_layer.bias.data, 0)
def forward(self, x, return_last_dense=False):
"""
If return_last_dense is true, the feature vector generated by the second to last
dense layer will also be returned. This is then used to train a Gaussian Process model.
"""
for block in self.convblocks:
x = block(x)
# flatten
x = x.view(x.shape[0], -1)
for layer_number, dense_layer in enumerate(self.dense_layers):
x = dense_layer(x)
if return_last_dense and (layer_number == len(self.dense_layers) - 2):
output = x
if return_last_dense:
return x, output
return x
class ConvBlock(nn.Module):
"""
A 2D convolution, followed by batchnorm, a ReLU activation, and dropout
"""
def __init__(self, in_channels, out_channels, kernel_size, stride, dropout):
super().__init__()
self.conv = Conv2dSamePadding(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride)
self.batchnorm = nn.BatchNorm2d(num_features=out_channels)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(dropout)
def forward(self, x):
x = self.relu(self.batchnorm(self.conv(x)))
return self.dropout(x)
class Conv2dSamePadding(nn.Conv2d):
"""Represents the "Same" padding functionality from Tensorflow.
See: https://github.com/pytorch/pytorch/issues/3867
This solution is mostly copied from
https://github.com/pytorch/pytorch/issues/3867#issuecomment-349279036
Note that the padding argument in the initializer doesn't do anything now
"""
def forward(self, input):
return conv2d_same_padding(input, self.weight, self.bias, self.stride,
self.dilation, self.groups)
def conv2d_same_padding(input, weight, bias=None, stride=1, dilation=1, groups=1):
# stride and dilation are expected to be tuples.
# first, we'll figure out how much padding is necessary for the rows
input_rows = input.size(2)
filter_rows = weight.size(2)
effective_filter_size_rows = (filter_rows - 1) * dilation[0] + 1
out_rows = (input_rows + stride[0] - 1) // stride[0]
padding_rows = max(0, (out_rows - 1) * stride[0] + effective_filter_size_rows - input_rows)
rows_odd = (padding_rows % 2 != 0)
# same for columns
input_cols = input.size(3)
filter_cols = weight.size(3)
effective_filter_size_cols = (filter_cols - 1) * dilation[1] + 1
out_cols = (input_cols + stride[1] - 1) // stride[1]
padding_cols = max(0, (out_cols - 1) * stride[1] + effective_filter_size_cols - input_cols)
cols_odd = (padding_cols % 2 != 0)
if rows_odd or cols_odd:
input = F.pad(input, [0, int(cols_odd), 0, int(rows_odd)])
return F.conv2d(input, weight, bias, stride,
padding=(padding_rows // 2, padding_cols // 2),
dilation=dilation, groups=groups)
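# Illustrative sketch (not part of the model): Conv2dSamePadding keeps the
# spatial size unchanged at stride 1 and halves it (rounding up) at stride 2,
# mirroring TensorFlow's "SAME" padding. The input sizes are arbitrary
# example values.
if __name__ == '__main__':
    x = torch.randn(1, 9, 7, 32)
    same_s1 = Conv2dSamePadding(in_channels=9, out_channels=16, kernel_size=3, stride=1)
    same_s2 = Conv2dSamePadding(in_channels=9, out_channels=16, kernel_size=3, stride=2)
    print(same_s1(x).shape)  # torch.Size([1, 16, 7, 32])
    print(same_s2(x).shape)  # torch.Size([1, 16, 4, 16])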
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/models/deep_gaussian_process/convnet.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Adapt code from https://github.com/gabrieltseng/pycrop-yield-prediction
import numpy as np
from scipy.spatial.distance import pdist, squareform
class GaussianProcess:
"""
The crop yield Gaussian process
"""
def __init__(self, sigma=1, r_loc=0.5, r_year=1.5, sigma_e=0.32, sigma_b=0.01):
self.sigma = sigma
self.r_loc = r_loc
self.r_year = r_year
self.sigma_e = sigma_e
self.sigma_b = sigma_b
@staticmethod
def _normalize(x):
x_mean = np.mean(x, axis=0, keepdims=True)
x_scale = np.ptp(x, axis=0, keepdims=True)
return (x - x_mean) / x_scale
def run(self, feat_train, feat_test, loc_train, loc_test, year_train, year_test,
train_yield, model_weights, model_bias):
        # makes sure the features have an additional value for the bias term
# We call the features H since the features are used as the basis functions h(x)
H_train = np.concatenate((feat_train, np.ones((feat_train.shape[0], 1))), axis=1)
H_test = np.concatenate((feat_test, np.ones((feat_test.shape[0], 1))), axis=1)
Y_train = np.expand_dims(train_yield, axis=1)
n_train = feat_train.shape[0]
n_test = feat_test.shape[0]
locations = self._normalize(np.concatenate((loc_train, loc_test), axis=0))
years = self._normalize(np.concatenate((year_train, year_test), axis=0))
# to calculate the se_kernel, a dim=2 array must be passed
years = np.expand_dims(years, axis=1)
# These are the squared exponential kernel function we'll use for the covariance
se_loc = squareform(pdist(locations, 'euclidean')) ** 2 / (self.r_loc ** 2)
se_year = squareform(pdist(years, 'euclidean')) ** 2 / (self.r_year ** 2)
# make the dirac matrix we'll add onto the kernel function
noise = np.zeros([n_train + n_test, n_train + n_test])
noise[0: n_train, 0: n_train] += (self.sigma_e ** 2) * np.identity(n_train)
kernel = ((self.sigma ** 2) * np.exp(-se_loc) * np.exp(-se_year)) + noise
# since B is diagonal, and B = self.sigma_b * np.identity(feat_train.shape[1]),
        # it's easy to calculate the inverse of B
B_inv = np.identity(H_train.shape[1]) / self.sigma_b
# "We choose b as the weight vector of the last layer of our deep models"
b = np.concatenate((model_weights.transpose(1, 0), np.expand_dims(model_bias, 1)))
K_inv = np.linalg.inv(kernel[0: n_train, 0: n_train])
# The definition of beta comes from equation 2.41 in Rasmussen (2006)
beta = np.linalg.inv(B_inv + H_train.T.dot(K_inv).dot(H_train)).dot(
H_train.T.dot(K_inv).dot(Y_train) + B_inv.dot(b))
# We take the mean of g(X*) as our prediction, also from equation 2.41
pred = H_test.dot(beta) + \
kernel[n_train:, :n_train].dot(K_inv).dot(Y_train - H_train.dot(beta))
return pred
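# Illustrative sketch (not part of the model): the spatial/temporal
# squared-exponential kernel assembled in run(), shown for three hypothetical
# locations and years (normalized the same way run() does it).
if __name__ == '__main__':
    gp = GaussianProcess()
    locations = gp._normalize(np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 2.0]]))
    years = np.expand_dims(gp._normalize(np.array([2012.0, 2013.0, 2014.0])), axis=1)
    se_loc = squareform(pdist(locations, 'euclidean')) ** 2 / (gp.r_loc ** 2)
    se_year = squareform(pdist(years, 'euclidean')) ** 2 / (gp.r_year ** 2)
    kernel = (gp.sigma ** 2) * np.exp(-se_loc) * np.exp(-se_year)
    print(kernel)  # 3x3 covariance; diagonal entries equal sigma ** 2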
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/models/deep_gaussian_process/gp.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .feature_engineering import get_features_for_deep_gaussian
from .convnet import ConvModel
from .rnn import RNNModel
__all__ = ['get_features_for_deep_gaussian',
'ConvModel',
'RNNModel']
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/models/deep_gaussian_process/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Adapt code from https://github.com/gabrieltseng/pycrop-yield-prediction
import torch.nn.functional as F
def l1_l2_loss(pred, true, l1_weight, scores_dict):
"""
    Regularized MSE loss: an L2 (MSE) term, optionally combined with an L1 term.
Parameters
----------
pred: torch.floatTensor
The model predictions
true: torch.floatTensor
The true values
l1_weight: int
The value by which to weight the l1 loss
scores_dict: defaultdict(list)
A dict to which scores can be appended.
Returns
----------
loss: the regularized mse loss
"""
loss = F.mse_loss(pred, true)
scores_dict['l2'].append(loss.item())
    if l1_weight > 0:
        l1 = F.l1_loss(pred, true)
        # scale the L1 term by l1_weight, matching the docstring
        loss += l1_weight * l1
scores_dict['l1'].append(l1.item())
scores_dict['loss'].append(loss.item())
return loss, scores_dict
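# Illustrative sketch (not part of the training loop): calling the loss on
# random predictions/targets and inspecting the running score dictionary.
# The l1_weight of 0.5 is an arbitrary example value.
if __name__ == '__main__':
    import torch
    from collections import defaultdict

    pred = torch.randn(8)
    true = torch.randn(8)
    scores = defaultdict(list)
    loss, scores = l1_l2_loss(pred, true, l1_weight=0.5, scores_dict=scores)
    print(loss.item(), dict(scores))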
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/models/deep_gaussian_process/loss.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Adapt code from https://github.com/gabrieltseng/pycrop-yield-prediction
from torch import nn
import torch
import math
from pathlib import Path
from .base import ModelBase
class RNNModel(ModelBase):
"""
A PyTorch replica of the RNN structured model from the original paper. Note that
this class assumes feature_engineering was run with channels_first=True
Parameters
----------
in_channels: int, default=9
Number of channels in the input data. Default taken from the number of bands in the
MOD09A1 + the number of bands in the MYD11A2 datasets
num_bins: int, default=32
Number of bins in the histogram
hidden_size: int, default=128
The size of the hidden state. Default taken from the original repository
rnn_dropout: float, default=0.75
Default taken from the original paper. Note that this dropout is applied to the
hidden state after each timestep, not after each layer (since there is only one layer)
dense_features: list, or None, default=None.
output feature size of the Linear layers. If None, default values will be taken from the paper.
The length of the list defines how many linear layers are used.
savedir: pathlib Path, default=Path('data/models')
The directory into which the models should be saved.
device: torch.device
Device to run model on. By default, checks for a GPU. If none exists, uses
the CPU
"""
def __init__(self, in_channels=9, num_bins=32, hidden_size=128, rnn_dropout=0.75,
dense_features=None, savedir=Path('data/models'), use_gp=True,
sigma=1, r_loc=0.5, r_year=1.5, sigma_e=0.01, sigma_b=0.01,
device=torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')):
model = RNNet(in_channels=in_channels, num_bins=num_bins, hidden_size=hidden_size,
num_rnn_layers=1, rnn_dropout=rnn_dropout,
dense_features=dense_features)
if dense_features is None:
num_dense_layers = 2
else:
num_dense_layers = len(dense_features)
model_weight = f'dense_layers.{num_dense_layers - 1}.weight'
model_bias = f'dense_layers.{num_dense_layers - 1}.bias'
super().__init__(model, model_weight, model_bias, 'rnn', savedir, use_gp, sigma, r_loc, r_year,
sigma_e, sigma_b, device)
def reinitialize_model(self, time=None):
self.model.initialize_weights()
class RNNet(nn.Module):
"""
    A crop yield recurrent (LSTM) net.
For a description of the parameters, see the RNNModel class.
"""
def __init__(self, in_channels=9, num_bins=32, hidden_size=128, num_rnn_layers=1,
rnn_dropout=0.25, dense_features=None):
super().__init__()
if dense_features is None:
dense_features = [256, 1]
dense_features.insert(0, hidden_size)
self.dropout = nn.Dropout(rnn_dropout)
self.rnn = nn.LSTM(input_size=in_channels * num_bins,
hidden_size=hidden_size,
num_layers=num_rnn_layers,
batch_first=True)
self.hidden_size = hidden_size
self.dense_layers = nn.ModuleList([
nn.Linear(in_features=dense_features[i-1],
out_features=dense_features[i])
for i in range(1, len(dense_features))
])
self.initialize_weights()
def initialize_weights(self):
sqrt_k = math.sqrt(1 / self.hidden_size)
for parameters in self.rnn.all_weights:
for pam in parameters:
nn.init.uniform_(pam.data, -sqrt_k, sqrt_k)
for dense_layer in self.dense_layers:
nn.init.kaiming_uniform_(dense_layer.weight.data)
nn.init.constant_(dense_layer.bias.data, 0)
def forward(self, x, return_last_dense=False):
"""
If return_last_dense is true, the feature vector generated by the second to last
dense layer will also be returned. This is then used to train a Gaussian Process model.
"""
# the model expects feature_engineer to have been run with channels_first=True, which means
# the input is [batch, bands, times, bins].
# Reshape to [batch, times, bands * bins]
x = x.permute(0, 2, 1, 3).contiguous()
x = x.view(x.shape[0], x.shape[1], x.shape[2] * x.shape[3])
sequence_length = x.shape[1]
hidden_state = torch.zeros(1, x.shape[0], self.hidden_size)
cell_state = torch.zeros(1, x.shape[0], self.hidden_size)
if x.is_cuda:
hidden_state = hidden_state.cuda()
cell_state = cell_state.cuda()
for i in range(sequence_length):
# The reason the RNN is unrolled here is to apply dropout to each timestep;
# The rnn_dropout argument only applies it after each layer. This better mirrors
# the behaviour of the Dropout Wrapper used in the original repository
# https://www.tensorflow.org/api_docs/python/tf/nn/rnn_cell/DropoutWrapper
input_x = x[:, i, :].unsqueeze(1)
_, (hidden_state, cell_state) = self.rnn(input_x,
(hidden_state, cell_state))
hidden_state = self.dropout(hidden_state)
x = hidden_state.squeeze(0)
for layer_number, dense_layer in enumerate(self.dense_layers):
x = dense_layer(x)
if return_last_dense and (layer_number == len(self.dense_layers) - 2):
output = x
if return_last_dense:
return x, output
return x
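# Illustrative sketch (not part of the model): forward a random histogram batch
# of shape [batch, bands, times, bins] through RNNet. The batch size of 2 and
# the 7 timesteps are arbitrary example values.
if __name__ == '__main__':
    net = RNNet(in_channels=9, num_bins=32, hidden_size=128)
    histograms = torch.randn(2, 9, 7, 32)
    out = net(histograms)
    print(out.shape)  # torch.Size([2, 1])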
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/models/deep_gaussian_process/rnn.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Adapt code from https://github.com/gabrieltseng/pycrop-yield-prediction
import torch
from torch.utils.data import TensorDataset, DataLoader
from sklearn.metrics import r2_score
from scipy.stats.stats import pearsonr
from pathlib import Path
import numpy as np
import pandas as pd
from collections import defaultdict, namedtuple
from datetime import datetime
from .gp import GaussianProcess
from .loss import l1_l2_loss
class ModelBase:
"""
Base class for all models
"""
def __init__(self, model, model_weight, model_bias, model_type, savedir, use_gp=True,
sigma=1, r_loc=0.5, r_year=1.5, sigma_e=0.32, sigma_b=0.01,
device=torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')):
self.savedir = savedir / model_type
self.savedir.mkdir(parents=True, exist_ok=True)
print(f'Using {device.type}')
if device.type != 'cpu':
model = model.cuda()
self.model = model
self.model_type = model_type
self.model_weight = model_weight
self.model_bias = model_bias
self.device = device
# # for reproducability
# torch.manual_seed(42)
# torch.cuda.manual_seed_all(42)
self.gp = None
if use_gp:
self.gp = GaussianProcess(sigma, r_loc, r_year, sigma_e, sigma_b)
def run(self, times, train_years, path_to_histogram=Path('data/deep_gaussian/data.npz'),
pred_years=None, num_runs=2, train_steps=25000, batch_size=64,
starter_learning_rate=1e-3, weight_decay=0, l1_weight=0, patience=10):
"""
Train the models. Note that multiple models are trained: as per the paper, a model
is trained for each year, with all preceding years used as training values. In addition,
for each year, 2 models are trained to account for random initialization.
Parameters
----------
        path_to_histogram: pathlib Path, default=Path('data/deep_gaussian/data.npz')
The location of the training data
times: {'all', 'realtime'}
Which time indices to train the model on. If 'all', a full run (32 timesteps) is used.
If 'realtime', range(10, 31, 4) is used.
pred_years: int, list or None, default=None
            Which years to build models for. If None, range(2014, 2019) is used, which differs from
            the original paper's default of range(2009, 2016).
num_runs: int, default=2
The number of runs to do per year. Default taken from the paper
train_steps: int, default=25000
The number of steps for which to train the model. Default taken from the paper.
        batch_size: int, default=64
            Batch size when training. Default taken from the paper
        starter_learning_rate: float, default=1e-3
            Starter learning rate. Note that the learning rate is divided by 10 after 4000 and 20000 training
            steps. Default taken from the paper
        weight_decay: float, default=0
Weight decay (L2 regularization) on the model weights
l1_weight: float, default=0
In addition to MSE, L1 loss is also used (sometimes). This is the weight to assign to this L1 loss.
patience: int or None, default=10
The number of epochs to wait without improvement in the validation loss before terminating training.
Note that the original repository doesn't use early stopping.
"""
with np.load(path_to_histogram) as hist:
images = hist['output_image']
locations = hist['output_locations']
yields = hist['output_yield']
years = hist['output_year']
indices = hist['output_index']
# to collect results
years_list, run_numbers, corr_list, r2_list, rmse_list, me_list, times_list = [], [], [], [], [], [], []
if self.gp is not None:
corr_gp_list, r2_gp_list, rmse_gp_list, me_gp_list = [], [], [], []
if pred_years is None:
pred_years = range(2014, 2019)
elif type(pred_years) is int:
pred_years = [pred_years]
for pred_year in pred_years:
for run_number in range(1, num_runs + 1):
for time in times:
print(f'Training to predict on {pred_year}, Run number {run_number}')
results = self._run_1_year(train_years, images, yields,
years, locations,
indices, pred_year,
time, run_number,
train_steps, batch_size,
starter_learning_rate,
weight_decay, l1_weight,
patience)
years_list.append(pred_year)
run_numbers.append(run_number)
times_list.append(time)
if self.gp is not None:
corr, r2, rmse, me, corr_gp, r2_gp, rmse_gp, me_gp = results
corr_gp_list.append(corr_gp)
r2_gp_list.append(r2_gp)
rmse_gp_list.append(rmse_gp)
me_gp_list.append(me_gp)
else:
corr, r2, rmse, me = results
corr_list.append(corr)
r2_list.append(r2)
rmse_list.append(rmse)
me_list.append(me)
print('-----------')
# save results to a csv file
data = {'year': years_list, 'run_number': run_numbers, 'time_idx': times_list,
'Corr': corr_list, 'R2': r2_list, 'RMSE': rmse_list, 'ME': me_list}
if self.gp is not None:
data['Corr_GP'] = corr_gp_list
data['R2_GP'] = r2_gp_list
data['RMSE_GP'] = rmse_gp_list
data['ME_GP'] = me_gp_list
results_df = pd.DataFrame(data=data)
results_df.to_csv(self.savedir / f'{str(datetime.now())}.csv', index=False)
def _run_1_year(self, train_years, images, yields, years, locations, indices, predict_year, time, run_number,
train_steps, batch_size, starter_learning_rate, weight_decay, l1_weight, patience):
"""
Train one model on one year of data, and then save the model predictions.
To be called by run().
"""
train_data, val_data, test_data = self.prepare_arrays(train_years, images, yields, locations, indices, years, predict_year, time)
# reinitialize the model, since self.model may be trained multiple
# times in one call to run()
self.reinitialize_model(time=time)
train_scores, val_scores = self._train(train_data.images, train_data.yields,
val_data.images, val_data.yields,
train_steps, batch_size,
starter_learning_rate,
weight_decay, l1_weight,
patience)
results = self._predict(*train_data, *test_data, batch_size)
model_information = {
'state_dict': self.model.state_dict(),
'val_loss': val_scores['loss'],
'train_loss': train_scores['loss'],
}
for key in results:
model_information[key] = results[key]
# finally, get the relevant weights for the Gaussian Process
model_weight = self.model.state_dict()[self.model_weight]
model_bias = self.model.state_dict()[self.model_bias]
if self.model.state_dict()[self.model_weight].device != 'cpu':
model_weight, model_bias = model_weight.cpu(), model_bias.cpu()
model_information['model_weight'] = model_weight.numpy()
model_information['model_bias'] = model_bias.numpy()
if self.gp is not None:
print("Running Gaussian Process!")
gp_pred = self.gp.run(model_information['train_feat'],
model_information['test_feat'],
model_information['train_loc'],
model_information['test_loc'],
model_information['train_years'],
model_information['test_years'],
model_information['train_real'],
model_information['model_weight'],
model_information['model_bias'])
model_information['test_pred_gp'] = gp_pred.squeeze(1)
filename = f'{predict_year}_{run_number}_{time}_{"gp" if (self.gp is not None) else ""}.pth.tar'
torch.save(model_information, self.savedir / filename)
return self.analyze_results(model_information['test_real'], model_information['test_pred'],
model_information['test_pred_gp'] if self.gp is not None else None)
def _train(self, train_images, train_yields, val_images, val_yields, train_steps,
batch_size, starter_learning_rate, weight_decay, l1_weight, patience):
"""Defines the training loop for a model
"""
train_dataset, val_dataset = TensorDataset(train_images, train_yields), TensorDataset(val_images, val_yields)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
val_dataloader = DataLoader(val_dataset, batch_size=batch_size)
optimizer = torch.optim.Adam([pam for pam in self.model.parameters()],
lr=starter_learning_rate,
weight_decay=weight_decay)
num_epochs = 50
print(f'Training for {num_epochs} epochs')
train_scores = defaultdict(list)
val_scores = defaultdict(list)
step_number = 0
min_loss = np.inf
best_state = self.model.state_dict()
if patience is not None:
epochs_without_improvement = 0
for epoch in range(num_epochs):
self.model.train()
# running train and val scores are only for printing out
# information
running_train_scores = defaultdict(list)
for train_x, train_y in train_dataloader:
optimizer.zero_grad()
pred_y = self.model(train_x)
loss, running_train_scores = l1_l2_loss(pred_y, train_y, l1_weight,
running_train_scores)
loss.backward()
optimizer.step()
train_scores['loss'].append(loss.item())
step_number += 1
if step_number in [4000, 20000]:
for param_group in optimizer.param_groups:
param_group['lr'] /= 10
train_output_strings = []
for key, val in running_train_scores.items():
train_output_strings.append('{}: {}'.format(key, round(np.array(val).mean(), 5)))
running_val_scores = defaultdict(list)
self.model.eval()
with torch.no_grad():
for val_x, val_y, in val_dataloader:
val_pred_y = self.model(val_x)
val_loss, running_val_scores = l1_l2_loss(val_pred_y, val_y, l1_weight,
running_val_scores)
val_scores['loss'].append(val_loss.item())
val_output_strings = []
for key, val in running_val_scores.items():
val_output_strings.append('{}: {}'.format(key, round(np.array(val).mean(), 5)))
print('TRAINING: {}'.format(', '.join(train_output_strings)))
print('VALIDATION: {}'.format(', '.join(val_output_strings)))
epoch_val_loss = np.array(running_val_scores['loss']).mean()
if epoch_val_loss < min_loss:
best_state = self.model.state_dict()
min_loss = epoch_val_loss
if patience is not None:
epochs_without_improvement = 0
elif patience is not None:
epochs_without_improvement += 1
if epochs_without_improvement == patience:
# revert to the best state dict
self.model.load_state_dict(best_state)
print('Early stopping!')
break
self.model.load_state_dict(best_state)
return train_scores, val_scores
def _predict(self, train_images, train_yields, train_locations, train_indices,
train_years, test_images, test_yields, test_locations, test_indices,
test_years, batch_size):
"""
Predict on the training and validation data. Optionally, return the last
feature vector of the model.
"""
train_dataset = TensorDataset(train_images, train_yields,
train_locations, train_indices,
train_years)
test_dataset = TensorDataset(test_images, test_yields,
test_locations, test_indices,
test_years)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size)
test_dataloader = DataLoader(test_dataset, batch_size=batch_size)
results = defaultdict(list)
self.model.eval()
with torch.no_grad():
for train_im, train_yield, train_loc, train_idx, train_year in train_dataloader:
model_output = self.model(train_im,
return_last_dense=True if (self.gp is not None) else False)
if self.gp is not None:
pred, feat = model_output
if feat.device != 'cpu':
feat = feat.cpu()
results['train_feat'].append(feat.numpy())
else:
pred = model_output
results['train_pred'].extend(pred.squeeze(1).tolist())
results['train_real'].extend(train_yield.squeeze(1).tolist())
results['train_loc'].append(train_loc.numpy())
results['train_indices'].append(train_idx.numpy())
results['train_years'].extend(train_year.tolist())
for test_im, test_yield, test_loc, test_idx, test_year in test_dataloader:
model_output = self.model(test_im,
return_last_dense=True if (self.gp is not None) else False)
if self.gp is not None:
pred, feat = model_output
if feat.device != 'cpu':
feat = feat.cpu()
results['test_feat'].append(feat.numpy())
else:
pred = model_output
results['test_pred'].extend(pred.squeeze(1).tolist())
results['test_real'].extend(test_yield.squeeze(1).tolist())
results['test_loc'].append(test_loc.numpy())
results['test_indices'].append(test_idx.numpy())
results['test_years'].extend(test_year.tolist())
for key in results:
if key in ['train_feat', 'test_feat', 'train_loc',
'test_loc', 'train_indices', 'test_indices']:
results[key] = np.concatenate(results[key], axis=0)
else:
results[key] = np.array(results[key])
return results
def prepare_arrays(self, train_years, images, yields, locations, indices, years, predict_year,
time):
"""Prepares the inputs for the model, in the following way:
- normalizes the images
- splits into a train and val set
- turns the numpy arrays into tensors
- removes excess months, if monthly predictions are being made
"""
train_idx = np.nonzero((years >= (predict_year - (train_years + 1))) & (years < (predict_year-1)))[0]
val_idx = np.nonzero(years == predict_year - 1)[0]
test_idx = np.nonzero(years == predict_year)[0]
train_images, val_images, test_images = self._normalize(images[train_idx], images[val_idx], images[test_idx])
print(f'Train set size: {train_idx.shape[0]}, Validation set size {val_idx.shape[0]}, Test set size: {test_idx.shape[0]}')
Data = namedtuple('Data', ['images', 'yields', 'locations', 'indices', 'years'])
train_data_images = torch.tensor(train_images[:, :, :time, :]).float()
train_data_yield = torch.tensor(yields[train_idx]).float().unsqueeze(1)
if self.device.type != 'cpu':
train_data_images = train_data_images.cuda()
train_data_yield = train_data_yield.cuda()
train_data = Data(
images=train_data_images,
yields=train_data_yield,
locations=torch.tensor(locations[train_idx]),
indices=torch.tensor(indices[train_idx]),
years=torch.tensor(years[train_idx])
)
val_data_images = torch.tensor(val_images[:, :, :time, :]).float()
val_data_yield = torch.tensor(yields[val_idx]).float().unsqueeze(1)
if self.device.type != 'cpu':
val_data_images = val_data_images.cuda()
val_data_yield = val_data_yield.cuda()
val_data = Data(
images=val_data_images,
yields=val_data_yield,
locations=torch.tensor(locations[val_idx]),
indices=torch.tensor(indices[val_idx]),
years=torch.tensor(years[val_idx])
)
test_data_images = torch.tensor(test_images[:, :, :time, :]).float()
test_data_yield = torch.tensor(yields[test_idx]).float().unsqueeze(1)
if self.device.type != 'cpu':
test_data_images = test_data_images.cuda()
test_data_yield = test_data_yield.cuda()
test_data = Data(
images=test_data_images,
yields=test_data_yield,
locations=torch.tensor(locations[test_idx]),
indices=torch.tensor(indices[test_idx]),
years=torch.tensor(years[test_idx])
)
return train_data, val_data, test_data
@staticmethod
def _normalize(train_images, val_images, test_images):
"""
Find the mean values of the bands in the train images. Use these values
to normalize both the training and validation images.
A little awkward, since transpositions are necessary to make array broadcasting work
"""
mean = np.mean(train_images, axis=(0, 2, 3))
train_images = (train_images.transpose(0, 2, 3, 1) - mean).transpose(0, 3, 1, 2)
val_images = (val_images.transpose(0, 2, 3, 1) - mean).transpose(0, 3, 1, 2)
test_images = (test_images.transpose(0, 2, 3, 1) - mean).transpose(0, 3, 1, 2)
return train_images, val_images, test_images
@staticmethod
def analyze_results(true, pred, pred_gp):
"""Calculate ME and RMSE
"""
corr = pearsonr(true, pred)[0]
r2 = r2_score(true, pred)
rmse = np.sqrt(np.mean((true - pred) ** 2))
me = np.mean(true - pred)
print(f'Without GP: Corr: {corr}, R2: {r2}, RMSE: {rmse}, ME: {me}')
if pred_gp is not None:
corr_gp = pearsonr(true, pred_gp)[0]
r2_gp = r2_score(true, pred_gp)
rmse_gp = np.sqrt(np.mean((true - pred_gp) ** 2))
me_gp = np.mean(true - pred_gp)
print(f'With GP: Corr: {corr_gp}, R2: {r2_gp}, RMSE: {rmse_gp}, ME: {me_gp}')
return corr, r2, rmse, me, corr_gp, r2_gp, rmse_gp, me_gp
return corr, r2, rmse, me
def reinitialize_model(self, time=None):
raise NotImplementedError
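# Illustrative sketch (not part of the training pipeline): the evaluation
# metrics computed by analyze_results() on small hypothetical yield arrays,
# without the Gaussian Process correction.
if __name__ == '__main__':
    true = np.array([150.0, 160.0, 170.0, 180.0])
    pred = np.array([152.0, 158.0, 175.0, 176.0])
    corr, r2, rmse, me = ModelBase.analyze_results(true, pred, pred_gp=None)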
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/models/deep_gaussian_process/base.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Based on transformer code from https://github.com/jadore801120/attention-is-all-you-need-pytorch
from crop_yield_prediction.models.c3d.conv3d import C3D
__all__ = ['C3D']
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/models/c3d/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Based on code from https://github.com/jfzhang95/pytorch-video-recognition/blob/master/network/C3D_model.py
# Architecture is taken from https://esc.fnwi.uva.nl/thesis/centraal/files/f1570224447.pdf
import torch
import torch.nn as nn
class C3D(nn.Module):
"""
The C3D network.
"""
def __init__(self, in_channels, n_tsteps):
super(C3D, self).__init__()
self.n_tsteps = n_tsteps
# input (9, 7, 50, 50), output (9, 7, 50, 50)
self.dimr1 = nn.Conv3d(in_channels, in_channels, kernel_size=(1, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1))
self.bn_dimr1 = nn.BatchNorm3d(in_channels, eps=1e-6, momentum=0.1)
# output (3, 7, 50, 50)
self.dimr2 = nn.Conv3d(in_channels, 3, kernel_size=(1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0))
self.bn_dimr2 = nn.BatchNorm3d(3, eps=1e-6, momentum=0.1)
# output (64, 7, 50, 50)
self.conv1 = nn.Conv3d(3, 64, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1))
self.bn1 = nn.BatchNorm3d(64, eps=1e-6, momentum=0.1)
# output (64, 7, 25, 25)
self.pool1 = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))
# output (128, 7, 25, 25)
self.conv2 = nn.Conv3d(64, 128, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1))
self.bn2 = nn.BatchNorm3d(128, eps=1e-6, momentum=0.1)
# output (128, 7, 12, 12)
self.pool2 = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))
# output (256, 7, 12, 12)
self.conv3a = nn.Conv3d(128, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1))
self.bn3a = nn.BatchNorm3d(256, eps=1e-6, momentum=0.1)
# output (256, 7, 12, 12)
self.conv3b = nn.Conv3d(256, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1))
self.bn3b = nn.BatchNorm3d(256, eps=1e-6, momentum=0.1)
# output (256, 3, 6, 6)
self.pool3 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
# output (512, 3, 6, 6)
self.conv4a = nn.Conv3d(256, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1))
self.bn4a = nn.BatchNorm3d(512, eps=1e-6, momentum=0.1)
# output (512, 3, 6, 6)
self.conv4b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1))
self.bn4b = nn.BatchNorm3d(512, eps=1e-6, momentum=0.1)
# output (512, 1, 3, 3)
self.pool4 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
self.pool4_keept = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))
self.fc5 = nn.Linear(4608, 1024)
self.fc6 = nn.Linear(1024, 1)
self.dropout = nn.Dropout(p=0.5)
self.relu = nn.ReLU()
self.__init_weight()
def forward(self, x):
x = self.relu(self.bn_dimr1(self.dimr1(x)))
x = self.relu(self.bn_dimr2(self.dimr2(x)))
x = self.relu(self.bn1(self.conv1(x)))
x = self.pool1(x)
x = self.relu(self.bn2(self.conv2(x)))
x = self.pool2(x)
x = self.relu(self.bn3a(self.conv3a(x)))
x = self.relu(self.bn3b(self.conv3b(x)))
x = self.pool3(x)
x = self.relu(self.bn4a(self.conv4a(x)))
x = self.relu(self.bn4b(self.conv4b(x)))
if self.n_tsteps > 3:
x = self.pool4(x)
else:
x = self.pool4_keept(x)
# output (512, 1, 3, 3)
x = x.view(-1, 4608)
x = self.relu(self.fc5(x))
x = self.dropout(x)
pred = torch.squeeze(self.fc6(x))
return pred
def __init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv3d):
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, math.sqrt(2. / n))
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
# def get_1x_lr_params(model):
# """
# This generator returns all the parameters for conv and two fc layers of the net.
# """
# b = [model.conv1, model.conv2, model.conv3a, model.conv3b, model.conv4a, model.conv4b,
# model.conv5a, model.conv5b, model.fc6, model.fc7]
# for i in range(len(b)):
# for k in b[i].parameters():
# if k.requires_grad:
# yield k
#
# def get_10x_lr_params(model):
# """
# This generator returns all the parameters for the last fc layer of the net.
# """
# b = [model.fc8]
# for j in range(len(b)):
# for k in b[j].parameters():
# if k.requires_grad:
# yield k
# if __name__ == "__main__":
# inputs = torch.rand(1, 3, 16, 112, 112)
# net = C3D(num_classes=101, pretrained=True)
#
# outputs = net.forward(inputs)
# print(outputs.size())
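# Illustrative sketch (not part of the original file): forward a random batch
# of shape (batch, variables, timesteps, height, width) through C3D. The 9
# variables, 7 timesteps and 50x50 tiles follow the shape comments above; the
# batch size of 2 is an arbitrary example value.
if __name__ == "__main__":
    net = C3D(in_channels=9, n_tsteps=7)
    inputs = torch.rand(2, 9, 7, 50, 50)
    outputs = net(inputs)
    print(outputs.size())  # expected: torch.Size([2])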
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/models/c3d/conv3d.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Based on transformer code from https://github.com/jadore801120/attention-is-all-you-need-pytorch
from crop_yield_prediction.models.semi_transformer.Layers import EncoderLayer
import torch
import torch.nn as nn
import math
class PositionalEncoding(nn.Module):
def __init__(self, d_hid, n_position):
super(PositionalEncoding, self).__init__()
# Compute the positional encodings once in log space.
pe = torch.zeros(n_position, d_hid)
position = torch.arange(0.0, n_position).unsqueeze(1)
div_term = torch.exp(torch.arange(0.0, d_hid, 2) * -(math.log(10000.0) / d_hid))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
        return x + self.pe[:, :x.size(1)]
class Encoder(nn.Module):
    ''' An encoder model with a self-attention mechanism. '''
def __init__(
self, n_tsteps, query_type, d_word_vec, d_model, d_inner, n_layers, n_head, d_k, d_v, dropout=0.1,
apply_position_enc=True):
super().__init__()
self.apply_position_enc = apply_position_enc
self.position_enc = PositionalEncoding(d_word_vec, n_position=n_tsteps)
self.dropout = nn.Dropout(p=dropout)
self.layer_stack = nn.ModuleList([
EncoderLayer(n_tsteps, query_type, d_model, d_inner, n_head, d_k, d_v, dropout=dropout)
for _ in range(n_layers)])
self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
def forward(self, x, return_attns=False):
enc_slf_attn_list = []
# -- Forward
if self.apply_position_enc:
x = self.position_enc(x)
enc_output = self.dropout(x)
for enc_layer in self.layer_stack:
enc_output, enc_slf_attn = enc_layer(enc_output)
enc_slf_attn_list += [enc_slf_attn] if return_attns else []
enc_output = self.layer_norm(enc_output)
if return_attns:
return enc_output, enc_slf_attn_list
return enc_output,
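# Illustrative sketch (not part of the model): the sinusoidal positional
# encoding added to a random sequence of embeddings. The batch size of 2,
# 7 timesteps and 512-dimensional embeddings are example values.
if __name__ == '__main__':
    pos_enc = PositionalEncoding(d_hid=512, n_position=7)
    x = torch.randn(2, 7, 512)
    print(pos_enc(x).shape)  # torch.Size([2, 7, 512])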
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/models/semi_transformer/AttentionModels.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Based on transformer code from https://github.com/jadore801120/attention-is-all-you-need-pytorch
from crop_yield_prediction.models.semi_transformer.SemiTransformer import SemiTransformer
from crop_yield_prediction.models.semi_transformer.TileNet import make_tilenet
from crop_yield_prediction.models.semi_transformer.Optim import ScheduledOptim
__all__ = ['SemiTransformer', 'ScheduledOptim', 'make_tilenet']
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/models/semi_transformer/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Based on transformer code from https://github.com/jadore801120/attention-is-all-you-need-pytorch
''' Define the sublayers in encoder/decoder layer '''
from crop_yield_prediction.models.semi_transformer.Modules import ScaledDotProductAttention
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class MultiHeadAttention(nn.Module):
''' Multi-Head Attention module '''
def __init__(self, n_tsteps, query_type, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.global_query = nn.Parameter(torch.randn(n_head, d_k, n_tsteps), requires_grad=True)
self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
self.query_type = query_type
def forward(self, q, k, v):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
# sz_b: batch size, len_q, len_k, len_v: number of time steps
sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
residual = q
# Pass through the pre-attention projection: b x lq x (n*dv)
# Separate different heads: b x lq x n x dv
if self.query_type == 'global':
q = self.global_query
q = q.transpose(1, 2) # transpose to n * lq * dk
elif self.query_type == 'fixed':
q = self.layer_norm(q)
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
q = q.transpose(1, 2) # transpose to b x n x lq x dk
elif self.query_type == 'combine':
lq = self.layer_norm(q)
lq = self.w_qs(lq).view(sz_b, len_q, n_head, d_k)
lq = lq.transpose(1, 2)
gq = self.global_query
gq = gq.transpose(1, 2)
q = lq + gq
elif self.query_type == 'separate':
lq = self.layer_norm(q)
lq = self.w_qs(lq).view(sz_b, len_q, n_head, d_k)
lq = lq.transpose(1, 2)
gq = self.global_query
gq = gq.transpose(1, 2)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
k, v = k.transpose(1, 2), v.transpose(1, 2) # Transpose for attention dot product: b x n x lq x dv
# Transpose for attention dot product: b x n x lq x dv
if self.query_type == 'separate':
q, attn = self.attention(lq, k, v, gq)
else:
q, attn = self.attention(q, k, v)
# Transpose to move the head dimension back: b x lq x n x dv
# Combine the last two dimensions to concatenate all the heads together: b x lq x (n*dv)
q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
q = self.dropout(self.fc(q))
q += residual
return q, attn
class PositionwiseFeedForward(nn.Module):
    ''' A two-layer position-wise feed-forward module '''
def __init__(self, d_in, d_hid, dropout=0.1):
super().__init__()
self.w_1 = nn.Linear(d_in, d_hid) # position-wise
self.w_2 = nn.Linear(d_hid, d_in) # position-wise
self.layer_norm = nn.LayerNorm(d_in, eps=1e-6)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
residual = x
x = self.layer_norm(x)
x = self.w_2(F.relu(self.w_1(x)))
x = self.dropout(x)
x += residual
return x
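# Illustrative sketch (not part of the model): multi-head self-attention with
# the 'fixed' (input-derived) query type on a random sequence of 7 timestep
# embeddings. All sizes are arbitrary example values.
if __name__ == '__main__':
    mha = MultiHeadAttention(n_tsteps=7, query_type='fixed',
                             n_head=8, d_model=512, d_k=64, d_v=64)
    x = torch.randn(2, 7, 512)
    out, attn = mha(x, x, x)
    print(out.shape, attn.shape)  # (2, 7, 512) and (2, 8, 7, 7)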
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/models/semi_transformer/SubLayers.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Based on transformer code from https://github.com/jadore801120/attention-is-all-you-need-pytorch
import torch
import torch.nn as nn
import torch.nn.functional as F
class ScaledDotProductAttention(nn.Module):
''' Scaled Dot-Product Attention '''
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
def forward(self, q, k, v, gq=None):
attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
attn = self.dropout(F.softmax(attn, dim=-1))
if gq is not None:
attn_gq = torch.matmul(gq / self.temperature, k.transpose(2, 3))
attn_gq = self.dropout(F.softmax(attn_gq, dim=-1))
attn += attn_gq
output = torch.matmul(attn, v)
return output, attn
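# Illustrative sketch (not part of the model): scaled dot-product attention on
# random queries/keys/values of shape (batch, heads, timesteps, dim). The
# sizes are arbitrary example values.
if __name__ == '__main__':
    d_k = 64
    attn_fn = ScaledDotProductAttention(temperature=d_k ** 0.5)
    q = torch.randn(2, 8, 7, d_k)
    k = torch.randn(2, 8, 7, d_k)
    v = torch.randn(2, 8, 7, d_k)
    output, attn = attn_fn(q, k, v)
    print(output.shape, attn.shape)  # (2, 8, 7, 64) and (2, 8, 7, 7)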
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/models/semi_transformer/Modules.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Based on transformer code from https://github.com/jadore801120/attention-is-all-you-need-pytorch
from crop_yield_prediction.models.semi_transformer.AttentionModels import Encoder
from crop_yield_prediction.models.semi_transformer.TileNet import make_tilenet
import torch
import torch.nn as nn
class SemiTransformer(nn.Module):
    ''' TileNet tile embeddings encoded with a self-attention encoder over timesteps to predict crop yield. '''
def __init__(
self, tn_in_channels, tn_z_dim, tn_warm_start_model,
sentence_embedding, output_pred, query_type,
attn_n_tsteps, d_word_vec=512, d_model=512, d_inner=2048,
n_layers=6, n_head=8, d_k=64, d_v=64, dropout=0.1, apply_position_enc=True):
super().__init__()
assert d_model == d_word_vec, \
'To facilitate the residual connections, \
the dimensions of all module outputs shall be the same.'
self.output_pred = output_pred
self.tilenet = make_tilenet(tn_in_channels, tn_z_dim)
self.encoder = Encoder(
attn_n_tsteps, query_type=query_type, d_word_vec=d_word_vec, d_model=d_model, d_inner=d_inner,
n_layers=n_layers, n_head=n_head, d_k=d_k, d_v=d_v, dropout=dropout,
apply_position_enc=apply_position_enc)
self.sentence_embedding = sentence_embedding
if self.output_pred:
self.predict_proj = nn.Linear(d_model, 1)
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
if tn_warm_start_model is not None:
warm_start = torch.load(tn_warm_start_model)
self.tilenet.load_state_dict(warm_start['model_state_dict'])
def forward(self, x, unsup_weight):
"""
Input x: (n_batches, n_tsteps, n_triplets, n_var, img_height, img_width)
"""
n_batches, n_tsteps, n_triplets, n_vars, img_size = x.shape[:-1]
emb_triplets = None
if unsup_weight != 0:
x = x.view(n_batches * n_tsteps * n_triplets, n_vars, img_size, img_size)
emb_triplets = self.tilenet(x)
emb_triplets = emb_triplets.view(n_batches, n_tsteps, n_triplets, -1)
emb_x = emb_triplets[:, :, 0, :]
# emb_triplets = emb_triplets.view(n_batches * n_tsteps, n_triplets, -1)
else:
x = x[:, :, 0, :, :, :]
x = x.view(n_batches * n_tsteps, n_vars, img_size, img_size)
emb_x = self.tilenet(x)
emb_x = emb_x.view(n_batches, n_tsteps, -1)
enc_output, *_ = self.encoder(emb_x)
if self.sentence_embedding == 'simple_average':
enc_output = enc_output.mean(1)
pred = torch.squeeze(self.predict_proj(enc_output))
return emb_triplets, pred
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/models/semi_transformer/SemiTransformer.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Based on transformer code from https://github.com/jadore801120/attention-is-all-you-need-pytorch
from crop_yield_prediction.models.semi_transformer.SubLayers import MultiHeadAttention, PositionwiseFeedForward
import torch.nn as nn
class EncoderLayer(nn.Module):
    ''' An encoder layer composed of multi-head self-attention and a position-wise feed-forward sublayer '''
def __init__(self, n_tsteps, query_type, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):
super(EncoderLayer, self).__init__()
self.slf_attn = MultiHeadAttention(n_tsteps, query_type, n_head, d_model, d_k, d_v, dropout=dropout)
self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=dropout)
def forward(self, enc_input, slf_attn_mask=None):
enc_output, enc_slf_attn = self.slf_attn(enc_input, enc_input, enc_input)
enc_output = self.pos_ffn(enc_output)
return enc_output, enc_slf_attn
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/models/semi_transformer/Layers.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Based on transformer code from https://github.com/jadore801120/attention-is-all-you-need-pytorch
'''A wrapper class for scheduled optimizer '''
import numpy as np
class ScheduledOptim():
'''A simple wrapper class for learning rate scheduling'''
def __init__(self, optimizer, init_lr, d_model, n_warmup_steps):
self._optimizer = optimizer
self.init_lr = init_lr
self.d_model = d_model
self.n_warmup_steps = n_warmup_steps
self.n_steps = 0
def step_and_update_lr(self):
"Step with the inner optimizer"
self._update_learning_rate()
self._optimizer.step()
def zero_grad(self):
"Zero out the gradients with the inner optimizer"
self._optimizer.zero_grad()
def _get_lr_scale(self):
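        # Standard "Attention is All You Need" schedule:
        #   lr = init_lr * d_model**-0.5 * min(step**-0.5, step * n_warmup_steps**-1.5)
        # i.e. linear warm-up for n_warmup_steps steps, then decay proportional to 1/sqrt(step).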
d_model = self.d_model
n_steps, n_warmup_steps = self.n_steps, self.n_warmup_steps
return (d_model ** -0.5) * min(n_steps ** (-0.5), n_steps * n_warmup_steps ** (-1.5))
def _update_learning_rate(self):
''' Learning rate scheduling per step '''
self.n_steps += 1
lr = self.init_lr * self._get_lr_scale()
for param_group in self._optimizer.param_groups:
param_group['lr'] = lr
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/models/semi_transformer/Optim.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Based on tile2vec code from https://github.com/ermongroup/tile2vec
import torch
import torch.nn as nn
import torch.nn.functional as F
class ResidualBlock(nn.Module):
def __init__(self, in_planes, planes, stride=1):
super(ResidualBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes))
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class TileNet(nn.Module):
def __init__(self, num_blocks, in_channels=4, z_dim=512):
super(TileNet, self).__init__()
self.in_channels = in_channels
self.z_dim = z_dim
self.in_planes = 64
self.conv1 = nn.Conv2d(self.in_channels, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(512, num_blocks[3], stride=2)
self.layer5 = self._make_layer(self.z_dim, num_blocks[4], stride=2)
def _make_layer(self, planes, num_blocks, stride, no_relu=False):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(ResidualBlock(self.in_planes, planes, stride=stride))
self.in_planes = planes
return nn.Sequential(*layers)
def encode(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.layer5(x)
x = F.avg_pool2d(x, 4)
z = x.view(x.size(0), -1)
return z
def forward(self, x):
return self.encode(x)
def loss(self, anchor, temporal_neighbor, spatial_neighbor, spatial_distant, margin, l2, ltn):
"""
Computes loss for each batch.
"""
z_a, z_tn, z_sn, z_d = (self.encode(anchor), self.encode(temporal_neighbor), self.encode(spatial_neighbor),
self.encode(spatial_distant))
return triplet_loss(z_a, z_tn, z_sn, z_d, margin, l2, ltn)
def triplet_loss(z_a, z_tn, z_sn, z_d, margin, l2, ltn):
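    # Combined objective implemented below:
    #   sn_loss = relu(||z_a - z_sn|| - ||z_a - z_d|| + margin)   spatial triplet term
    #   tn_loss = ||z_a - z_tn||                                  temporal closeness term
    #   loss    = (1 - ltn) * sn_loss + ltn * tn_loss  (+ l2 * mean embedding norm / sqrt(dim))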
dim = z_a.shape[-1]
l_n = torch.sqrt(((z_a - z_sn) ** 2).sum(dim=1))
l_d = - torch.sqrt(((z_a - z_d) ** 2).sum(dim=1))
sn_loss = F.relu(l_n + l_d + margin)
tn_loss = torch.sqrt(((z_a - z_tn) ** 2).sum(dim=1))
# average by #samples in mini-batch
l_n = torch.mean(l_n)
l_d = torch.mean(l_d)
l_nd = torch.mean(l_n + l_d)
sn_loss = torch.mean(sn_loss)
tn_loss = torch.mean(tn_loss)
loss = (1 - ltn) * sn_loss + ltn * tn_loss
norm_loss = 0
if l2 != 0:
z_a_norm = torch.sqrt((z_a ** 2).sum(dim=1))
z_sn_norm = torch.sqrt((z_sn ** 2).sum(dim=1))
z_d_norm = torch.sqrt((z_d ** 2).sum(dim=1))
z_tn_norm = torch.sqrt((z_tn ** 2).sum(dim=1))
norm_loss = torch.mean(z_a_norm + z_sn_norm + z_d_norm + z_tn_norm) / (dim ** 0.5)
loss += l2 * norm_loss
return loss, l_n, l_d, l_nd, sn_loss, tn_loss, norm_loss
def make_tilenet(in_channels, z_dim=512):
"""
Returns a TileNet for unsupervised Tile2Vec with the specified number of
input channels and feature dimension.
"""
num_blocks = [2, 2, 2, 2, 2]
return TileNet(num_blocks, in_channels=in_channels, z_dim=z_dim)
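# --- Hedged usage sketch (not part of the original module) ---------------------------
# Illustrative values only: 9-channel 50x50 tiles in a batch of 4.
# tilenet = make_tilenet(in_channels=9, z_dim=512)
# a, tn, sn, d = (torch.randn(4, 9, 50, 50) for _ in range(4))
# loss, l_n, l_d, l_nd, sn_loss, tn_loss, norm_loss = tilenet.loss(
#     a, tn, sn, d, margin=10, l2=0.01, ltn=0.5)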
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/models/semi_transformer/TileNet.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from torch.utils.data import Dataset, DataLoader
import torch
import numpy as np
class CnnLSTMDataset(Dataset):
"""
Case 0 n_triplets_per_file == (max_index + 1): load numpy file in __init__, retrieve idx in __getitem__
Case 1 n_triplets_per_file == 1: load numpy file for idx in __getitem__
Case 2 n_triplets_per_file > 1: load numpy file that stores idx (and others) in __getitem__
idx is the index in "current" train/validation/test set. global idx is the index in the whole data set.
Indices in train/validation/test set need to be sequential.
"""
def __init__(self, data_dir, start_index, end_index, y, n_tsteps, max_index, n_triplets_per_file):
self.data_dir = data_dir
self.start_index = start_index
self.end_index = end_index
self.n_triplets = end_index - start_index + 1
self.n_triplets_per_file = n_triplets_per_file
self.y = y
self.n_tsteps = n_tsteps
self.max_index = max_index
if n_triplets_per_file == (max_index + 1):
self.X_data = np.load('{}/0_{}.npy'.format(data_dir, max_index))
def __len__(self):
return self.n_triplets
def __getitem__(self, idx):
global_idx = idx + self.start_index
if self.n_triplets_per_file == (self.max_index + 1):
X_idx = self.X_data[global_idx][:self.n_tsteps]
else:
if self.n_triplets_per_file > 1:
file_idx = global_idx // self.n_triplets_per_file
local_idx = global_idx % self.n_triplets_per_file
end_idx = min((file_idx+1)*self.n_triplets_per_file-1, self.max_index)
X_idx = np.load('{}/{}_{}.npy'.format(self.data_dir,
file_idx * self.n_triplets_per_file,
end_idx))[local_idx][:self.n_tsteps]
else:
X_idx = np.load('{}/{}.npy'.format(self.data_dir, global_idx))[0][:self.n_tsteps]
y_idx = np.array(self.y[idx])
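        # keep only the anchor tile of each triplet -> (n_tsteps, n_vars, H, W)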
X_idx = X_idx[:, 0, :, :, :]
return torch.from_numpy(X_idx).float(), torch.from_numpy(y_idx).float()
def cnn_lstm_dataloader(data_dir, start_index, end_index, y, n_tsteps, max_index, n_triplets_per_file,
batch_size=50, shuffle=True, num_workers=4):
"""
img_type: 'landsat', 'rgb', or 'naip'
augment: random flip and rotate for data augmentation
shuffle: turn shuffle to False for producing embeddings that correspond to original tiles.
Returns a DataLoader with either NAIP (RGB/IR), RGB, or Landsat tiles.
"""
dataset = CnnLSTMDataset(data_dir, start_index, end_index, y, n_tsteps, max_index,
n_triplets_per_file=n_triplets_per_file)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
return dataloader
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/dataloader/cnn_lstm_dataloader.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from torch.utils.data import Dataset, DataLoader
import torch
import numpy as np
class C3DDataset(Dataset):
"""
Case 0 n_triplets_per_file == (max_index + 1): load numpy file in __init__, retrieve idx in __getitem__
Case 1 n_triplets_per_file == 1: load numpy file for idx in __getitem__
Case 2 n_triplets_per_file > 1: load numpy file that stores idx (and others) in __getitem__
idx is the index in "current" train/validation/test set. global idx is the index in the whole data set.
Indices in train/validation/test set need to be sequential.
"""
def __init__(self, data_dir, start_index, end_index, y, n_tsteps, max_index, n_triplets_per_file):
self.data_dir = data_dir
self.start_index = start_index
self.end_index = end_index
self.n_triplets = end_index - start_index + 1
self.n_triplets_per_file = n_triplets_per_file
self.y = y
self.n_tsteps = n_tsteps
self.max_index = max_index
if n_triplets_per_file == (max_index + 1):
self.X_data = np.load('{}/0_{}.npy'.format(data_dir, max_index))
def __len__(self):
return self.n_triplets
def __getitem__(self, idx):
global_idx = idx + self.start_index
if self.n_triplets_per_file == (self.max_index + 1):
X_idx = self.X_data[global_idx][:self.n_tsteps]
else:
if self.n_triplets_per_file > 1:
file_idx = global_idx // self.n_triplets_per_file
local_idx = global_idx % self.n_triplets_per_file
end_idx = min((file_idx+1)*self.n_triplets_per_file-1, self.max_index)
X_idx = np.load('{}/{}_{}.npy'.format(self.data_dir,
file_idx * self.n_triplets_per_file,
end_idx))[local_idx][:self.n_tsteps]
else:
X_idx = np.load('{}/{}.npy'.format(self.data_dir, global_idx))[0][:self.n_tsteps]
y_idx = np.array(self.y[idx])
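        # keep only the anchor tile, then move channels first for the 3D CNN:
        # (n_tsteps, n_vars, H, W) -> (n_vars, n_tsteps, H, W)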
X_idx = X_idx[:, 0, :, :, :]
X_idx = np.swapaxes(X_idx, 0, 1)
return torch.from_numpy(X_idx).float(), torch.from_numpy(y_idx).float()
def c3d_dataloader(data_dir, start_index, end_index, y, n_tsteps, max_index, n_triplets_per_file,
batch_size=50, shuffle=True, num_workers=4):
"""
img_type: 'landsat', 'rgb', or 'naip'
augment: random flip and rotate for data augmentation
shuffle: turn shuffle to False for producing embeddings that correspond to original tiles.
Returns a DataLoader with either NAIP (RGB/IR), RGB, or Landsat tiles.
"""
dataset = C3DDataset(data_dir, start_index, end_index, y, n_tsteps, max_index,
n_triplets_per_file=n_triplets_per_file)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
return dataloader
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/dataloader/c3d_dataloader.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from crop_yield_prediction.dataloader.c3d_dataloader import c3d_dataloader
from crop_yield_prediction.dataloader.semi_cropyield_dataloader import semi_cropyield_dataloader
from crop_yield_prediction.dataloader.cnn_lstm_dataloader import cnn_lstm_dataloader
from crop_yield_prediction.dataloader.cross_location_dataloader import cross_location_dataloader
__all__ = ['c3d_dataloader',
'semi_cropyield_dataloader',
'cnn_lstm_dataloader',
'cross_location_dataloader']
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/dataloader/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from torch.utils.data import Dataset, DataLoader
import torch
import numpy as np
class CrossLocationDataset(Dataset):
"""
Case 0 n_triplets_per_file == (max_index + 1): load numpy file in __init__, retrieve idx in __getitem__
Case 1 n_triplets_per_file == 1: load numpy file for idx in __getitem__
Case 2 n_triplets_per_file > 1: load numpy file that stores idx (and others) in __getitem__
idx is the index in "current" train/validation/test set. global idx is the index in the whole data set.
Indices in train/validation/test set need to be sequential.
"""
def __init__(self, data_dir, global_index_dic, y, n_tsteps, max_index, n_triplets_per_file):
self.data_dir = data_dir
self.global_index_dic = global_index_dic
self.n_triplets = len(global_index_dic)
self.y = y
self.n_tsteps = n_tsteps
self.max_index = max_index
if n_triplets_per_file == (max_index + 1):
self.X_data = np.load('{}/0_{}.npy'.format(data_dir, max_index))
assert n_triplets_per_file == 1
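        # Only Case 1 (one sample per file) is supported here: __getitem__ always loads
        # '<global_idx>.npy', so the preloaded X_data above is never read.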
def __len__(self):
return self.n_triplets
def __getitem__(self, idx):
global_idx = self.global_index_dic[idx]
X_idx = np.load('{}/{}.npy'.format(self.data_dir, global_idx))[0][:self.n_tsteps]
y_idx = np.array(self.y[idx])
return torch.from_numpy(X_idx).float(), torch.from_numpy(y_idx).float()
def cross_location_dataloader(data_dir, global_index_dic, y, n_tsteps, max_index, n_triplets_per_file,
batch_size=50, shuffle=True, num_workers=4):
"""
img_type: 'landsat', 'rgb', or 'naip'
augment: random flip and rotate for data augmentation
shuffle: turn shuffle to False for producing embeddings that correspond to original tiles.
Returns a DataLoader with either NAIP (RGB/IR), RGB, or Landsat tiles.
"""
dataset = CrossLocationDataset(data_dir, global_index_dic, y, n_tsteps, max_index,
n_triplets_per_file=n_triplets_per_file)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
return dataloader
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/dataloader/cross_location_dataloader.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from torch.utils.data import Dataset, DataLoader
import torch
import numpy as np
class SemiCropYieldDataset(Dataset):
"""
Case 0 n_triplets_per_file == (max_index + 1): load numpy file in __init__, retrieve idx in __getitem__
Case 1 n_triplets_per_file == 1: load numpy file for idx in __getitem__
Case 2 n_triplets_per_file > 1: load numpy file that stores idx (and others) in __getitem__
idx is the index in "current" train/validation/test set. global idx is the index in the whole data set.
Indices in train/validation/test set need to be sequential.
"""
def __init__(self, data_dir, start_index, end_index, y, n_tsteps, max_index, n_triplets_per_file):
self.data_dir = data_dir
self.start_index = start_index
self.end_index = end_index
self.n_triplets = end_index - start_index + 1
self.n_triplets_per_file = n_triplets_per_file
self.y = y
self.n_tsteps = n_tsteps
self.max_index = max_index
if n_triplets_per_file == (max_index + 1):
self.X_data = np.load('{}/0_{}.npy'.format(data_dir, max_index))
def __len__(self):
return self.n_triplets
def __getitem__(self, idx):
global_idx = idx + self.start_index
if self.n_triplets_per_file == (self.max_index + 1):
X_idx = self.X_data[global_idx][:self.n_tsteps]
else:
if self.n_triplets_per_file > 1:
file_idx = global_idx // self.n_triplets_per_file
local_idx = global_idx % self.n_triplets_per_file
end_idx = min((file_idx+1)*self.n_triplets_per_file-1, self.max_index)
X_idx = np.load('{}/{}_{}.npy'.format(self.data_dir,
file_idx * self.n_triplets_per_file,
end_idx))[local_idx][:self.n_tsteps]
else:
X_idx = np.load('{}/{}.npy'.format(self.data_dir, global_idx))[0][:self.n_tsteps]
y_idx = np.array(self.y[idx])
return torch.from_numpy(X_idx).float(), torch.from_numpy(y_idx).float()
def semi_cropyield_dataloader(data_dir, start_index, end_index, y, n_tsteps, max_index, n_triplets_per_file,
batch_size=50, shuffle=True, num_workers=4):
"""
img_type: 'landsat', 'rgb', or 'naip'
augment: random flip and rotate for data augmentation
shuffle: turn shuffle to False for producing embeddings that correspond to original tiles.
Returns a DataLoader with either NAIP (RGB/IR), RGB, or Landsat tiles.
"""
dataset = SemiCropYieldDataset(data_dir, start_index, end_index, y, n_tsteps, max_index,
n_triplets_per_file=n_triplets_per_file)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
return dataloader
| Context-Aware-Representation-Crop-Yield-Prediction-main | crop_yield_prediction/dataloader/semi_cropyield_dataloader.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
__all__ = ['cdl_values_to_crops', 'crops_to_cdl_values',
'CLIMATE_VARS', 'STATIC_CLIMATE_VARS', 'DYNAMIC_CLIMATE_VARS']
CLIMATE_VARS = ['ppt', 'evi', 'ndvi', 'elevation', 'lst_day', 'lst_night', 'clay', 'sand', 'silt']
STATIC_CLIMATE_VARS = ['elevation', 'clay', 'sand', 'silt']
DYNAMIC_CLIMATE_VARS = [x for x in CLIMATE_VARS if x not in STATIC_CLIMATE_VARS]
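# i.e. DYNAMIC_CLIMATE_VARS == ['ppt', 'evi', 'ndvi', 'lst_day', 'lst_night']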
cdl_values_to_crops = {1: 'Corn', 2: 'Cotton', 3: 'Rice', 4: 'Sorghum', 5: 'Soybeans', 6: 'Sunflower',
10: 'Peanuts', 11: 'Tobacco', 12: 'Sweet Corn', 13: 'Pop or Orn Corn', 14: 'Mint', 21: 'Barley',
22: 'Durum Wheat', 23: 'Spring Wheat', 24: 'Winter Wheat', 25: 'Other Small Grains',
26: 'Dbl Crop WinWht/Soybeans', 27: 'Rye', 28: 'Oats', 29: 'Millet', 30: 'Speltz', 31: 'Canola',
32: 'Flaxseed', 33: 'Safflower', 34: 'Rape Seed', 35: 'Mustard', 36: 'Alfalfa',
37: 'Other Hay/Non Alfalfa', 38: 'Camelina', 39: 'Buckwheat', 41: 'Sugarbeets', 42: 'Dry Beans',
43: 'Potatoes', 44: 'Other Crops', 45: 'Sugarcane', 46: 'Sweet Potatoes',
47: 'Misc Vegs & Fruits', 48: 'Watermelons', 49: 'Onions', 50: 'Cucumbers', 51: 'Chick Peas',
52: 'Lentils', 53: 'Peas', 54: 'Tomatoes', 55: 'Caneberries', 56: 'Hops', 57: 'Herbs',
58: 'Clover/Wildflowers', 59: 'Sod/Grass Seed', 60: 'Switchgrass', 61: 'Fallow/Idle Cropland',
63: 'Forest', 64: 'Shrubland1', 65: 'Barren1', 66: 'Cherries', 67: 'Peaches', 68: 'Apples',
69: 'Grapes', 70: 'Christmas Trees', 71: 'Other Tree Crops', 72: 'Citrus', 74: 'Pecans',
75: 'Almonds', 76: 'Walnuts', 77: 'Pears', 81: 'Clouds/No Data', 82: 'Developed', 83: 'Water',
87: 'Wetlands', 88: 'Nonag/Undefined', 92: 'Aquaculture', 111: 'Open Water',
112: 'Perennial Ice/Snow ', 121: 'Developed/Open Space', 122: 'Developed/Low Intensity',
123: 'Developed/Med Intensity', 124: 'Developed/High Intensity', 131: 'Barren2',
141: 'Deciduous Forest', 142: 'Evergreen Forest', 143: 'Mixed Forest', 152: 'Shrubland2',
176: 'Grassland/Pasture', 190: 'Woody Wetlands', 195: 'Herbaceous Wetlands', 204: 'Pistachios',
205: 'Triticale', 206: 'Carrots', 207: 'Asparagus', 208: 'Garlic', 209: 'Cantaloupes',
210: 'Prunes', 211: 'Olives', 212: 'Oranges', 213: 'Honeydew Melons', 214: 'Broccoli',
215: 'Avocados', 216: 'Peppers', 217: 'Pomegranates', 218: 'Nectarines', 219: 'Greens',
220: 'Plums', 221: 'Strawberries', 222: 'Squash', 223: 'Apricots', 224: 'Vetch',
225: 'Dbl Crop WinWht/Corn', 226: 'Dbl Crop Oats/Corn', 227: 'Lettuce', 229: 'Pumpkins',
230: 'Dbl Crop Lettuce/Durum Wht',
231: 'Dbl Crop Lettuce/Cantaloupe', 232: 'Dbl Crop Lettuce/Cotton',
233: 'Dbl Crop Lettuce/Barley', 234: 'Dbl Crop Durum Wht/Sorghum',
235: 'Dbl Crop Barley/Sorghum', 236: 'Dbl Crop WinWht/Sorghum', 237: 'Dbl Crop Barley/Corn',
238: 'Dbl Crop WinWht/Cotton', 239: 'Dbl Crop Soybeans/Cotton', 240: 'Dbl Crop Soybeans/Oats',
241: 'Dbl Crop Corn/Soybeans', 242: 'Blueberries', 243: 'Cabbage', 244: 'Cauliflower',
245: 'Celery', 246: 'Radishes', 247: 'Turnips', 248: 'Eggplants', 249: 'Gourds',
250: 'Cranberries', 254: 'Dbl Crop Barley/Soybeans'}
# A reverse map of above, allowing you to lookup CDL values from category name.
crops_to_cdl_values = {v: k for k, v in cdl_values_to_crops.items()}
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .sample_for_counties import generate_training_for_counties
from .sample_for_pretrained import generate_training_for_pretrained
__all__ = ["generate_training_for_counties",
"generate_training_for_pretrained"]
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/sample_quadruplets/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from netCDF4 import Dataset
import numpy as np
import os
import pandas as pd
import numpy.ma as ma
import matplotlib.pyplot as plt
import pickle
import sys
sys.path.append("..")
from data_preprocessing import CLIMATE_VARS
def generate_dims_for_counties(croptype):
yield_data = pd.read_csv('../../processed_data/crop_yield/{}_2000_2018.csv'.format(croptype))[[
'Year', 'State ANSI', 'County ANSI', 'Value']]
yield_data.columns = ['year', 'state', 'county', 'value']
ppt_fh = Dataset('../../experiment_data/spatial_temporal/nc_files/2014.nc', 'r')
v_ppt = ppt_fh.variables['ppt'][0, :, :]
if yield_data.value.dtype != float:
yield_data['value'] = yield_data['value'].str.replace(',', '')
yield_data = yield_data.astype({'year': int, 'state': int, 'county': int, 'value': float})
counties = pd.read_csv('../../processed_data/counties/lst/us_counties_cro_cvm_locations.csv')
county_dic = {}
for c in counties.itertuples():
state, county, lat0, lat1, lon0, lon1 = c.state, c.county, c.lat0, c.lat1, c.lon0, c.lon1
county_dic[(state, county)] = [lat0, lat1, lon0, lon1]
yield_dim_csv = []
for yd in yield_data.itertuples():
year, state, county, value = yd.year, yd.state, yd.county, yd.value
if (state, county) not in county_dic:
continue
lat0, lat1, lon0, lon1 = county_dic[(state, county)]
assert lat1 - lat0 == 49
assert lon1 - lon0 == 49
selected_ppt = v_ppt[lat0:lat1+1, lon0:lon1+1]
if ma.count_masked(selected_ppt) != 0:
continue
yield_dim_csv.append([state, county, year, value, (lat0+lat1)//2+1, (lon0+lon1)//2+1])
yield_dim_csv = pd.DataFrame(yield_dim_csv, columns=['state', 'county', 'year', 'value', 'lat', 'lon'])
yield_dim_csv.to_csv('../../experiment_data/spatial_temporal/counties/dim_y.csv')
def get_imgs_and_timesteps_for_quadruplets(yield_dim_csv, start_month, end_month, n_distant, sample_case):
yield_dim_data = pd.read_csv(yield_dim_csv)
img_timestep_quadruplets = []
for i_data, data in enumerate(yield_dim_data.itertuples()):
a_year, a_lat, a_lon = data.year, data.lat, data.lon
exp = [i_data, a_year, a_lat, a_lon]
months_head = []
for a_month in range(start_month, end_month+1):
for i_distant in range(n_distant):
if sample_case == 'hard':
exp += [a_year, a_month]
elif sample_case == 'soft':
i_distant_year = np.random.randint(2000, a_year+1)
i_distant_month = np.random.randint(start_month, end_month+1)
exp += [i_distant_year, i_distant_month]
months_head.append('a_month{}_d{}_year'.format(a_month, i_distant))
months_head.append('a_month{}_d{}_month'.format(a_month, i_distant))
img_timestep_quadruplets.append(exp)
img_timestep_quadruplets = pd.DataFrame(img_timestep_quadruplets,
columns=['index', 'a_year', 'a_lat', 'a_lon'] + months_head)
img_timestep_quadruplets.to_csv('../../experiment_data/spatial_temporal/counties/img_timestep_quadruplets_{}.csv'.format(sample_case), index=False)
# output dimension: [n_samples, n_timesteps, 1+n_temporal_neighbor+n_spatial_neighbor+n_distant, n_variables, 50, 50]
def generate_training_for_counties(out_dir, img_dir, start_month, end_month, start_month_index, n_spatial_neighbor, n_distant,
img_timestep_quadruplets, img_size, neighborhood_radius, distant_radius=None, prenorm=True):
if distant_radius is None:
output_dir = '{}/nr_{}'.format(out_dir, neighborhood_radius)
else:
output_dir = '{}/nr_{}_dr{}'.format(out_dir, neighborhood_radius, distant_radius)
os.makedirs(output_dir, exist_ok=True)
size_even = (img_size % 2 == 0)
tile_radius = img_size // 2
img_timestep_quadruplets = pd.read_csv(img_timestep_quadruplets)
columns = ['index', 'a_year', 'a_lat', 'a_lon']
for a_month in range(start_month, end_month + 1):
for i_distant in range(n_distant):
columns.append('a_month{}_d{}_year'.format(a_month, i_distant))
columns.append('a_month{}_d{}_month'.format(a_month, i_distant))
column_index = {x: i+1 for i, x in enumerate(columns)}
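    # +1 because DataFrame.itertuples() puts the pandas Index at position 0 of each row tuple,
    # so data[column_index[name]] below retrieves the named column.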
print(column_index)
# monthly mean
# {0: [57.3017, 0.15911582, 0.30263194, 349.417, 277.6782, 268.29166, 19.372774, 38.962997, 48.396523],
# 1: [73.980095, 0.19241332, 0.35961938, 349.417, 286.09885, 273.22183, 19.372774, 38.962997, 48.396523],
# 2: [87.33122, 0.27037004, 0.46616226, 349.417, 294.85776, 279.05136, 19.372774, 38.962997, 48.396523],
# 3: [106.66116, 0.38423842, 0.5934064, 349.417, 299.4103, 284.4472, 19.372774, 38.962997, 48.396523],
# 4: [111.04675, 0.46401384, 0.6796355, 349.417, 302.36234, 289.90076, 19.372774, 38.962997, 48.396523],
# 5: [100.82861, 0.5001915, 0.7197062, 349.417, 303.2484, 292.21436, 19.372774, 38.962997, 48.396523],
# 6: [93.255714, 0.4844686, 0.71926653, 349.417, 302.26636, 291.2553, 19.372774, 38.962997, 48.396523],
# 7: [88.390526, 0.41577676, 0.67133075, 349.417, 299.28165, 287.00778, 19.372774, 38.962997, 48.396523]}
# monthly std
# {0: [49.994095, 0.09068172, 0.18281896, 258.4355, 9.178257, 8.026086, 17.579718, 24.665548, 20.690763],
# 1: [56.513268, 0.084073044, 0.15483402, 258.4355, 7.8059173, 6.699706, 17.579718, 24.665548, 20.690763],
# 2: [53.212543, 0.11533181, 0.17148177, 258.4355, 5.039537, 5.1716127, 17.579718, 24.665548, 20.690763],
# 3: [60.39661, 0.1439103, 0.18301234, 258.4355, 4.484442, 4.53816, 17.579718, 24.665548, 20.690763],
# 4: [60.862434, 0.13719948, 0.16091526, 258.4355, 4.6158304, 3.6706781, 17.579718, 24.665548, 20.690763],
# 5: [58.666737, 0.13492998, 0.15656078, 258.4355, 5.140572, 3.0179217, 17.579718, 24.665548, 20.690763],
# 6: [60.55039, 0.14212538, 0.16778886, 258.4355, 4.962786, 3.2834055, 17.579718, 24.665548, 20.690763],
# 7: [64.83031, 0.12455596, 0.17052796, 258.4355, 4.5033474, 3.5745926, 17.579718, 24.665548, 20.690763]}
mean_file = open('{}/monthly_channel_wise_mean.pkl'.format(img_dir), 'rb')
std_file = open('{}/monthly_channel_wise_std.pkl'.format(img_dir), 'rb')
monthly_mean = pickle.load(mean_file)
monthly_std = pickle.load(std_file)
img_dic = {}
for year in range(2000, 2019):
fh = Dataset('{}/{}.nc'.format(img_dir, year))
img = []
for cv in CLIMATE_VARS:
img.append(fh.variables[cv][:])
img = ma.asarray(img)
# (n_variables, n_timesteps, n_lat, n_lon)
img_shape = img.shape
fh.close()
img_dic[year] = img
n_quadruplets = img_timestep_quadruplets['index'].max() + 1
print('Number of quadruplets: {}'.format(n_quadruplets))
n_t = end_month - start_month + 1
n_tiles_per_file = 1
n_tiles = 0
tiles = []
n_samples = 0
a_lats, a_lons, sn_lats, sn_lons, d_lats, d_lons = [], [], [], [], [], []
print('Start sampling...')
for data in img_timestep_quadruplets.itertuples():
index, a_year, a_lat, a_lon = data.index, data.a_year, data.a_lat, data.a_lon
quadruplets_tile = np.zeros((n_t, (1+1+n_spatial_neighbor+n_distant), len(CLIMATE_VARS), img_size, img_size))
for a_month in range(start_month, end_month+1):
a_month_tile_index = a_month - start_month
current_ts_index = a_month-start_month+start_month_index
lat0, lat1, lon0, lon1 = _get_lat_lon_range(a_lat, a_lon, tile_radius, size_even)
current_tile = img_dic[a_year][:, current_ts_index, lat0:lat1+1, lon0:lon1+1]
assert ma.count_masked(current_tile) == 0
current_tile = np.asarray(current_tile)
if prenorm:
current_tile = _prenormalize_tile(current_tile, monthly_mean[current_ts_index], monthly_std[current_ts_index])
quadruplets_tile[a_month_tile_index, 0] = current_tile
n_samples += 1
a_lats.append(a_lat)
a_lons.append(a_lon)
tn_tile = img_dic[a_year][:, current_ts_index-1, lat0:lat1 + 1, lon0:lon1 + 1]
assert ma.count_masked(tn_tile) == 0
tn_tile = np.asarray(tn_tile)
if prenorm:
tn_tile = _prenormalize_tile(tn_tile, monthly_mean[current_ts_index-1], monthly_std[current_ts_index-1])
quadruplets_tile[a_month_tile_index, 1] = tn_tile
n_samples += 1
for i_spatial_neighbor in range(n_spatial_neighbor):
sn_tile, sn_lat, sn_lon = _sample_neighbor(img_dic[a_year], a_lat, a_lon, neighborhood_radius,
tile_radius, current_ts_index, size_even)
assert ma.count_masked(sn_tile) == 0
sn_tile = np.asarray(sn_tile)
if prenorm:
sn_tile = _prenormalize_tile(sn_tile, monthly_mean[current_ts_index], monthly_std[current_ts_index])
quadruplets_tile[a_month_tile_index, 2+i_spatial_neighbor] = sn_tile
n_samples += 1
sn_lats.append(sn_lat)
sn_lons.append(sn_lon)
for i_distant in range(n_distant):
d_year = data[column_index['a_month{}_d{}_year'.format(a_month, i_distant)]]
d_month = data[column_index['a_month{}_d{}_month'.format(a_month, i_distant)]]
if d_year == a_year and d_month == a_month:
d_tile, d_lat, d_lon = _sample_distant_same(img_dic[d_year], a_lat, a_lon, neighborhood_radius,
distant_radius,
tile_radius, current_ts_index, size_even)
assert ma.count_masked(d_tile) == 0
d_tile = np.asarray(d_tile)
if prenorm:
d_tile = _prenormalize_tile(d_tile, monthly_mean[current_ts_index], monthly_std[current_ts_index])
quadruplets_tile[a_month_tile_index, 2+n_spatial_neighbor+i_distant] = d_tile
n_samples += 1
d_lats.append(d_lat)
d_lons.append(d_lon)
else:
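                    # Reached only under 'soft' sampling, where the distant tile comes from a
                    # different year/month than the anchor (see get_imgs_and_timesteps_for_quadruplets).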
print('Wrong sampling!')
d_ts_index = d_month - start_month + start_month_index
d_tile, d_lat, d_lon = _sample_distant_diff(img_dic[d_year], tile_radius, d_ts_index, size_even)
assert ma.count_masked(d_tile) == 0
d_tile = np.asarray(d_tile)
if prenorm:
d_tile = _prenormalize_tile(d_tile, monthly_mean[d_ts_index], monthly_std[d_ts_index])
quadruplets_tile[a_month_tile_index, 2 + n_spatial_neighbor + i_distant] = d_tile
n_samples += 1
d_lats.append(d_lat)
d_lons.append(d_lon)
# output dimension: [n_samples, n_timesteps, 1+n_temporal_neighbor+n_spatial_neighbor+n_distant, n_variables, 50, 50]
tiles.append(quadruplets_tile)
if len(tiles) == n_tiles_per_file or (n_tiles + len(tiles)) == n_quadruplets:
if n_tiles_per_file > 1:
np.save('{}/{}_{}.npy'.format(output_dir, n_tiles, n_tiles + len(tiles) - 1), np.asarray(tiles, dtype=np.float32))
else:
np.save('{}/{}.npy'.format(output_dir, n_tiles), np.asarray(tiles, dtype=np.float32))
assert n_samples == len(tiles) * n_t * (1 + 1 + n_spatial_neighbor + n_distant), n_samples
n_tiles += len(tiles)
tiles = []
n_samples = 0
plot_sampled_centers(a_lats, a_lons, img_shape, output_dir, 'a_dims')
plot_sampled_centers(sn_lats, sn_lons, img_shape, output_dir, 'sn_dims')
plot_sampled_centers(d_lats, d_lons, img_shape, output_dir, 'd_dims')
def _prenormalize_tile(tile, means, stds):
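    # per-channel standardization: reshape stats to (C, 1, 1) so they broadcast over the (C, H, W) tile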
means = np.asarray(means).reshape((-1, 1, 1))
stds = np.asarray(stds).reshape((-1, 1, 1))
return (tile - means) / stds
def _get_lat_lon_range(a_lat, a_lon, tile_radius, size_even):
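    # e.g. img_size=50 -> tile_radius=25, size_even=True: rows a_lat-25 .. a_lat+24 and
    # cols a_lon-25 .. a_lon+24, i.e. exactly 50x50 cells once the caller slices with +1.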
lat0, lon0 = a_lat - tile_radius, a_lon - tile_radius
lat1 = a_lat + tile_radius - 1 if size_even else a_lat + tile_radius
lon1 = a_lon + tile_radius - 1 if size_even else a_lon + tile_radius
return lat0, lat1, lon0, lon1
def _sample_neighbor(img, a_lat, a_lon, neighborhood_radius, tile_radius, timestep, size_even):
if neighborhood_radius is None:
return _sample_distant_diff(img, tile_radius, timestep, size_even)
_, _, img_h, img_w = img.shape
while True:
n_lat, n_lon = a_lat, a_lon
while n_lat == a_lat and n_lon == a_lon:
n_lat = np.random.randint(max(a_lat - neighborhood_radius, tile_radius),
min(a_lat + neighborhood_radius, img_h - tile_radius))
n_lon = np.random.randint(max(a_lon - neighborhood_radius, tile_radius),
min(a_lon + neighborhood_radius, img_w - tile_radius))
lat0, lat1, lon0, lon1 = _get_lat_lon_range(n_lat, n_lon, tile_radius, size_even)
tile = img[:, timestep, lat0:lat1+1, lon0:lon1+1]
if ma.count_masked(tile) == 0:
break
return tile, n_lat, n_lon
def _sample_distant_same(img, a_lat, a_lon, neighborhood_radius, distant_radius, tile_radius, timestep, size_even):
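    # Rejection-sample a fully unmasked tile whose center lies outside the anchor's
    # neighborhood_radius but (when distant_radius is given) still within distant_radius
    # of the anchor along each axis.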
if neighborhood_radius is None:
return _sample_distant_diff(img, tile_radius, timestep, size_even)
_, _, img_h, img_w = img.shape
while True:
d_lat, d_lon = a_lat, a_lon
if distant_radius is None:
while (d_lat >= a_lat - neighborhood_radius) and (d_lat <= a_lat + neighborhood_radius):
d_lat = np.random.randint(tile_radius, img_h - tile_radius)
while (d_lon >= a_lon - neighborhood_radius) and (d_lon <= a_lon + neighborhood_radius):
d_lon = np.random.randint(tile_radius, img_w - tile_radius)
else:
while ((d_lat >= a_lat - neighborhood_radius) and (d_lat <= a_lat + neighborhood_radius)) \
or d_lat >= a_lat + distant_radius \
or d_lat <= a_lat - distant_radius:
d_lat = np.random.randint(tile_radius, img_h - tile_radius)
while ((d_lon >= a_lon - neighborhood_radius) and (d_lon <= a_lon + neighborhood_radius))\
or d_lon >= a_lon + distant_radius \
or d_lon <= a_lon - distant_radius:
d_lon = np.random.randint(tile_radius, img_w - tile_radius)
lat0, lat1, lon0, lon1 = _get_lat_lon_range(d_lat, d_lon, tile_radius, size_even)
tile = img[:, timestep, lat0:lat1 + 1, lon0:lon1 + 1]
if ma.count_masked(tile) == 0:
break
return tile, d_lat, d_lon
def _sample_distant_diff(img, tile_radius, timestep, size_even):
_, _, img_h, img_w = img.shape
while True:
d_lat = np.random.randint(tile_radius, img_h - tile_radius)
d_lon = np.random.randint(tile_radius, img_w - tile_radius)
lat0, lat1, lon0, lon1 = _get_lat_lon_range(d_lat, d_lon, tile_radius, size_even)
tile = img[:, timestep, lat0:lat1 + 1, lon0:lon1 + 1]
if ma.count_masked(tile) == 0:
break
return tile, d_lat, d_lon
def plot_sampled_centers(lats, lons, img_shape, out_dir, name):
c, t, h, w = img_shape
plt.scatter(lons, lats, s=5)
plt.axis([0, w, 0, h])
plt.savefig('{}/{}.jpg'.format(out_dir, name))
plt.close()
if __name__ == '__main__':
generate_dims_for_counties(croptype='soybeans')
get_imgs_and_timesteps_for_quadruplets('../../experiment_data/spatial_temporal/counties/dim_y.csv', 3, 9, 1,
sample_case='hard')
get_imgs_and_timesteps_for_quadruplets('../../experiment_data/spatial_temporal/counties/dim_y.csv', 3, 9, 1,
sample_case='soft')
generate_training_for_counties(out_dir='../../experiment_data/spatial_temporal/counties',
img_dir='../../experiment_data/spatial_temporal/nc_files',
start_month=3, end_month=9, start_month_index=1, n_spatial_neighbor=1, n_distant=1,
img_timestep_quadruplets=
'../../experiment_data/spatial_temporal/counties/img_timestep_quadruplets.csv',
img_size=50, neighborhood_radius=100)
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/sample_quadruplets/sample_for_counties.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from netCDF4 import Dataset
import numpy as np
import os
import pandas as pd
import numpy.ma as ma
import matplotlib.pyplot as plt
import pickle
import sys
sys.path.append("..")
from data_preprocessing import CLIMATE_VARS
# output dimension: [n_samples, n_timesteps, 1+n_temporal_neighbor+n_spatial_neighbor+n_distant, n_variables, 50, 50]
def generate_training_for_pretrained(out_dir, img_dir, n_quadruplets, start_year, end_year, start_month, end_month, start_month_index, n_spatial_neighbor, n_distant,
img_size, neighborhood_radius, distant_radius=None, prenorm=True):
if distant_radius is None:
output_dir = '{}/pretrained_nr_{}'.format(out_dir, neighborhood_radius)
else:
output_dir = '{}/pretrained_nr_{}_dr{}'.format(out_dir, neighborhood_radius, distant_radius)
os.makedirs(output_dir, exist_ok=True)
size_even = (img_size % 2 == 0)
tile_radius = img_size // 2
sampled_years = [np.random.randint(start_year, end_year+1) for _ in range(n_quadruplets)]
sampled_months = [np.random.randint(start_month, end_month+1) for _ in range(n_quadruplets)]
# monthly mean
# {0: [57.3017, 0.15911582, 0.30263194, 349.417, 277.6782, 268.29166, 19.372774, 38.962997, 48.396523],
# 1: [73.980095, 0.19241332, 0.35961938, 349.417, 286.09885, 273.22183, 19.372774, 38.962997, 48.396523],
# 2: [87.33122, 0.27037004, 0.46616226, 349.417, 294.85776, 279.05136, 19.372774, 38.962997, 48.396523],
# 3: [106.66116, 0.38423842, 0.5934064, 349.417, 299.4103, 284.4472, 19.372774, 38.962997, 48.396523],
# 4: [111.04675, 0.46401384, 0.6796355, 349.417, 302.36234, 289.90076, 19.372774, 38.962997, 48.396523],
# 5: [100.82861, 0.5001915, 0.7197062, 349.417, 303.2484, 292.21436, 19.372774, 38.962997, 48.396523],
# 6: [93.255714, 0.4844686, 0.71926653, 349.417, 302.26636, 291.2553, 19.372774, 38.962997, 48.396523],
# 7: [88.390526, 0.41577676, 0.67133075, 349.417, 299.28165, 287.00778, 19.372774, 38.962997, 48.396523]}
# monthly std
# {0: [49.994095, 0.09068172, 0.18281896, 258.4355, 9.178257, 8.026086, 17.579718, 24.665548, 20.690763],
# 1: [56.513268, 0.084073044, 0.15483402, 258.4355, 7.8059173, 6.699706, 17.579718, 24.665548, 20.690763],
# 2: [53.212543, 0.11533181, 0.17148177, 258.4355, 5.039537, 5.1716127, 17.579718, 24.665548, 20.690763],
# 3: [60.39661, 0.1439103, 0.18301234, 258.4355, 4.484442, 4.53816, 17.579718, 24.665548, 20.690763],
# 4: [60.862434, 0.13719948, 0.16091526, 258.4355, 4.6158304, 3.6706781, 17.579718, 24.665548, 20.690763],
# 5: [58.666737, 0.13492998, 0.15656078, 258.4355, 5.140572, 3.0179217, 17.579718, 24.665548, 20.690763],
# 6: [60.55039, 0.14212538, 0.16778886, 258.4355, 4.962786, 3.2834055, 17.579718, 24.665548, 20.690763],
# 7: [64.83031, 0.12455596, 0.17052796, 258.4355, 4.5033474, 3.5745926, 17.579718, 24.665548, 20.690763]}
mean_file = open('{}/monthly_channel_wise_mean.pkl'.format(img_dir), 'rb')
std_file = open('{}/monthly_channel_wise_std.pkl'.format(img_dir), 'rb')
monthly_mean = pickle.load(mean_file)
monthly_std = pickle.load(std_file)
img_dic = {}
for year in range(2000, 2019):
fh = Dataset('{}/{}.nc'.format(img_dir, year))
img = []
for cv in CLIMATE_VARS:
img.append(fh.variables[cv][:])
img = ma.asarray(img)
# (n_variables, n_timesteps, n_lat, n_lon)
img_shape = img.shape
fh.close()
img_dic[year] = img
print('Number of quadruplets: {}'.format(n_quadruplets))
n_tiles_per_file = 1000
n_tiles = 0
tiles = []
n_samples = 0
a_lats, a_lons, sn_lats, sn_lons, d_lats, d_lons = [], [], [], [], [], []
print('Start sampling...')
for a_year, a_month in zip(sampled_years, sampled_months):
quadruplets_tile = np.zeros(((1+1+n_spatial_neighbor+n_distant), len(CLIMATE_VARS), img_size, img_size))
current_ts_index = a_month-start_month+start_month_index
current_tile, a_lat, a_lon = _sample_anchor(img_dic[a_year], tile_radius, distant_radius, current_ts_index, size_even)
assert ma.count_masked(current_tile) == 0
current_tile = np.asarray(current_tile)
if prenorm:
current_tile = _prenormalize_tile(current_tile, monthly_mean[current_ts_index], monthly_std[current_ts_index])
quadruplets_tile[0] = current_tile
n_samples += 1
a_lats.append(a_lat)
a_lons.append(a_lon)
lat0, lat1, lon0, lon1 = _get_lat_lon_range(a_lat, a_lon, tile_radius, size_even)
tn_tile = img_dic[a_year][:, current_ts_index-1, lat0:lat1 + 1, lon0:lon1 + 1]
assert ma.count_masked(tn_tile) == 0
tn_tile = np.asarray(tn_tile)
if prenorm:
tn_tile = _prenormalize_tile(tn_tile, monthly_mean[current_ts_index-1], monthly_std[current_ts_index-1])
quadruplets_tile[1] = tn_tile
n_samples += 1
for i_spatial_neighbor in range(n_spatial_neighbor):
sn_tile, sn_lat, sn_lon = _sample_neighbor(img_dic[a_year], a_lat, a_lon, neighborhood_radius,
tile_radius, current_ts_index, size_even)
assert ma.count_masked(sn_tile) == 0
sn_tile = np.asarray(sn_tile)
if prenorm:
sn_tile = _prenormalize_tile(sn_tile, monthly_mean[current_ts_index], monthly_std[current_ts_index])
quadruplets_tile[2+i_spatial_neighbor] = sn_tile
n_samples += 1
sn_lats.append(sn_lat)
sn_lons.append(sn_lon)
for i_distant in range(n_distant):
d_tile, d_lat, d_lon = _sample_distant_same(img_dic[a_year], a_lat, a_lon, neighborhood_radius,
distant_radius,
tile_radius, current_ts_index, size_even)
assert ma.count_masked(d_tile) == 0
d_tile = np.asarray(d_tile)
if prenorm:
d_tile = _prenormalize_tile(d_tile, monthly_mean[current_ts_index], monthly_std[current_ts_index])
quadruplets_tile[2+n_spatial_neighbor+i_distant] = d_tile
n_samples += 1
d_lats.append(d_lat)
d_lons.append(d_lon)
# output dimension: [n_samples, n_timesteps, 1+n_temporal_neighbor+n_spatial_neighbor+n_distant, n_variables, 50, 50]
tiles.append(quadruplets_tile)
if len(tiles) == n_tiles_per_file or (n_tiles + len(tiles)) == n_quadruplets:
if n_tiles_per_file > 1:
np.save('{}/{}_{}.npy'.format(output_dir, n_tiles, n_tiles + len(tiles) - 1), np.asarray(tiles, dtype=np.float32))
else:
np.save('{}/{}.npy'.format(output_dir, n_tiles), np.asarray(tiles, dtype=np.float32))
assert n_samples == len(tiles) * (1 + 1 + n_spatial_neighbor + n_distant), n_samples
n_tiles += len(tiles)
tiles = []
n_samples = 0
plot_sampled_centers(a_lats, a_lons, img_shape, output_dir, 'a_dims')
plot_sampled_centers(sn_lats, sn_lons, img_shape, output_dir, 'sn_dims')
plot_sampled_centers(d_lats, d_lons, img_shape, output_dir, 'd_dims')
def _prenormalize_tile(tile, means, stds):
means = np.asarray(means).reshape((-1, 1, 1))
stds = np.asarray(stds).reshape((-1, 1, 1))
return (tile - means) / stds
def _get_lat_lon_range(a_lat, a_lon, tile_radius, size_even):
lat0, lon0 = a_lat - tile_radius, a_lon - tile_radius
lat1 = a_lat + tile_radius - 1 if size_even else a_lat + tile_radius
lon1 = a_lon + tile_radius - 1 if size_even else a_lon + tile_radius
return lat0, lat1, lon0, lon1
def _sample_anchor(img, tile_radius, distant_radius, timestep, size_even):
_, _, img_h, img_w = img.shape
while True:
a_lat = np.random.randint(tile_radius, img_h - tile_radius)
a_lon = np.random.randint(tile_radius, img_w - tile_radius)
lat0, lat1, lon0, lon1 = _get_lat_lon_range(a_lat, a_lon, tile_radius, size_even)
tile = img[:, timestep, lat0:lat1 + 1, lon0:lon1 + 1]
# guarantee that distant tile can be sampled
d_lat0, d_lat1, d_lon0, d_lon1 = _get_lat_lon_range(a_lat, a_lon, distant_radius, True)
big_tile = img[:, timestep, d_lat0:d_lat1 + 1, d_lon0:d_lon1 + 1]
if ma.count_masked(tile) == 0 and ma.count_masked(big_tile) == 0:
break
return tile, a_lat, a_lon
def _sample_neighbor(img, a_lat, a_lon, neighborhood_radius, tile_radius, timestep, size_even):
if neighborhood_radius is None:
return _sample_distant_diff(img, tile_radius, timestep, size_even)
_, _, img_h, img_w = img.shape
while True:
n_lat, n_lon = a_lat, a_lon
while n_lat == a_lat and n_lon == a_lon:
n_lat = np.random.randint(max(a_lat - neighborhood_radius, tile_radius),
min(a_lat + neighborhood_radius, img_h - tile_radius))
n_lon = np.random.randint(max(a_lon - neighborhood_radius, tile_radius),
min(a_lon + neighborhood_radius, img_w - tile_radius))
lat0, lat1, lon0, lon1 = _get_lat_lon_range(n_lat, n_lon, tile_radius, size_even)
tile = img[:, timestep, lat0:lat1+1, lon0:lon1+1]
if ma.count_masked(tile) == 0:
break
return tile, n_lat, n_lon
def _sample_distant_same(img, a_lat, a_lon, neighborhood_radius, distant_radius, tile_radius, timestep, size_even):
if neighborhood_radius is None:
return _sample_distant_diff(img, tile_radius, timestep, size_even)
_, _, img_h, img_w = img.shape
while True:
d_lat, d_lon = a_lat, a_lon
if distant_radius is None:
while (d_lat >= a_lat - neighborhood_radius) and (d_lat <= a_lat + neighborhood_radius):
d_lat = np.random.randint(tile_radius, img_h - tile_radius)
while (d_lon >= a_lon - neighborhood_radius) and (d_lon <= a_lon + neighborhood_radius):
d_lon = np.random.randint(tile_radius, img_w - tile_radius)
else:
while ((d_lat >= a_lat - neighborhood_radius) and (d_lat <= a_lat + neighborhood_radius)) \
or d_lat >= a_lat + distant_radius \
or d_lat <= a_lat - distant_radius:
d_lat = np.random.randint(tile_radius, img_h - tile_radius)
while ((d_lon >= a_lon - neighborhood_radius) and (d_lon <= a_lon + neighborhood_radius))\
or d_lon >= a_lon + distant_radius \
or d_lon <= a_lon - distant_radius:
d_lon = np.random.randint(tile_radius, img_w - tile_radius)
lat0, lat1, lon0, lon1 = _get_lat_lon_range(d_lat, d_lon, tile_radius, size_even)
tile = img[:, timestep, lat0:lat1 + 1, lon0:lon1 + 1]
if ma.count_masked(tile) == 0:
break
return tile, d_lat, d_lon
def _sample_distant_diff(img, tile_radius, timestep, size_even):
_, _, img_h, img_w = img.shape
while True:
d_lat = np.random.randint(tile_radius, img_h - tile_radius)
d_lon = np.random.randint(tile_radius, img_w - tile_radius)
lat0, lat1, lon0, lon1 = _get_lat_lon_range(d_lat, d_lon, tile_radius, size_even)
tile = img[:, timestep, lat0:lat1 + 1, lon0:lon1 + 1]
if ma.count_masked(tile) == 0:
break
return tile, d_lat, d_lon
def plot_sampled_centers(lats, lons, img_shape, out_dir, name):
c, t, h, w = img_shape
plt.scatter(lons, lats, s=5)
plt.axis([0, w, 0, h])
plt.savefig('{}/{}.jpg'.format(out_dir, name))
plt.close()
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/sample_quadruplets/sample_for_pretrained.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .counties_plot import counties_plot, save_colorbar
__all__ = ['counties_plot', 'save_colorbar']
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/plot/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from netCDF4 import Dataset
import matplotlib.pyplot as plt
import numpy as np
# mean of lats: 40.614586, mean of lons: -121.24792
def plot_local(in_file, x_axis, y_axis):
fh = Dataset(in_file, 'r')
lats = fh.variables['lat'][:]
lons = fh.variables['lon'][:]
x_indices = [(np.abs(lons-i)).argmin() for i in x_axis]
y_indices = [(np.abs(lats-i)).argmin() for i in y_axis]
for v in fh.variables.keys():
if v not in ['lat', 'lon']:
values = fh.variables[v][:]
plt.imshow(values, interpolation='none', cmap=plt.get_cmap("jet"))
plt.title(v)
plt.gca().set_xticks(x_indices)
plt.gca().set_yticks(y_indices)
plt.gca().set_xticklabels(x_axis)
plt.gca().set_yticklabels(y_axis)
plt.colorbar()
plt.savefig('../../processed_data/local/ca_20190604/{}.jpg'.format(v))
plt.close()
def plot_landsat(in_file, x_axis, y_axis):
fh = Dataset(in_file, 'r')
lats = fh.variables['lat'][:][::-1]
lons = fh.variables['lon'][:]
x_indices = [(np.abs(lons - i)).argmin() for i in x_axis]
y_indices = [(np.abs(lats - i)).argmin() for i in y_axis]
titles = ["Band 1 Ultra Blue", "Band 2 Blue", "Band 3 Green",
"Band 4 Red", "Band 5 Near Infrared",
"Band 6 Shortwave Infrared 1", "Band 7 Shortwave Infrared 2"]
for title, v in zip(titles, range(1, 8)):
values = np.flipud(fh.variables['band{}'.format(v)][:])
plt.imshow(values, interpolation='none', cmap=plt.get_cmap("jet"), vmin=0, vmax=10000)
plt.title(title)
plt.gca().set_xticks(x_indices)
plt.gca().set_yticks(y_indices)
plt.gca().set_xticklabels(x_axis)
plt.gca().set_yticklabels(y_axis)
plt.colorbar()
plt.savefig('../../processed_data/local/ca_20190604/band{}.jpg'.format(v))
plt.close()
if __name__ == '__main__':
y_axis = [41.20, 40.95, 40.70, 40.45, 40.20]
x_axis = [-122.0, -121.75, -121.5, -121.25, -121.0, -120.75, -120.5]
plot_local('../../processed_data/local/ca_20190604/elevation.nc', x_axis, y_axis)
plot_local('../../processed_data/local/ca_20190604/lai.nc', x_axis, y_axis)
plot_local('../../processed_data/local/ca_20190604/lst.nc', x_axis, y_axis)
plot_local('../../processed_data/local/ca_20190604/nws_precip.nc', x_axis, y_axis)
plot_local('../../processed_data/local/ca_20190604/soil_fraction.nc', x_axis, y_axis)
plot_local('../../processed_data/local/ca_20190604/soil_moisture.nc', x_axis, y_axis)
plot_landsat('../../processed_data/local/ca_20190604/landsat.nc', x_axis, y_axis)
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/plot/plot_local.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from bs4 import BeautifulSoup
from pathlib import Path
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
from collections import defaultdict
import numpy as np
import seaborn as sns
# colors = sns.color_palette("RdYlBu", 10).as_hex()
colors = ['#cdeaf3', '#9bcce2', '#fff1aa', '#fece7f', '#fa9b58', '#ee613e', '#d22b27']
def counties_plot(data_dict, savepath, quantiles):
"""
For the most part, reformatting of
https://github.com/JiaxuanYou/crop_yield_prediction/blob/master/6%20result_analysis/yield_map.py
"""
# load the svg file
svg = Path('../../processed_data/counties/counties.svg').open('r').read()
# Load into Beautiful Soup
soup = BeautifulSoup(svg, features="html.parser")
# Find counties
paths = soup.findAll('path')
path_style = 'font-size:12px;fill-rule:nonzero;stroke:#FFFFFF;stroke-opacity:1;stroke-width:0.1' \
';stroke-miterlimit:4;stroke-dasharray:none;stroke-linecap:butt;marker-start' \
':none;stroke-linejoin:bevel;fill:'
for p in paths:
if p['id'] not in ["State_Lines", "separator"]:
try:
rate = data_dict[p['id']]
except KeyError:
continue
if rate > quantiles[0.95]:
color_class = 6
elif rate > quantiles[0.8]:
color_class = 5
elif rate > quantiles[0.6]:
color_class = 4
elif rate > quantiles[0.4]:
color_class = 3
elif rate > quantiles[0.2]:
color_class = 2
elif rate > quantiles[0.05]:
color_class = 1
else:
color_class = 0
color = colors[color_class]
p['style'] = path_style + color
soup = soup.prettify()
with savepath.open('w') as f:
f.write(soup)
def save_colorbar(savedir, quantiles):
"""
For the most part, reformatting of
https://github.com/JiaxuanYou/crop_yield_prediction/blob/master/6%20result_analysis/yield_map.py
"""
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.02, 0.8])
cmap = mpl.colors.ListedColormap(colors[1:-1])
cmap.set_over(colors[-1])
cmap.set_under(colors[0])
bounds = [quantiles[x] for x in [0.05, 0.2, 0.4, 0.6, 0.8, 0.95]]
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
cb = mpl.colorbar.ColorbarBase(ax, cmap=cmap,
norm=norm,
# to use 'extend', you must
# specify two extra boundaries:
boundaries=[quantiles[0.0]] + bounds + [quantiles[1.0]],
extend='both',
ticks=bounds, # optional
spacing='proportional',
orientation='vertical')
plt.savefig('{}/colorbar.jpg'.format(savedir), dpi=300, bbox_inches='tight')
def process_yield_data():
important_columns = ['Year', 'State ANSI', 'County ANSI', 'Value']
yield_data = pd.read_csv('../../processed_data/crop_yield/yield_data.csv').dropna(
subset=important_columns, how='any')[['Year', 'State ANSI', 'County ANSI', 'Value']]
yield_data.columns = ['Year', 'State', 'County', 'Value']
yield_per_year_dic = defaultdict(dict)
for yd in yield_data.itertuples():
year, state, county, value = yd.Year, yd.State, int(yd.County), yd.Value
state = str(state).zfill(2)
county = str(county).zfill(3)
yield_per_year_dic[year][state+county] = value
return yield_per_year_dic
if __name__ == '__main__':
    yield_data = process_yield_data()
    # counties_plot and save_colorbar require a quantiles dict; pooling it across all years
    # (one shared color scale) is an illustrative choice made here so this demo runs.
    all_values = np.array([v for d in yield_data.values() for v in d.values()], dtype=float)
    quantiles = {q: np.percentile(all_values, q * 100) for q in [0.0, 0.05, 0.2, 0.4, 0.6, 0.8, 0.95, 1.0]}
    for year in range(2003, 2017):
        counties_plot(yield_data[year], Path('../../processed_data/crop_yield/plots/{}_yield.html'.format(year)), quantiles)
        values = np.array(list(yield_data[year].values()))
        print(year, np.percentile(values, 0), np.percentile(values, 25), np.percentile(values, 50),
              np.percentile(values, 75), np.percentile(values, 100))
    save_colorbar('../../processed_data/crop_yield/plots', quantiles)
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/plot/counties_plot.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .merge_various_days import merge_various_days
__all__ = ['merge_various_days']
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/merge/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from ..utils import generate_doy
import os
import numpy as np
import datetime as dt
from datetime import datetime
from netCDF4 import Dataset
FIRST_DATE = dt.date(2001, 1, 1)
def merge_various_days(in_path, out_path, fout_name, doy_start=None, doy_end=None, select_vars=None):
fh_out = Dataset(os.path.join(out_path, fout_name + '.nc'), 'w')
num = 0
var_list = []
if doy_start is None or doy_end is None:
fnames = [fname[:-3] for fname in os.listdir(in_path) if fname.endswith(".nc")]
fnames = sorted(fnames, key=lambda x: datetime.strptime("".join(c for c in x if c.isdigit()), '%Y%m%d'))
else:
fnames = list(generate_doy(doy_start, doy_end, ""))
num_files = len(fnames)
print("Number of files", num_files)
for nc_file in fnames:
nc_doy = "".join(c for c in nc_file if c.isdigit())
fh_in = Dataset(os.path.join(in_path, nc_file + ".nc"), 'r')
n_dim = {}
if num == 0:
for name, dim in fh_in.dimensions.items():
n_dim[name] = len(dim)
fh_out.createDimension(name, len(dim) if not dim.isunlimited() else None)
fh_out.createDimension('time', num_files)
outVar = fh_out.createVariable('time', 'int', ("time",))
outVar[:] = range(1, num_files + 1)
select_vars = list(fh_in.variables.keys()) if select_vars is None else select_vars
for v_name, varin in fh_in.variables.items():
if v_name == 'lat' or v_name == 'lon':
outVar = fh_out.createVariable(v_name, varin.datatype, varin.dimensions)
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
outVar[:] = varin[:]
else:
if v_name in select_vars:
var_list.append(v_name)
outVar = fh_out.createVariable(v_name, varin.datatype, ("time", "lat", "lon",))
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
outVar[:] = np.empty((num_files, n_dim['lat'], n_dim['lon']))
current_date = datetime.strptime(nc_doy, "%Y%m%d").date()
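        # store time as integer days since FIRST_DATE (2001-01-01)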
fh_out.variables['time'][num] = (current_date - FIRST_DATE).days
for vname in var_list:
var_value = fh_in.variables[vname][:]
fh_out.variables[vname][num, :, :] = var_value[:]
num += 1
fh_in.close()
fh_out.close()
print(num, num_files)
assert (num == num_files)
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/merge/merge_various_days.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pandas as pd
from collections import defaultdict
import numpy as np
import numpy.ma as ma
from pathlib import Path
from netCDF4 import Dataset
from operator import itemgetter
import matplotlib.pyplot as plt
import sys
sys.path.append("..")
from data_preprocessing.plot import counties_plot, save_colorbar
state_dic = {10: 'Delaware', 1: 'Alabama', 11: 'District of Columbia', 12: 'Florida', 13: 'Georgia', 15: 'Hawaii',
16: 'Idaho', 17: 'Illinois', 18: 'Indiana', 19: 'Iowa', 20: 'Kansas', 2: 'Alaska', 21: 'Kentucky',
22: 'Louisiana', 23: 'Maine', 24: 'Maryland', 25: 'Massachusetts', 26: 'Michigan', 27: 'Minnesota',
28: 'Mississippi', 29: 'Missouri', 30: 'Montana', 31: 'Nebraska', 32: 'Nevada', 33: 'New Hampshire',
34: 'New Jersey', 35: 'New Mexico', 36: 'New York', 37: 'North Carolina', 38: 'North Dakota', 39: 'Ohio',
40: 'Oklahoma', 4: 'Arizona', 41: 'Oregon', 42: 'Pennsylvania', 44: 'Rhode Island', 45: 'South Carolina',
46: 'South Dakota', 47: 'Tennessee', 48: 'Texas', 49: 'Utah', 50: 'Vermont', 5: 'Arkansas', 51: 'Virginia',
53: 'Washington', 54: 'West Virginia', 55: 'Wisconsin', 56: 'Wyoming', 6: 'California', 8: 'Colorado',
9: 'Connecticut'}
def clean_data(in_file, out_file, dropyear):
yield_data = pd.read_csv(in_file)
important_columns = ['Year', 'State ANSI', 'County ANSI', 'Value']
yield_data = yield_data.dropna(subset=important_columns, how='any')
    yield_data = yield_data[yield_data.Year != dropyear]
yield_data.to_csv(out_file)
def plot_data(in_file, out_folder):
yield_data = pd.read_csv(in_file)[['Year', 'State ANSI', 'County ANSI', 'Value']]
yield_data.columns = ['Year', 'State', 'County', 'Value']
if yield_data.Value.dtype != float:
yield_data['Value'] = yield_data['Value'].str.replace(',', '')
yield_data = yield_data.astype({'Year': int, 'State': int, 'County': int, 'Value': float})
quantiles = {}
for q in [0.05, 0.2, 0.4, 0.6, 0.8, 0.95]:
quantiles[q] = yield_data.Value.quantile(q)
quantiles[0.0] = yield_data.Value.min()
quantiles[1.0] = yield_data.Value.max()
print(quantiles)
yield_per_year_dic = defaultdict(dict)
for yd in yield_data.itertuples():
year, state, county, value = yd.Year, yd.State, int(yd.County), yd.Value
state = str(state).zfill(2)
county = str(county).zfill(3)
yield_per_year_dic[year][state + county] = value
for year in np.unique(list(yield_per_year_dic.keys())):
counties_plot(yield_per_year_dic[year], Path('{}/{}_yield.html'.format(out_folder, year)), quantiles)
save_colorbar(out_folder, quantiles)
def get_counties_for_crop(county_file, crop_file, out_file):
yield_data = pd.read_csv(crop_file)[['Year', 'State ANSI', 'County ANSI', 'Value']]
counties = yield_data.drop_duplicates(subset=['State ANSI', 'County ANSI'])
counties.columns = ['Year', 'State', 'County', 'Value']
crop_counties = [int(str(int(yd.State)).zfill(2) + str(int(yd.County)).zfill(3)) for yd in counties.itertuples()]
fh_in = Dataset(county_file, 'r')
fh_out = Dataset(out_file, 'w')
for name, dim in fh_in.dimensions.items():
fh_out.createDimension(name, len(dim))
for v_name, varin in fh_in.variables.items():
outVar = fh_out.createVariable(v_name, varin.datatype, varin.dimensions)
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
if v_name in ['lat', 'lon']:
outVar[:] = varin[:]
else:
mask_values = np.in1d(varin[:], crop_counties).reshape(varin[:].shape)
outVar[:] = ma.array(varin[:], mask=~mask_values)
fh_in.close()
fh_out.close()
def plot_counties(in_file):
fh = Dataset(in_file, 'r')
county_labels = fh.variables['county_label'][:]
print(len(np.unique(county_labels.compressed())))
fh.close()
county_labels = np.unique(county_labels.compressed())
county_labels = [[str(x).zfill(5)[:2], str(x).zfill(5)[2:]] for x in county_labels]
data_dic = {}
for state, county in county_labels:
data_dic[state + county] = 100
fake_quantiles = {x: 1 for x in [0.05, 0.2, 0.4, 0.6, 0.8, 0.95]}
counties_plot(data_dic, Path('../../processed_data/crop_yield/{}.html'.format(in_file[:-3])), fake_quantiles)
return data_dic.keys()
def get_counties_for_crops():
# soybeans
get_counties_for_crop('../../processed_data/counties/us_counties.nc',
'../../processed_data/crop_yield/soybeans_1999_2018.csv',
'../../processed_data/crop_yield/county_locations/soybeans_us_counties.nc')
plot_counties('../../processed_data/crop_yield/county_locations/soybeans_us_counties.nc')
get_counties_for_crop('../../processed_data/counties/us_counties_cro.nc',
'../../processed_data/crop_yield/soybeans_1999_2018.csv',
'../../processed_data/crop_yield/county_locations/soybeans_us_counties_cro.nc')
plot_counties('../../processed_data/crop_yield/county_locations/soybeans_us_counties_cro.nc')
get_counties_for_crop('../../processed_data/counties/us_counties_cro_cvm.nc',
'../../processed_data/crop_yield/soybeans_1999_2018.csv',
'../../processed_data/crop_yield/county_locations/soybeans_us_counties_cro_cvm.nc')
plot_counties('../../processed_data/crop_yield/county_locations/soybeans_us_counties_cro_cvm.nc')
# corn
get_counties_for_crop('../../processed_data/counties/us_counties.nc',
'../../processed_data/crop_yield/corn_1999_2018.csv',
'../../processed_data/crop_yield/county_locations/corn_us_counties.nc')
plot_counties('../../processed_data/crop_yield/county_locations/corn_us_counties.nc')
get_counties_for_crop('../../processed_data/counties/us_counties_cro.nc',
'../../processed_data/crop_yield/corn_1999_2018.csv',
'../../processed_data/crop_yield/county_locations/corn_us_counties_cro.nc')
plot_counties('../../processed_data/crop_yield/county_locations/corn_us_counties_cro.nc')
get_counties_for_crop('../../processed_data/counties/us_counties_cro_cvm.nc',
'../../processed_data/crop_yield/corn_1999_2018.csv',
'../../processed_data/crop_yield/county_locations/corn_us_counties_cro_cvm.nc')
plot_counties('../../processed_data/crop_yield/county_locations/corn_us_counties_cro_cvm.nc')
# cotton
get_counties_for_crop('../../processed_data/counties/us_counties.nc',
'../../processed_data/crop_yield/cotton_1999_2018.csv',
'../../processed_data/crop_yield/county_locations/cotton_us_counties.nc')
plot_counties('../../processed_data/crop_yield/county_locations/cotton_us_counties.nc')
get_counties_for_crop('../../processed_data/counties/us_counties_cro.nc',
'../../processed_data/crop_yield/cotton_1999_2018.csv',
'../../processed_data/crop_yield/county_locations/cotton_us_counties_cro.nc')
plot_counties('../../processed_data/crop_yield/county_locations/cotton_us_counties_cro.nc')
get_counties_for_crop('../../processed_data/counties/us_counties_cro_cvm.nc',
'../../processed_data/crop_yield/cotton_1999_2018.csv',
'../../processed_data/crop_yield/county_locations/cotton_us_counties_cro_cvm.nc')
plot_counties('../../processed_data/crop_yield/county_locations/cotton_us_counties_cro_cvm.nc')
def analyze_patch_size(croptype):
fh_us = Dataset('../../processed_data/crop_yield/{}_us_counties.nc'.format(croptype), 'r')
fh_us_cro = Dataset('../../processed_data/crop_yield/{}_us_counties_cro.nc'.format(croptype), 'r')
fh_us_cro_cvm = Dataset('../../processed_data/crop_yield/{}_us_counties_cro_cvm.nc'.format(croptype), 'r')
us_counties, us_sizes = np.unique(fh_us.variables['county_label'][:].compressed(), return_counts=True)
us_cro_counties, us_cro_sizes = np.unique(fh_us_cro.variables['county_label'][:].compressed(), return_counts=True)
us_cro_cvm_counties, us_cro_cvm_sizes = np.unique(fh_us_cro_cvm.variables['county_label'][:].compressed(),
return_counts=True)
us_dic = defaultdict(lambda: -1, {k: v for k, v in zip(us_counties, us_sizes)})
us_cro_dic = defaultdict(lambda: -1, {k: v for k, v in zip(us_cro_counties, us_cro_sizes)})
us_cro_cvm_dic = defaultdict(lambda: -1, {k: v for k, v in zip(us_cro_cvm_counties, us_cro_cvm_sizes)})
for k in us_dic:
print(k, us_dic[k], us_cro_dic[k], us_cro_cvm_dic[k])
def analyze_harvested_size(croptype):
print(croptype)
harvested_data = pd.read_csv('../../processed_data/crop_yield/harvested_areas/{}_1999_2018.csv'.format(croptype))
important_columns = ['Year', 'State ANSI', 'County ANSI', 'Value']
harvested_data = harvested_data.dropna(subset=important_columns, how='any')[['Year', 'State ANSI', 'County ANSI',
'Value']]
harvested_data.columns = ['Year', 'State', 'County', 'Value']
if harvested_data.Value.dtype != float:
harvested_data['Value'] = harvested_data['Value'].str.replace(',', '')
harvested_data = harvested_data.astype({'Year': int, 'State': int, 'County': int, 'Value': float})
# convert from acres to square kilometers
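    # (1 km^2 = 247.105 acres, hence the division by 247.105 below)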
harvested_data['Value'] = harvested_data['Value'] / 247.105
print(harvested_data.Value.describe(percentiles=[.1, .25, .5, .75, .9]))
print(harvested_data[harvested_data.Value < 625].count().Year / len(harvested_data))
# County-level crop production is calculated by multiplying crop yield with harvest area.
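# For example, a county reporting 50 bu/acre over 100,000 harvested acres contributes 5,000,000 bushels.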
def analyze_productions(croptype):
print(croptype)
yield_data = pd.read_csv('../../processed_data/crop_yield/{}_1999_2018.csv'.format(croptype))[['Year', 'State ANSI', 'County ANSI', 'Value']]
yield_data.columns = ['Year', 'State', 'County', 'Value']
if yield_data.Value.dtype != float:
yield_data['Value'] = yield_data['Value'].str.replace(',', '')
yield_data = yield_data.astype({'Year': int, 'State': int, 'County': int, 'Value': float})
harvested_data = pd.read_csv('../../processed_data/crop_yield/harvested_areas/{}_1999_2018.csv'.format(croptype))
important_columns = ['Year', 'State ANSI', 'County ANSI', 'Value']
harvested_data = harvested_data.dropna(subset=important_columns, how='any')[['Year', 'State ANSI', 'County ANSI',
'Value']]
harvested_data.columns = ['Year', 'State', 'County', 'Value']
if harvested_data.Value.dtype != float:
harvested_data['Value'] = harvested_data['Value'].str.replace(',', '')
harvested_data = harvested_data.astype({'Year': int, 'State': int, 'County': int, 'Value': float})
    # harvested area is kept in acres here: yield is reported per acre,
    # so production = yield (per acre) * harvested area (acres)
production_data = pd.DataFrame.merge(yield_data, harvested_data, on=['Year', 'State', 'County'], how='outer',
suffixes=['_yield', '_harvest'])
production_data['Production'] = production_data['Value_yield'] * production_data['Value_harvest']
state_productions = production_data.groupby(['State'])['Production'].sum().to_dict()
states, productions = zip(*sorted(state_productions.items(), key=itemgetter(1), reverse=True))
total = sum(productions)
i = 0
for state, production, cum_perc in zip(states, productions, (100*subtotal/total for subtotal in np.cumsum(productions))):
print(i, state, state_dic[state], production, cum_perc)
i += 1
if __name__ == '__main__':
# clean_data('../../processed_data/crop_yield/origi/soybeans_1999_2018.csv',
# '../../processed_data/crop_yield/soybeans_1999_2018.csv')
# clean_data('../../processed_data/crop_yield/origi/soybeans_1999_2018.csv',
# '../../processed_data/crop_yield/soybeans_2000_2018.csv', dropyear=1999)
# clean_data('../../processed_data/crop_yield/origi/grain_corn_1999_2018.csv',
# '../../processed_data/crop_yield/corn_1999_2018.csv')
# clean_data('../../processed_data/crop_yield/origi/upland_cotton_1999_2018.csv',
# '../../processed_data/crop_yield/cotton_1999_2018.csv')
# plot_data('../../processed_data/crop_yield/soybeans_1999_2018.csv',
# '../../processed_data/crop_yield/plots/soybeans')
# plot_data('../../processed_data/crop_yield/corn_1999_2018.csv',
# '../../processed_data/crop_yield/plots/corn')
# plot_data('../../processed_data/crop_yield/cotton_1999_2018.csv',
# '../../processed_data/crop_yield/plots/cotton')
# get_counties_for_crops()
# soybeans
# analyze_patch_size('soybeans')
# analyze_harvested_size('soybeans')
# analyze_harvested_size('corn')
# analyze_harvested_size('cotton')
analyze_productions('soybeans')
# analyze_productions('corn')
# analyze_productions('cotton')
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/preprocess/crop_yield.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from netCDF4 import Dataset
import numpy as np
import numpy.ma as ma
def combine_landsat():
fh_out = Dataset('../../processed_data/landsat/20180719.nc', 'w')
flag = False
for i in range(1, 8):
fh_in = Dataset('../../raw_data/landsat/nebraska/SRB{}_20180719.nc'.format(i), 'r')
if not flag:
lats, lons = fh_in.variables['lat'][:], fh_in.variables['lon'][:]
fh_out.createDimension("lat", len(lats))
fh_out.createDimension("lon", len(lons))
for v_name, varin in fh_in.variables.items():
if v_name in ["lat", "lon"]:
outVar = fh_out.createVariable(v_name, varin.datatype, (v_name,))
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
fh_out.variables["lat"][:] = lats[:]
fh_out.variables["lon"][:] = lons[:]
flag = True
for v_name, varin in fh_in.variables.items():
if v_name == 'Band1':
outVar = fh_out.createVariable('band{}'.format(i), varin.datatype, ('lat', 'lon'))
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
outVar[:] = ma.masked_less(varin[:], 0)
fh_in.close()
fh_out.close()
# 20190604
def subset_landsat(lat1, lat2, lon1, lon2):
fh_out = Dataset('../../processed_data/landsat/2019155.nc', 'w')
flag = False
lat_indices, lon_indices = None, None
for i in range(1, 8):
fh_in = Dataset('../../raw_data/landsat/SRB{}_doy2019155.nc'.format(i), 'r')
if not flag:
lats, lons = fh_in.variables['lat'][:], fh_in.variables['lon'][:]
lat_indices = np.searchsorted(lats, [lat2, lat1])
lon_indices = np.searchsorted(lons, [lon1, lon2])
lats = lats[lat_indices[0]: lat_indices[1]]
lons = lons[lon_indices[0]: lon_indices[1]]
fh_out.createDimension("lat", len(lats))
fh_out.createDimension("lon", len(lons))
for v_name, varin in fh_in.variables.items():
if v_name in ["lat", "lon"]:
outVar = fh_out.createVariable(v_name, varin.datatype, (v_name,))
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
fh_out.variables["lat"][:] = lats[:]
fh_out.variables["lon"][:] = lons[:]
flag = True
for v_name, varin in fh_in.variables.items():
if v_name == 'Band1':
outVar = fh_out.createVariable('band{}'.format(i), varin.datatype, ('lat', 'lon'))
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
outVar[:] = ma.masked_less(varin[lat_indices[0]: lat_indices[1], lon_indices[0]: lon_indices[1]], 0)
fh_in.close()
fh_out.close()
if __name__ == '__main__':
# combine_landsat()
subset_landsat(41.2047, 40.0268, -122.0304, -120.4676)
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/preprocess/landsat.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from netCDF4 import Dataset
# gdal_translate -of netCDF PRISM_ppt_stable_4kmM3_201806_bil.bil PRISM_ppt_stable_4kmM3_201806.nc
def prism_convert_to_nc():
fh_out = open(os.path.join("../..", "prism_convert_to_nc.sh"), "w")
fh_out.write("#!/bin/bash\n")
# m_dic = {"ppt": "M3", "tdmean": "M1", "tmax": "M2", "tmean": "M2", "tmin": "M2", "vpdmax": "M1", "vpdmin": "M1"}
for climate_var in ["ppt", "tdmean", "tmax", "tmean", "tmin", "vpdmax", "vpdmin"]:
for year in range(1999, 2019):
for month in range(1, 13):
fh_out.write("gdal_translate -of netCDF raw_data/prism/monthly/PRISM_{}_stable_4kmM3_198101_201904_bil/"
"PRISM_{}_stable_4kmM3_{}{}_bil.bil processed_data/prism/monthly/{}_{}{}.nc\n"
.format(climate_var, climate_var, year, "{0:02}".format(month), climate_var, year,
"{0:02}".format(month)))
def combine_multivar():
climate_vars = ["ppt", "tdmean", "tmax", "tmean", "tmin", "vpdmax", "vpdmin"]
for year in range(1999, 2019):
for month in range(1, 13):
fh_out = Dataset('../../processed_data/prism/combined_monthly/{}{}.nc'.format(year,
'{0:02}'.format(month)), 'w')
first_flag = True
for v in climate_vars:
                fh_in = Dataset('../../processed_data/prism/monthly/{}_{}{}.nc'.format(v, year,
                                                                                       '{0:02}'.format(month)), 'r')
if first_flag:
for name, dim in fh_in.dimensions.items():
fh_out.createDimension(name, len(dim))
for v_name, varin in fh_in.variables.items():
if v_name in ['lat', 'lon']:
outVar = fh_out.createVariable(v_name, varin.datatype, varin.dimensions)
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
outVar[:] = varin[:]
first_flag = False
for v_name, varin in fh_in.variables.items():
if v_name == 'Band1':
outVar = fh_out.createVariable(v, varin.datatype, varin.dimensions)
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
outVar[:] = varin[:]
fh_in.close()
fh_out.close()
if __name__ == "__main__":
prism_convert_to_nc()
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/preprocess/prism.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from netCDF4 import Dataset
import numpy as np
import numpy.ma as ma
from pathlib import Path
import pandas as pd
import matplotlib.pyplot as plt
import csv
import sys
sys.path.append("..")
from data_preprocessing.utils import match_lat_lon
from data_preprocessing.plot import counties_plot
def generate_convert_to_nc_script():
fh_out = open('../../processed_data/counties/all/convert_to_nc.sh', 'w')
fh_out.write('#!/bin/bash\n')
for tif_file in os.listdir('../../processed_data/counties/all/tif/'):
if tif_file.endswith('.tif'):
fh_out.write('gdal_translate -of netCDF tif/{} nc/{}.nc\n'.format(tif_file, tif_file[:-4]))
def combine_ncs():
fh_out = Dataset('../../processed_data/counties/us_counties.nc', 'w')
fh_ref = Dataset('../../processed_data/landcover/cropland_cro.nc', 'r')
lats, lons = fh_ref.variables['lat'][:], fh_ref.variables['lon'][:]
for name, dim in fh_ref.dimensions.items():
fh_out.createDimension(name, len(dim))
for v_name, varin in fh_ref.variables.items():
if v_name in ['lat', 'lon']:
outVar = fh_out.createVariable(v_name, varin.datatype, (v_name,))
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
outVar[:] = varin[:]
outVar = fh_out.createVariable('county_label', 'int', ('lat', 'lon'))
outVar.setncatts({'_FillValue': np.array([0]).astype(int)})
counties_labels = np.full((len(lats), len(lons)), 0)
outVar = fh_out.createVariable('state_code', 'int', ('lat', 'lon'))
outVar.setncatts({'_FillValue': np.array([0]).astype(int)})
state_code = np.full((len(lats), len(lons)), 0)
outVar = fh_out.createVariable('county_code', 'int', ('lat', 'lon'))
outVar.setncatts({'_FillValue': np.array([0]).astype(int)})
county_code = np.full((len(lats), len(lons)), 0)
for nc_file in os.listdir('../../processed_data/counties/all/nc/'):
if nc_file.endswith('.nc'):
# ignore Alaska
if nc_file.split('_')[0] == '2':
continue
print(nc_file)
fh_in = Dataset('../../processed_data/counties/all/nc/{}'.format(nc_file), 'r')
local_lats, local_lons = fh_in.variables['lat'][:], fh_in.variables['lon'][:]
i_lat_start, i_lat_end, i_lon_start, i_lon_end = match_lat_lon(lats, lons, local_lats, local_lons)
local_values = ma.masked_equal(fh_in.variables['Band1'][:], 0.0)
for i, j in zip(*local_values.nonzero()):
state, county = nc_file[:-3].split('_')
state = str(state).zfill(2)
county = str(county).zfill(3)
counties_labels[i+i_lat_start, j+i_lon_start] = int(state+county)
state_code[i+i_lat_start, j+i_lon_start] = int(state)
county_code[i+i_lat_start, j+i_lon_start] = int(county)
fh_in.close()
fh_out.variables['county_label'][:] = ma.masked_equal(counties_labels, 0)
fh_out.variables['state_code'][:] = ma.masked_equal(state_code, 0)
fh_out.variables['county_code'][:] = ma.masked_equal(county_code, 0)
fh_ref.close()
fh_out.close()
def mask_with_landcover(out_file, ref_file):
fh_in = Dataset('../../processed_data/counties/us_counties.nc', 'r')
fh_out = Dataset(out_file, 'w')
fh_ref = Dataset(ref_file, 'r')
for name, dim in fh_in.dimensions.items():
fh_out.createDimension(name, len(dim))
for v_name, varin in fh_in.variables.items():
outVar = fh_out.createVariable(v_name, varin.datatype, varin.dimensions)
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
if v_name in ['lat', 'lon']:
outVar[:] = varin[:]
else:
cropland_mask = ma.getmaskarray(fh_ref.variables['cropland'][:])
outVar[:] = ma.array(varin[:], mask=cropland_mask)
fh_in.close()
fh_out.close()
fh_ref.close()
def plot_counties(in_file):
fh = Dataset(in_file, 'r')
county_labels = fh.variables['county_label'][:]
print(len(np.unique(county_labels.compressed())))
fh.close()
county_labels = np.unique(county_labels.compressed())
county_labels = [[str(x).zfill(5)[:2], str(x).zfill(5)[2:]] for x in county_labels]
data_dic = {}
for state, county in county_labels:
data_dic[state+county] = 100
fake_quantiles = {x: 1 for x in [0.05, 0.2, 0.4, 0.6, 0.8, 0.95]}
counties_plot(data_dic, Path('../../processed_data/counties/{}.html'.format(in_file[:-3])), fake_quantiles)
return data_dic.keys()
def plot_counties_data(in_file):
county_data = pd.read_csv(in_file)[['StateFips', 'CntyFips']]
county_data.columns = ['State', 'County']
data_dic = {}
for row in county_data.itertuples():
state, county = int(row.State), int(row.County)
state = str(state).zfill(2)
county = str(county).zfill(3)
data_dic[state + county] = 100
fake_quantiles = {x: 1 for x in [0.05, 0.2, 0.4, 0.6, 0.8, 0.95]}
counties_plot(data_dic, Path('../../processed_data/counties/county_data.html'), fake_quantiles)
return data_dic.keys()
def analyze_counties(in_file):
fh = Dataset(in_file, 'r')
counties, sizes = np.unique(fh.variables['county_label'][:].compressed(), return_counts=True)
for county, size in zip(counties, sizes):
print(county, size)
plt.hist(sizes)
plt.show()
def get_county_locations(in_file):
fh = Dataset(in_file, 'r')
lats, lons = fh.variables['lat'][:], fh.variables['lon'][:]
county_labels = fh.variables['county_label'][:]
counties = np.unique(county_labels.compressed())
with open('{}_locations.csv'.format(in_file[:-3]), 'w') as f:
writer = csv.writer(f, delimiter=',')
writer.writerow(['state', 'county', 'lat', 'lon'])
for county in counties:
selected_rows, selected_cols = np.where(county_labels == county)
lat_mean, lon_mean = np.mean(lats[selected_rows]), np.mean(lons[selected_cols])
line = [str(county).zfill(5)[:2], str(county).zfill(5)[2:], lat_mean, lon_mean]
writer.writerow(line)
if __name__ == '__main__':
# generate_convert_to_nc_script()
combine_ncs()
# mask_with_landcover('../../processed_data/counties/us_counties_cro.nc',
# '../../processed_data/landcover/cropland_cro.nc')
# mask_with_landcover('../../processed_data/counties/us_counties_cro_cvm.nc',
# '../../processed_data/landcover/cropland_cro_cvm.nc')
#
# county_key = plot_counties_data('../../processed_data/counties/county_data.csv')
# us_county_key = plot_counties('../../processed_data/counties/us_counties.nc')
# print([x for x in us_county_key if x not in county_key])
# print([x for x in county_key if x not in us_county_key and not x.startswith('02')])
# plot_counties('../../processed_data/counties/us_counties_cro.nc')
# plot_counties('../../processed_data/counties/us_counties_cro_cvm.nc')
#
# analyze_counties('../../processed_data/counties/us_counties.nc')
# analyze_counties('../../processed_data/counties/us_counties_cro.nc')
# analyze_counties('../../processed_data/counties/us_counties_cro_cvm.nc')
# get_county_locations('../../processed_data/counties/us_counties.nc')
# get_county_locations('../../processed_data/counties/us_counties_cro.nc')
# get_county_locations('../../processed_data/counties/us_counties_cro_cvm.nc')
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/preprocess/county_locations.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .cdl import cdl_convert_to_nc
__all__ = ["cdl_convert_to_nc"]
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/preprocess/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from netCDF4 import Dataset
import os
from osgeo import gdal, osr
import numpy as np
from pyproj import Proj, transform
import numpy.ma as ma
# gdalwarp -t_srs '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs' 2018_30m_cdls.img 2018_30m_cdls.tif
# gdal_translate -of netCDF PRISM_ppt_stable_4kmM3_201806_bil.bil PRISM_ppt_stable_4kmM3_201806.nc
def cdl_convert_to_nc(in_dir, in_file, out_dir, out_file):
if not os.path.exists(out_dir):
os.makedirs(out_dir)
raster = gdal.Open(os.path.join(in_dir, in_file))
cdl_values = raster.ReadAsArray()
geo = raster.GetGeoTransform()
projWKT = raster.GetProjection()
proj = osr.SpatialReference()
proj.ImportFromWkt(projWKT)
# n_lat, n_lon = np.shape(cdl_values)
# b = raster.GetGeoTransform()
# lons = (np.arange(n_lon) * b[1] + b[0])
# lats = (np.arange(n_lat) * b[5] + b[3])
#
# fh_out = Dataset(os.path.join(out_dir, out_file), "w")
# fh_out.createDimension("lat", len(lats))
# fh_out.createDimension("lon", len(lons))
#
# outVar = fh_out.createVariable('lat', float, ('lat'))
# outVar.setncatts({"units": "degree_north"})
# outVar[:] = lats[:]
# outVar = fh_out.createVariable('lon', float, ('lon'))
# outVar.setncatts({"units": "degree_east"})
# outVar[:] = lons[:]
#
# outVar = fh_out.createVariable("cdl", float, ("lat", "lon"))
# outVar[:] = ma.masked_less(cdl_values, 0)
#
# fh_out.close()
if __name__ == "__main__":
cdl_convert_to_nc("raw_data/cdl/2008_30m_cdls", "2008_30m_cdls.img",
"processed_data/cdl/30m/")
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/preprocess/cdl.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from netCDF4 import Dataset
import numpy.ma as ma
import datetime
import sys
sys.path.append("..")
def extract_lai(nc_file):
fh_in = Dataset('../../raw_data/lai/' + nc_file, 'r')
for index, n_days in enumerate(fh_in.variables['time'][:]):
date = (datetime.datetime(2000, 1, 1, 0, 0) + datetime.timedelta(int(n_days))).strftime('%Y%m%d')
print(date)
fh_out = Dataset('../../processed_data/lai/500m/{}.nc'.format(date), 'w')
for name, dim in fh_in.dimensions.items():
if name != 'time':
fh_out.createDimension(name, len(dim) if not dim.isunlimited() else None)
ignore_features = ["time", "crs", "FparExtra_QC", "FparLai_QC"]
mask_value_dic = {'Lai_500m': 10, 'LaiStdDev_500m': 10, 'Fpar_500m': 1, 'FparStdDev_500m': 1}
for v_name, varin in fh_in.variables.items():
if v_name not in ignore_features:
dimensions = varin.dimensions if v_name in ['lat', 'lon'] else ('lat', 'lon')
outVar = fh_out.createVariable(v_name, varin.datatype, dimensions)
if v_name == "lat":
outVar.setncatts({"units": "degree_north"})
outVar[:] = varin[:]
elif v_name == "lon":
outVar.setncatts({"units": "degree_east"})
outVar[:] = varin[:]
else:
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
vin = varin[index, :, :]
vin = ma.masked_greater(vin, mask_value_dic[v_name])
vin = ma.masked_less(vin, 0)
outVar[:] = vin[:]
fh_out.close()
fh_in.close()
def extract_ndvi(nc_file):
fh_in = Dataset('../../raw_data/ndvi/' + nc_file, 'r')
for index, n_days in enumerate(fh_in.variables['time'][:]):
date = (datetime.datetime(2000, 1, 1, 0, 0) + datetime.timedelta(int(n_days))).strftime('%Y%m%d')
print(date)
fh_out = Dataset('../../processed_data/ndvi/1km/{}.nc'.format(date[:-2]), 'w')
for name, dim in fh_in.dimensions.items():
if name != 'time':
fh_out.createDimension(name, len(dim) if not dim.isunlimited() else None)
ignore_features = ["time", "crs", "_1_km_monthly_VI_Quality"]
for v_name, varin in fh_in.variables.items():
if v_name not in ignore_features:
dimensions = varin.dimensions if v_name in ['lat', 'lon'] else ('lat', 'lon')
v_name = v_name if v_name in ['lat', 'lon'] else v_name.split('_')[-1].lower()
outVar = fh_out.createVariable(v_name, varin.datatype, dimensions)
if v_name == "lat":
outVar.setncatts({"units": "degree_north"})
outVar[:] = varin[:]
elif v_name == "lon":
outVar.setncatts({"units": "degree_east"})
outVar[:] = varin[:]
else:
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
vin = varin[index, :, :]
vin = ma.masked_greater(vin, 1.0)
vin = ma.masked_less(vin, -0.2)
outVar[:] = vin[:]
fh_out.close()
fh_in.close()
if __name__ == '__main__':
# extract_lai('20190604.nc')
extract_ndvi('MOD13A3_20000201_20181231.nc')
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/preprocess/lai.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from netCDF4 import Dataset
import numpy as np
import sys
sys.path.append("..")
from data_preprocessing.utils import get_closet_date
def subset(in_file, out_file, lat1, lat2, lon1, lon2):
fh_in = Dataset(in_file, 'r')
fh_out = Dataset(out_file, 'w')
lats, lons = fh_in.variables['lat'][:], fh_in.variables['lon'][:]
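    # lats are stored in descending order: search the reversed (ascending) copy and
    # map each position back with index = size - position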
lat_indices = lats.size - np.searchsorted(lats[::-1], [lat1, lat2], side="right")
lon_indices = np.searchsorted(lons, [lon1, lon2])
lats = lats[lat_indices[0]: lat_indices[1]]
lons = lons[lon_indices[0]: lon_indices[1]]
fh_out.createDimension("lat", len(lats))
fh_out.createDimension("lon", len(lons))
for v_name, varin in fh_in.variables.items():
if v_name in ["lat", "lon"]:
outVar = fh_out.createVariable(v_name, varin.datatype, (v_name,))
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
fh_out.variables["lat"][:] = lats[:]
fh_out.variables["lon"][:] = lons[:]
for v_name, varin in fh_in.variables.items():
if v_name not in ["lat", "lon"]:
outVar = fh_out.createVariable(v_name, varin.datatype, ('lat', 'lon'))
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
outVar[:] = varin[lat_indices[0]: lat_indices[1], lon_indices[0]: lon_indices[1]]
fh_in.close()
fh_out.close()
if __name__ == '__main__':
# subset('../../processed_data/nws_precip/500m/20190604.nc',
# '../../processed_data/local/ca_20190604/nws_precip.nc',
# 41.2047, 40.0268, -122.0304, -120.4676)
# subset('../../processed_data/elevation/500m.nc',
# '../../processed_data/local/ca_20190604/elevation.nc',
# 41.2047, 40.0268, -122.0304, -120.4676)
# subset('../../processed_data/soil_fraction/soil_fraction_usa_500m.nc',
# '../../processed_data/local/ca_20190604/soil_fraction.nc',
# 41.2047, 40.0268, -122.0304, -120.4676)
lai_date = get_closet_date('20190604', '../../processed_data/lai/500m')
print(lai_date)
subset('../../processed_data/lai/500m/{}.nc'.format(lai_date),
'../../processed_data/local/ca_20190604/lai.nc',
41.2047, 40.0268, -122.0304, -120.4676)
lst_date = get_closet_date('20190604', '../../processed_data/lst/500m')
print(lst_date)
subset('../../processed_data/lst/500m/{}.nc'.format(lst_date),
'../../processed_data/local/ca_20190604/lst.nc',
41.2047, 40.0268, -122.0304, -120.4676)
# subset('../../processed_data/soil_moisture/9km_500m/20190604.nc',
# '../../processed_data/local/ca_20190604/soil_moisture.nc',
# 41.2047, 40.0268, -122.0304, -120.4676)
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/preprocess/subset.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from netCDF4 import Dataset
import datetime
import calendar
from collections import defaultdict
import numpy.ma as ma
import os
import sys
sys.path.append("..")
def extract_lst(nc_file):
fh_in = Dataset('../../raw_data/lst/' + nc_file, 'r')
for index, n_days in enumerate(fh_in.variables['time'][:]):
date = (datetime.datetime(2000, 1, 1, 0, 0) + datetime.timedelta(int(n_days))).strftime('%Y%m%d')
print(date)
fh_out = Dataset('../../raw_data/lst/1km/{}.nc'.format(date), 'w')
for name, dim in fh_in.dimensions.items():
if name != 'time':
fh_out.createDimension(name, len(dim) if not dim.isunlimited() else None)
ignore_features = ['time', 'crs', 'Clear_day_cov', 'Clear_night_cov', 'Day_view_angl', 'Day_view_time',
'Night_view_angl', 'Night_view_time', 'Emis_31', 'Emis_32', "QC_Day", "QC_Night"]
for v_name, varin in fh_in.variables.items():
if v_name not in ignore_features:
dimensions = varin.dimensions if v_name in ['lat', 'lon'] else ('lat', 'lon')
outVar = fh_out.createVariable(v_name, varin.datatype, dimensions)
if v_name == "lat":
outVar.setncatts({"units": "degree_north"})
outVar[:] = varin[:]
elif v_name == "lon":
outVar.setncatts({"units": "degree_east"})
outVar[:] = varin[:]
else:
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
outVar[:] = varin[index, :, :]
fh_out.close()
fh_in.close()
def generate_monthly_average(start_year, end_year, start_month, end_month):
in_dir = '../../raw_data/lst/1km'
out_dir = '../../processed_data/lst/monthly_1km'
os.makedirs(out_dir, exist_ok=True)
for year in range(start_year, end_year):
for month in range(start_month, end_month):
fh_out = Dataset('{}/{}{}.nc'.format(out_dir, year, '{0:02}'.format(month)), 'w')
print(year, month)
var_lis = defaultdict(list)
first = True
num_days = calendar.monthrange(year, month)[1]
days = map(lambda x: x.strftime('%Y%m%d'), [datetime.date(year, month, day) for day in range(1, num_days+1)])
for day in days:
if '{}.nc'.format(day) not in os.listdir(in_dir):
print('Missing {}'.format(day))
continue
fh_in = Dataset('{}/{}.nc'.format(in_dir, day), 'r')
len_lat, len_lon = len(fh_in.variables['lat'][:]), len(fh_in.variables['lon'][:])
assert len_lat == 3578 or len_lat == 3579
assert len_lon == 7797
for v_name, varin in fh_in.variables.items():
if v_name in ['LST_Day_1km', 'LST_Night_1km']:
if len_lat == 3578:
var_lis[v_name[:-4].lower()].append(fh_in.variables[v_name][:])
else:
var_lis[v_name[:-4].lower()].append(fh_in.variables[v_name][:-1, :])
if first:
for name, dim in fh_in.dimensions.items():
if name == 'lat':
fh_out.createDimension(name, 3578)
else:
fh_out.createDimension(name, len(dim))
for v_name, varin in fh_in.variables.items():
if v_name in ['LST_Day_1km', 'LST_Night_1km'] or v_name in ["lat", "lon"]:
new_name = v_name[:-4].lower() if v_name in ['LST_Day_1km', 'LST_Night_1km'] else v_name
outVar = fh_out.createVariable(new_name, varin.datatype, varin.dimensions)
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
if v_name == 'lat':
outVar[:] = varin[:3578]
elif v_name == 'lon':
outVar[:] = varin[:]
first = False
fh_in.close()
for var in fh_out.variables:
if var != "lat" and var != "lon":
print(ma.array(var_lis[var]).shape)
fh_out.variables[var][:] = ma.array(var_lis[var]).mean(axis=0)
fh_out.close()
if __name__ == '__main__':
extract_lst('MOD11A1_20140201_20140930.nc')
generate_monthly_average(2014, 2015, 2, 10)
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/preprocess/lst.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from netCDF4 import Dataset
import numpy as np
import numpy.ma as ma
import sys
sys.path.append("..")
from data_preprocessing.merge import merge_various_days
def generate_convert_to_nc_script():
fh_out = open('../../processed_data/landcover/convert_to_nc.sh', 'w')
fh_out.write('#!/bin/bash\n')
for tif_file in os.listdir('../../processed_data/landcover/'):
if tif_file.endswith('.tif'):
fh_out.write('gdal_translate -of netCDF {} {}.nc\n'.format(tif_file, tif_file[:-4]))
def mask_with_landcover(out_folder, kept_ldcs):
for nc_file in os.listdir('../../processed_data/landcover/origi/'):
if nc_file.endswith('.nc'):
fh_in = Dataset('../../processed_data/landcover/origi/{}'.format(nc_file), 'r')
fh_out = Dataset('../../processed_data/landcover/{}/{}'.format(out_folder, nc_file), 'w')
for name, dim in fh_in.dimensions.items():
fh_out.createDimension(name, len(dim))
for v_name, varin in fh_in.variables.items():
outVar = fh_out.createVariable(v_name, varin.datatype, varin.dimensions)
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
if v_name in ['lat', 'lon']:
outVar[:] = varin[:]
else:
landcovers = varin[:]
lc_mask = np.in1d(landcovers, kept_ldcs).reshape(landcovers.shape)
outVar[:] = ma.array(varin[:], mask=~lc_mask)
fh_in.close()
fh_out.close()
def generate_cropland(in_file, out_file):
fh_in = Dataset(in_file, 'r')
fh_out = Dataset(out_file, 'w')
lats, lons = fh_in.variables['lat'][:], fh_in.variables['lon'][:]
for name, dim in fh_in.dimensions.items():
fh_out.createDimension(name, len(dim))
for v_name, varin in fh_in.variables.items():
if v_name in ['lat', 'lon']:
outVar = fh_out.createVariable(v_name, varin.datatype, (v_name,))
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
outVar[:] = varin[:]
outVar = fh_out.createVariable('cropland', 'f4', ('lat', 'lon'))
outVar.setncatts({'_FillValue': np.array([0.0]).astype('f')})
cropland = np.full((len(lats), len(lons)), 1.0)
mask_value = ma.getmaskarray(fh_in.variables['Band1'][:])
mask_value = np.logical_and.reduce(mask_value)
outVar[:] = ma.array(cropland, mask=mask_value)
fh_in.close()
fh_out.close()
if __name__ == '__main__':
generate_convert_to_nc_script()
mask_with_landcover('cro', [12])
mask_with_landcover('cro_cvm', [12, 14])
merge_various_days('../../processed_data/landcover/origi/', '../../processed_data/landcover/', 'ts_merged',
select_vars=['Band1'])
merge_various_days('../../processed_data/landcover/cro/', '../../processed_data/landcover/', 'ts_merged_cro',
select_vars=['Band1'])
merge_various_days('../../processed_data/landcover/cro_cvm/', '../../processed_data/landcover/',
'ts_merged_cro_cvm', select_vars=['Band1'])
generate_cropland('../../processed_data/landcover/ts_merged_cro.nc',
'../../processed_data/landcover/cropland_cro.nc')
generate_cropland('../../processed_data/landcover/ts_merged_cro_cvm.nc',
'../../processed_data/landcover/cropland_cro_cvm.nc')
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/preprocess/landcover.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .get_lat_lon_bins import get_lat_lon_bins
from .timing import timeit, timenow
from .generate_doy import generate_doy, generate_nearest_doys, generate_most_recent_doys, generate_doy_every_n
from .generate_doy import generate_future_doys
from .get_closest_date import get_closet_date
from .match_lat_lon import match_lat_lon
__all__ = ["get_lat_lon_bins",
"timeit", "timenow",
"generate_doy", "generate_most_recent_doys", "generate_nearest_doys",
"generate_doy_every_n", "generate_future_doys",
"get_closest_date",
"match_lat_lon"]
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/utils/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from datetime import date
def get_closet_date(query_date, folder):
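    """Return the YYYYMMDD stem of the .nc file in folder whose date is closest to query_date."""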
doys = [x[:-3] for x in os.listdir(folder) if x.endswith('.nc')]
doys = [date(*map(int, [x[:4], x[4:6], x[6:]])) for x in doys]
query_date = date(*map(int, [query_date[:4], query_date[4:6], query_date[6:]]))
return str(min(doys, key=lambda x: abs(x - query_date))).replace('-', '')
if __name__ == '__main__':
    # example call; the directory path is illustrative and must contain YYYYMMDD.nc files
    print(get_closet_date('20170101', '../../processed_data/lai/500m'))
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/utils/get_closest_date.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
def get_lat_lon_bins(lats, lons):
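    """Return the grid centers plus the bin edges that separate them.
    Edges are midpoints between neighbouring centers; the two outermost edges are
    extrapolated with the same spacing as their nearest interior midpoint.
    """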
inter_lat = np.array([(x + y) / 2.0 for x, y in zip(lats[:-1], lats[1:])])
inter_lon = np.array([(x + y) / 2.0 for x, y in zip(lons[:-1], lons[1:])])
lat_bins = np.concatenate([[2 * inter_lat[0] - inter_lat[1]], inter_lat, [2 * inter_lat[-1] - inter_lat[-2]]])
lon_bins = np.concatenate([[2 * inter_lon[0] - inter_lon[1]], inter_lon, [2 * inter_lon[-1] - inter_lon[-2]]])
return lats, lons, lat_bins, lon_bins | Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/utils/get_lat_lon_bins.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# https://stackoverflow.com/questions/1557571/how-do-i-get-time-of-a-python-programs-execution
import atexit
from time import time, perf_counter  # time.clock was removed in Python 3.8
from time import strftime, localtime
import functools
def _secondsToStr(t):
return "%d:%02d:%02d.%03d" % \
functools.reduce(lambda ll,b : divmod(ll[0],b) + ll[1:], [(t*1000,),1000,60,60])
def _log(s, elapsed=None):
line = "=" * 40
print(line)
print(s)
print(strftime("%Y-%m-%d %H:%M:%S", localtime()))
if elapsed:
print("Elapsed time:", elapsed)
print(line)
print()
def _endlog(start):
end = time()
elapsed = end-start
_log("End Program", _secondsToStr(elapsed))
def timenow():
    print(strftime("%Y-%m-%d %H:%M:%S", localtime()), _secondsToStr(perf_counter()))
def timeit():
start = time()
atexit.register(_endlog, start)
_log("Start Program")
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/utils/timing.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
def match_lat_lon(lats_from, lons_from, lats_to, lons_to, expand=0):
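    """Find the index range in (lats_from, lons_from) whose values span (lats_to, lons_to).
    Endpoints are matched by value with a 1e-5 tolerance; expand widens the range by that
    many grid cells on each side. Returns i_lat_start, i_lat_end, i_lon_start, i_lon_end.
    """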
i_lat_start = i_lat_end = i_lon_start = i_lon_end = 0
for i in range(len(lats_from)):
if abs(lats_from[i] - lats_to[0]) < 0.00001:
i_lat_start = i - expand
if abs(lats_from[i] - lats_to[-1]) < 0.00001:
i_lat_end = i + expand
for i in range(len(lons_from)):
if abs(lons_from[i] - lons_to[0]) < 0.00001:
i_lon_start = i - expand
if abs(lons_from[i] - lons_to[-1]) < 0.00001:
i_lon_end = i + expand
return i_lat_start, i_lat_end, i_lon_start, i_lon_end
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/utils/match_lat_lon.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from datetime import date, timedelta
def generate_doy(s_doy, e_doy, delimiter):
s_doy = map(int, [s_doy[:4], s_doy[4:6], s_doy[6:]])
e_doy = map(int, [e_doy[:4], e_doy[4:6], e_doy[6:]])
d1 = date(*s_doy)
d2 = date(*e_doy)
delta = d2 - d1
for i in range(delta.days + 1):
yield str(d1 + timedelta(days=i)).replace("-", delimiter)
def generate_doy_every_n(s_doy, e_doy, n, delimiter):
s_doy = map(int, [s_doy[:4], s_doy[4:6], s_doy[6:]])
e_doy = map(int, [e_doy[:4], e_doy[4:6], e_doy[6:]])
d1 = date(*s_doy)
d2 = date(*e_doy)
delta = d2 - d1
for i in range(0, delta.days + 1, n):
yield str(d1 + timedelta(days=i)).replace("-", delimiter)
def generate_nearest_doys(doy, n, delimiter):
doy = map(int, [doy[:4], doy[4:6], doy[6:]])
d1 = date(*doy)
for i in range((n+1)//2-n, (n+1)//2):
yield str(d1 + timedelta(days=i)).replace("-", delimiter)
def generate_most_recent_doys(doy, n, delimiter):
doy = map(int, [doy[:4], doy[4:6], doy[6:]])
d1 = date(*doy)
for i in range(-1, -n-1, -1):
yield str(d1 + timedelta(days=i)).replace("-", delimiter)
def generate_future_doys(doy, n, delimiter):
doy = map(int, [doy[:4], doy[4:6], doy[6:]])
d1 = date(*doy)
for i in range(n):
yield str(d1 + timedelta(days=i)).replace("-", delimiter)
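if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): each helper yields
    # date strings joined with the given delimiter.
    print(list(generate_doy('20190101', '20190103', '')))        # 20190101 ... 20190103
    print(list(generate_most_recent_doys('20190101', 2, '-')))   # 2018-12-31, 2018-12-30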
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/utils/generate_doy.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from netCDF4 import Dataset
import numpy as np
import numpy.ma as ma
import fiona
import sys
sys.path.append("..")
from data_preprocessing.utils import generate_doy
from data_preprocessing.rescaling.rescale_utils import search_kdtree
def extract_shapefile():
shapefile = fiona.open('../../raw_data/nws_precip/nws_precip_allpoint_conversion/nws_precip_allpoint_conversion.shp')
lats = np.full((881, 1121), np.inf)
lons = np.full((881, 1121), np.inf)
max_hrapx, max_hrapy = -float('inf'), -float('inf')
for feature in shapefile:
hrapx, hrapy = feature['properties']['Hrapx'], feature['properties']['Hrapy']
max_hrapx = max(max_hrapx, hrapx)
max_hrapy = max(max_hrapy, hrapy)
lon, lat = feature['geometry']['coordinates']
if 0 <= hrapx < 1121 and 0 <= hrapy < 881:
lats[hrapy, hrapx] = lat
lons[hrapy, hrapx] = lon
print(max_hrapx, max_hrapy)
np.save('../../raw_data/nws_precip/nws_precip_allpoint_conversion/lats.npy', lats)
np.save('../../raw_data/nws_precip/nws_precip_allpoint_conversion/lons.npy', lons)
def compute_closest_grid_point(lats, lons, lat, lon):
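    """Return (row, col) of the grid cell nearest to (lat, lon), plus the distance
    in degrees (plain Euclidean distance in lat/lon space)."""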
d_lats = lats - float(lat)
d_lons = lons - float(lon)
d = np.multiply(d_lats, d_lats) + np.multiply(d_lons, d_lons)
i, j = np.unravel_index(d.argmin(), d.shape)
return i, j, np.sqrt(d.min())
def reproject_lat_lon():
lats = np.load('../../raw_data/nws_precip/nws_precip_allpoint_conversion/lats.npy')
lons = np.load('../../raw_data/nws_precip/nws_precip_allpoint_conversion/lons.npy')
fh_ref = Dataset('../../processed_data/lai/500m/20181028.nc', 'r')
ref_lats, ref_lons = fh_ref.variables['lat'][:], fh_ref.variables['lon'][:]
xv, yv = np.meshgrid(ref_lons, ref_lats)
points = np.dstack([yv.ravel(), xv.ravel()])[0]
print('Finish building points')
results = search_kdtree(lats, lons, points)
np.save('../../raw_data/nws_precip/nws_precip_allpoint_conversion/projected_indices_lai_500m.npy', results)
def reproject_nws_precip(doy):
print(doy)
fh_ref = Dataset('../../processed_data/lai/500m/20181028.nc', 'r')
fh_in = Dataset('../../raw_data/nws_precip/{}/nws_precip_1day_{}_conus.nc'.format(doy, doy), 'r')
fh_out = Dataset('../../processed_data/nws_precip/500m/{}.nc'.format(doy), 'w')
ref_lats, ref_lons = fh_ref.variables['lat'][:], fh_ref.variables['lon'][:]
n_lat, n_lon = len(ref_lats), len(ref_lons)
for name, dim in fh_ref.dimensions.items():
fh_out.createDimension(name, len(dim))
for v_name, varin in fh_ref.variables.items():
if v_name in ['lat', 'lon']:
outVar = fh_out.createVariable(v_name, varin.datatype, (v_name,))
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
outVar[:] = varin[:]
observed_values = fh_in.variables['observation'][:]
projected_values = np.full((n_lat, n_lon), -9999.9)
projected_indices = \
np.load('../../raw_data/nws_precip/nws_precip_allpoint_conversion/projected_indices_lai_500m.npy')
projected_i = 0
for i in range(n_lat):
for j in range(n_lon):
proj_i, proj_j = 881 - projected_indices[projected_i] // 1121, projected_indices[projected_i] % 1121
if not observed_values.mask[proj_i, proj_j]:
projected_values[i, j] = observed_values[proj_i, proj_j]
projected_i += 1
outVar = fh_out.createVariable('precip', 'f4', ('lat', 'lon'))
outVar[:] = ma.masked_equal(projected_values, -9999.9)
fh_in.close()
fh_ref.close()
fh_out.close()
if __name__ == '__main__':
# extract_shapefile()
# reproject_lat_lon()
for doy in generate_doy('20171227', '20171231', ''):
reproject_nws_precip(doy)
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/rescaling/nws_precip.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from netCDF4 import Dataset
import numpy as np
import numpy.ma as ma
import pandas as pd
import csv
import sys
sys.path.append("..")
from data_preprocessing.rescaling.rescale_utils import search_kdtree
def reproject_lat_lon():
fh_sf = Dataset('../../raw_data/soil_fraction/soil_fraction_usa.nc', 'r')
lats, lons = fh_sf.variables['lat'][:], fh_sf.variables['lon'][:]
lons, lats = np.meshgrid(lons, lats)
fh_ref = Dataset('../../processed_data/lst/monthly_1km/201701.nc', 'r')
ref_lats, ref_lons = fh_ref.variables['lat'][:], fh_ref.variables['lon'][:]
xv, yv = np.meshgrid(ref_lons, ref_lats)
points = np.dstack([yv.ravel(), xv.ravel()])[0]
print('Finish building points')
results = search_kdtree(lats, lons, points)
np.save('../../raw_data/soil_fraction/projected_indices_lst_1km.npy', results)
def reproject_sf():
fh_ref = Dataset('../../processed_data/lst/monthly_1km/201701.nc', 'r')
fh_in = Dataset('../../raw_data/soil_fraction/soil_fraction_usa.nc', 'r')
fh_out = Dataset('../../processed_data/soil_fraction/soil_fraction_usa_1km.nc', 'w')
ref_lats, ref_lons = fh_ref.variables['lat'][:], fh_ref.variables['lon'][:]
n_lat, n_lon = len(ref_lats), len(ref_lons)
for name, dim in fh_ref.dimensions.items():
fh_out.createDimension(name, len(dim))
for v_name, varin in fh_ref.variables.items():
if v_name in ['lat', 'lon']:
outVar = fh_out.createVariable(v_name, varin.datatype, (v_name,))
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
outVar[:] = varin[:]
origi_values = {}
projected_values = {}
for v_name, varin in fh_in.variables.items():
if v_name not in ['lat', 'lon']:
outVar = fh_out.createVariable(v_name, 'f4', ('lat', 'lon'))
outVar.setncatts({'_FillValue': np.array([-9999.9]).astype('f')})
origi_values[v_name] = varin[:]
projected_values[v_name] = np.full((n_lat, n_lon), -9999.9)
projected_indices = np.load('../../raw_data/soil_fraction/projected_indices_lst_1km.npy')
projected_i = 0
for i in range(n_lat):
for j in range(n_lon):
for key in origi_values.keys():
proj_i, proj_j = projected_indices[projected_i] // 8724, projected_indices[projected_i] % 8724
if not origi_values[key].mask[proj_i, proj_j]:
projected_values[key][i, j] = origi_values[key][proj_i, proj_j]
projected_i += 1
for key in origi_values.keys():
fh_out.variables[key][:] = ma.masked_equal(projected_values[key], -9999.9)
fh_in.close()
fh_ref.close()
fh_out.close()
if __name__ == '__main__':
# reproject_lat_lon()
reproject_sf()
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/rescaling/soil_fraction.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import numpy.ma as ma
from netCDF4 import Dataset
import sys
sys.path.append("..")
from data_preprocessing.rescaling.rescale_utils import get_lat_lon_bins
def reproject_us_counties(in_file, ref_file, out_file):
fh_in = Dataset(in_file, 'r')
fh_out = Dataset(out_file, 'w')
fh_ref = Dataset(ref_file, 'r')
ref_lats, ref_lons = fh_ref.variables['lat'][:], fh_ref.variables['lon'][:]
lat_bins, lon_bins = get_lat_lon_bins(ref_lats, ref_lons)
origi_lats = fh_in.variables['lat']
origi_lats_value = origi_lats[:]
origi_lons = fh_in.variables['lon']
origi_lons_value = origi_lons[:]
origi_values = {}
sampled_values = {}
selected_vars = []
for v in fh_in.variables:
if v not in ['lat', 'lon']:
selected_vars.append(v)
origi_values[v] = fh_in.variables[v][:]
sampled_values[v] = np.full((len(ref_lats), len(ref_lons)), 0)
for id_lats in range(len(ref_lats)):
for id_lons in range(len(ref_lons)):
lats_index = np.searchsorted(origi_lats_value, [lat_bins[id_lats + 1], lat_bins[id_lats]])
lons_index = np.searchsorted(origi_lons_value, [lon_bins[id_lons], lon_bins[id_lons + 1]])
if lats_index[0] != lats_index[1] and lons_index[0] != lons_index[1]:
for v in selected_vars:
                    # [:, None] broadcasts the row indices against the column indices so the
                    # full 2-D block is selected (as in the other rescaling scripts)
                    selected = origi_values[v][np.array(range(lats_index[0], lats_index[1]))[:, None],
                                               np.array(range(lons_index[0], lons_index[1]))]
if selected.count() > 0:
sampled_values[v][id_lats, id_lons] = np.bincount(selected.compressed()).argmax()
else:
sampled_values[v][id_lats, id_lons] = 0
print(id_lats)
fh_out.createDimension('lat', len(ref_lats))
fh_out.createDimension('lon', len(ref_lons))
outVar = fh_out.createVariable('lat', 'f4', ('lat',))
outVar.setncatts({k: origi_lats.getncattr(k) for k in origi_lats.ncattrs()})
outVar[:] = ref_lats[:]
outVar = fh_out.createVariable('lon', 'f4', ('lon',))
outVar.setncatts({k: origi_lons.getncattr(k) for k in origi_lons.ncattrs()})
outVar[:] = ref_lons[:]
outVar = fh_out.createVariable('county_label', 'int', ('lat', 'lon'))
outVar.setncatts({'_FillValue': np.array([0]).astype(int)})
outVar[:] = ma.masked_equal(sampled_values['county_label'], 0)
outVar = fh_out.createVariable('state_code', 'int', ('lat', 'lon'))
outVar.setncatts({'_FillValue': np.array([0]).astype(int)})
outVar[:] = ma.masked_equal(sampled_values['state_code'], 0)
outVar = fh_out.createVariable('county_code', 'int', ('lat', 'lon'))
outVar.setncatts({'_FillValue': np.array([0]).astype(int)})
outVar[:] = ma.masked_equal(sampled_values['county_code'], 0)
fh_in.close()
fh_ref.close()
fh_out.close()
if __name__ == '__main__':
reproject_us_counties('../../processed_data/counties/us_counties.nc',
'../../processed_data/lst/monthly_1km/201505.nc',
'../../processed_data/counties/lst/us_counties.nc')
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/rescaling/us_counties.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from netCDF4 import Dataset
import numpy as np
def get_origi_lat_lon():
in_dir = '../../processed_data/prism/monthly'
lats, lons = None, None
for f in os.listdir(in_dir):
if f.endswith('.nc'):
fh = Dataset(os.path.join(in_dir, f), 'r')
if lats is None and lons is None:
lats, lons = fh.variables['lat'][:], fh.variables['lon'][:]
else:
assert np.allclose(lats, fh.variables['lat'][:])
assert np.allclose(lons, fh.variables['lon'][:])
out_dir = '../../processed_data/prism/latlon'
if not os.path.exists(out_dir):
os.makedirs(out_dir)
np.save(os.path.join(out_dir, 'lat_4km.npy'), lats.compressed())
np.save(os.path.join(out_dir, 'lon_4km.npy'), lons.compressed())
def get_lat_lon_even(n=10):
"""
    :param n: how many pixels along lat or lon make up one cell, e.g. n = 10 means the cell will be ~40 km * 40 km
"""
origi_lats = np.load('../../processed_data/prism/latlon/lat_4km.npy')
origi_lons = np.load('../../processed_data/prism/latlon/lon_4km.npy')
# print('Lengths of origi: ', len(origi_lats), len(origi_lons))
n_cell_lat = len(origi_lats)//n
n_cell_lon = len(origi_lons)//n
new_lats = []
new_lons = []
for i in range(n_cell_lat):
i1, i2 = (n//2-1) + n * i, n//2 + n * i
new_lats.append((origi_lats[i1] + origi_lats[i2])/2)
for i in range(n_cell_lon):
i1, i2 = (n // 2 - 1) + n * i, n // 2 + n * i
new_lons.append((origi_lons[i1] + origi_lons[i2])/2)
out_dir = '../../processed_data/prism/latlon'
if not os.path.exists(out_dir):
os.makedirs(out_dir)
np.save(os.path.join(out_dir, 'lat_{}km.npy'.format(4*n)), np.asarray(new_lats))
    np.save(os.path.join(out_dir, 'lon_{}km.npy'.format(4*n)), np.asarray(new_lons))
if __name__ == "__main__":
# get_origi_lat_lon()
get_lat_lon_even()
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/rescaling/prism_upscale.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import numpy.ma as ma
from netCDF4 import Dataset
import sys
sys.path.append("..")
from data_preprocessing.rescaling.rescale_utils import get_lat_lon_bins
def reproject_elevation():
fh_in = Dataset('../../raw_data/elevation/90m.nc', 'r')
fh_out = Dataset('../../processed_data/elevation/1km.nc', 'w')
fh_ref = Dataset('../../processed_data/lst/monthly_1km/201508.nc', 'r')
ref_lats, ref_lons = fh_ref.variables['lat'][:], fh_ref.variables['lon'][:]
lat_bins, lon_bins = get_lat_lon_bins(ref_lats, ref_lons)
ele_lats = fh_in.variables['lat']
ele_lats_value = ele_lats[:][::-1]
ele_lons = fh_in.variables['lon']
ele_lons_value = ele_lons[:]
ele_var = fh_in.variables['Band1'][0, :, :]
ele_resampled = np.full((len(ref_lats), len(ref_lons)), -9999.9)
# ele_std_resampled = np.full((len(ref_lats), len(ref_lons)), -9999.9)
for id_lats in range(len(ref_lats)):
for id_lons in range(len(ref_lons)):
lats_index = np.searchsorted(ele_lats_value, [lat_bins[id_lats + 1], lat_bins[id_lats]])
lons_index = np.searchsorted(ele_lons_value, [lon_bins[id_lons], lon_bins[id_lons + 1]])
if lats_index[0] != lats_index[1] and lons_index[0] != lons_index[1]:
ele_selected = ele_var[np.array(range(-lats_index[1], -lats_index[0]))[:, None],
np.array(range(lons_index[0], lons_index[1]))]
avg = ma.mean(ele_selected)
# std = ma.std(ele_selected)
ele_resampled[id_lats, id_lons] = (avg if avg is not ma.masked else -9999.9)
# ele_std_resampled[id_lats, id_lons] = (std if std is not ma.masked else -9999.9)
print(id_lats)
ele_resampled = ma.masked_equal(ele_resampled, -9999.9)
# ele_std_resampled = ma.masked_equal(ele_std_resampled, -9999.9)
fh_out.createDimension('lat', len(ref_lats))
fh_out.createDimension('lon', len(ref_lons))
outVar = fh_out.createVariable('lat', 'f4', ('lat',))
outVar.setncatts({k: ele_lats.getncattr(k) for k in ele_lats.ncattrs()})
outVar[:] = ref_lats[:]
outVar = fh_out.createVariable('lon', 'f4', ('lon',))
outVar.setncatts({k: ele_lons.getncattr(k) for k in ele_lons.ncattrs()})
outVar[:] = ref_lons[:]
# outVar = fh_out.createVariable('elevation_mean', 'f4', ('lat', 'lon',))
outVar = fh_out.createVariable('elevation', 'f4', ('lat', 'lon',))
outVar.setncatts({'units': "m"})
outVar.setncatts({'long_name': "USGS_NED Elevation value"})
outVar.setncatts({'_FillValue': np.array([-9999.9]).astype('f')})
outVar[:] = ele_resampled[:]
# outVar = fh_out.createVariable('elevation_std', 'f4', ('lat', 'lon',))
# outVar.setncatts({'units': "m"})
# outVar.setncatts({'long_name': "USGS_NED Elevation value"})
# outVar.setncatts({'_FillValue': np.array([-9999.9]).astype('f')})
# outVar[:] = ele_std_resampled[:]
if __name__ == '__main__':
reproject_elevation()
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/rescaling/elevation.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from data_preprocessing.utils import get_lat_lon_bins
from data_preprocessing import cdl_values_to_crops, crops_to_cdl_values
from data_preprocessing.utils import timeit
import os
import numpy as np
import numpy.ma as ma
from netCDF4 import Dataset
# water: Water Wetlands Aquaculture Open Water Perennial Ice/Snow
# urban: Developed Developed/Open Space Developed/Low Intensity Developed/Med Intensity Developed/High Intensity
# native: Clover/Wildflowers Forest Shrubland1 Deciduous Forest Evergreen Forest Mixed Forest Shrubland2 Woody Wetlands
# Herbaceous Wetlands
# idle/fallow: Sod/Grass Seed Fallow/Idle Cropland
# hay/pasture: Other Hay/Non Alfalfa Switchgrass Grassland/Pasture
# barren/missing: Barren1 Clouds/No Data Nonag/Undefined Barren2
ignored_labels = {"water": [83, 87, 92, 111, 112],
"urban": [82, 121, 122, 123, 124],
"native": [58, 63, 64, 141, 142, 143, 152, 190, 195],
"idle/fallow": [59, 61],
"hay/pasture": [37, 60, 176],
"barren/missing": [65, 81, 88, 131]}
def cdl_upscale(in_dir, in_file, out_dir, out_file, reso='40km', ignore=False):
if not os.path.exists(out_dir):
os.makedirs(out_dir)
ignored_lis = [x for lis in ignored_labels.values() for x in lis]
kept_lis = [x for x in cdl_values_to_crops.keys() if x not in ignored_lis]
# increasing
lats = np.load('../../processed_data/prism/latlon/lat_{}.npy'.format(reso))
lons = np.load('../../processed_data/prism/latlon/lon_{}.npy'.format(reso))
_, _, lat_bins, lon_bins = get_lat_lon_bins(lats, lons)
fh_in = Dataset(os.path.join(in_dir, in_file), 'r')
fh_out = Dataset(os.path.join(out_dir, out_file), 'w')
dic_var = {}
for var in ['lat', 'lon']:
dic_var[var] = fh_in.variables[var]
# increasing
dic_var['lat_value'] = dic_var['lat'][:]
dic_var['lon_value'] = dic_var['lon'][:]
fh_out.createDimension('lat', len(lats))
fh_out.createDimension('lon', len(lons))
for var in ['lat', 'lon']:
outVar = fh_out.createVariable(var, 'f4', (var,))
outVar.setncatts({k: dic_var[var].getncattr(k) for k in dic_var[var].ncattrs()})
outVar[:] = lats if var == "lat" else lons
cdl_value = fh_in.variables['Band1'][:]
cdl_resampled_dic = {}
for v in cdl_values_to_crops.values():
if (ignore and crops_to_cdl_values[v] in kept_lis) or not ignore:
cdl_resampled_dic[v] = np.full((len(lats), len(lons)), -1.0)
for s in ["1", "2", "3"]:
cdl_resampled_dic["cdl_" + s] = np.full((len(lats), len(lons)), -1.0)
cdl_resampled_dic["cdl_fraction_" + s] = np.full((len(lats), len(lons)), -1.0)
for id_lats in range(len(lats)):
for id_lons in range(len(lons)):
lats_index = np.searchsorted(dic_var['lat_value'],
[lat_bins[id_lats], lat_bins[id_lats + 1]])
lons_index = np.searchsorted(dic_var['lon_value'],
[lon_bins[id_lons], lon_bins[id_lons + 1]])
if lats_index[0] != lats_index[1] and lons_index[0] != lons_index[1]:
selected = cdl_value[np.array(range(lats_index[0], lats_index[1]))[:, None],
np.array(range(lons_index[0], lons_index[1]))]
# selected_size = selected.shape[0] * selected.shape[1]
selected_compressed = selected.compressed()
selected_size = len(selected_compressed)
cdl_id, cdl_count = np.unique(selected_compressed, return_counts=True)
# filter ignored_label after selected_size has been calculated
if ignore:
new_cdl_id, new_cdl_count = [], []
for i, c in zip(cdl_id, cdl_count):
if i in kept_lis:
new_cdl_id.append(i)
new_cdl_count.append(c)
cdl_id, cdl_count = np.asarray(new_cdl_id), np.asarray(new_cdl_count)
for i, c in zip(cdl_id, cdl_count):
cdl_resampled_dic[cdl_values_to_crops[i]][id_lats, id_lons] = c / selected_size
cdl_count_sort_ind = np.argsort(-cdl_count)
for i in range(3):
if len(cdl_id) > i:
cdl_resampled_dic["cdl_" + str(i+1)][id_lats, id_lons] = \
cdl_id[cdl_count_sort_ind[i]]
cdl_resampled_dic["cdl_fraction_" + str(i+1)][id_lats, id_lons] = \
cdl_count[cdl_count_sort_ind[i]] / selected_size
else:
cdl_resampled_dic["cdl_" + str(i + 1)][id_lats, id_lons] = -1
cdl_resampled_dic["cdl_fraction_" + str(i + 1)][id_lats, id_lons] = -1
for v in cdl_values_to_crops.values():
if (ignore and crops_to_cdl_values[v] in kept_lis) or not ignore:
            # Replace ' & ' before single spaces; otherwise the ' & ' pattern can never match.
            outVar = fh_out.createVariable("cdl_" + v.lower().replace(' & ', '_').replace(' ', '_').replace('/', '_'),
                                           'f4', ('lat', 'lon',))
outVar[:] = cdl_resampled_dic[v][:]
outVar[:] = ma.masked_equal(outVar, -1.0)
for s in ["1", "2", "3"]:
for t in ["cdl_", "cdl_fraction_"]:
outVar = fh_out.createVariable(t + s, 'f4', ('lat', 'lon',))
outVar[:] = cdl_resampled_dic[t + s][:]
outVar[:] = ma.masked_equal(outVar, -1.0)
fh_in.close()
fh_out.close()
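# Hedged reading sketch (illustration only): the upscaled file written above stores a
# per-crop fraction layer for each kept CDL class plus the top-3 dominant classes
# ("cdl_1".."cdl_3") and their fractions ("cdl_fraction_1".."cdl_fraction_3").
# The default path is the one used in __main__ and is assumed to already exist.
def _example_read_dominant_cdl(nc_path='../../processed_data/cdl/40km/2018_40km_cdls_crop_only.nc'):
    fh = Dataset(nc_path, 'r')
    top_class = fh.variables['cdl_1'][:]                # dominant CDL code per cell (masked where empty)
    top_fraction = fh.variables['cdl_fraction_1'][:]    # areal fraction of that class within the cell
    fh.close()
    return top_class, top_fraction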
def upscaled_cdl_postprocess(in_file, out_dir, out_file, threshold=0.0):
fh_in = Dataset(in_file, 'r')
cdl_fraction_1 = fh_in.variables['cdl_fraction_1'][:]
kept_cdls = ma.masked_where(cdl_fraction_1 < threshold, fh_in.variables['cdl_1'][:])
cdl_id, cdl_count = np.unique(kept_cdls.compressed(), return_counts=True)
for i, c in zip(cdl_id, cdl_count):
print(cdl_values_to_crops[int(i)], c)
if __name__ == "__main__":
timeit()
# cdl_upscale('../../raw_data/cdl/2018_30m_cdls', '2018_30m_cdls.nc',
# '../../processed_data/cdl/40km', '2018_40km_cdls_crop_only.nc', reso='40km', ignore=True)
upscaled_cdl_postprocess('../../processed_data/cdl/40km/2018_40km_cdls_crop_only.nc',
'', '')
# print([x for lis in ignored_labels.values() for x in lis])
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/rescaling/cdl_upscale.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .rescale_utils import search_kdtree
from .rescale_utils import get_lat_lon_bins
__all__ = ['search_kdtree', 'get_lat_lon_bins']
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/rescaling/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from netCDF4 import Dataset
import numpy as np
import numpy.ma as ma
import sys
sys.path.append("..")
from data_preprocessing.utils import generate_doy
from data_preprocessing.rescaling.rescale_utils import search_kdtree
def reproject_lat_lon():
fh_prism = Dataset('../../processed_data/prism/combined_monthly/201701.nc', 'r')
lats, lons = fh_prism.variables['lat'][:], fh_prism.variables['lon'][:]
lons, lats = np.meshgrid(lons, lats)
fh_ref = Dataset('../../processed_data/lst/monthly_1km/201701.nc', 'r')
ref_lats, ref_lons = fh_ref.variables['lat'][:], fh_ref.variables['lon'][:]
xv, yv = np.meshgrid(ref_lons, ref_lats)
points = np.dstack([yv.ravel(), xv.ravel()])[0]
print('Finish building points')
results = search_kdtree(lats, lons, points)
np.save('../../processed_data/prism/projected_indices_lst_1km.npy', results)
def reproject_prism(doy):
fh_ref = Dataset('../../processed_data/lst/monthly_1km/201702.nc', 'r')
fh_in = Dataset('../../processed_data/prism/combined_monthly/{}.nc'.format(doy), 'r')
fh_out = Dataset('../../processed_data/prism/combined_monthly_1km/{}.nc'.format(doy), 'w')
ref_lats, ref_lons = fh_ref.variables['lat'][:], fh_ref.variables['lon'][:]
n_lat, n_lon = len(ref_lats), len(ref_lons)
for name, dim in fh_ref.dimensions.items():
fh_out.createDimension(name, len(dim))
for v_name, varin in fh_ref.variables.items():
if v_name in ['lat', 'lon']:
outVar = fh_out.createVariable(v_name, varin.datatype, (v_name,))
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
outVar[:] = varin[:]
origi_values = {}
projected_values = {}
for v_name, varin in fh_in.variables.items():
if v_name not in ['lat', 'lon']:
outVar = fh_out.createVariable(v_name, varin.datatype, ('lat', 'lon'))
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
origi_values[v_name] = varin[:]
projected_values[v_name] = np.full((n_lat, n_lon), -9999.9)
projected_indices = np.load('../../processed_data/prism/projected_indices_lst_1km.npy')
projected_i = 0
for i in range(n_lat):
for j in range(n_lon):
for key in origi_values.keys():
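                # projected_indices stores flattened positions on the source grid used to build
                # the KD-tree (row-major), so dividing by that grid's longitude size (1405
                # columns here) and taking the remainder recovers its (row, col) indices.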
proj_i, proj_j = projected_indices[projected_i] // 1405, projected_indices[projected_i] % 1405
if not origi_values[key].mask[proj_i, proj_j]:
projected_values[key][i, j] = origi_values[key][proj_i, proj_j]
projected_i += 1
for key in origi_values.keys():
fh_out.variables[key][:] = ma.masked_equal(projected_values[key], -9999.9)
fh_in.close()
fh_ref.close()
fh_out.close()
if __name__ == '__main__':
# reproject_lat_lon()
for year in range(2018, 2019):
for month in range(2, 11):
reproject_prism(doy='{}{}'.format(year, '{0:02}'.format(month)))
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/rescaling/prism_downscale.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from netCDF4 import Dataset
import numpy as np
import numpy.ma as ma
import datetime
import os
import sys
sys.path.append("..")
from data_preprocessing.rescaling.rescale_utils import search_kdtree
def reproject_lat_lon():
fh_lst = Dataset('../../raw_data/lst/1km/20170101.nc', 'r')
lats, lons = fh_lst.variables['lat'][:], fh_lst.variables['lon'][:]
lons, lats = np.meshgrid(lons, lats)
fh_ref = Dataset('../../processed_data/lai/500m/20181028.nc', 'r')
ref_lats, ref_lons = fh_ref.variables['lat'][:], fh_ref.variables['lon'][:]
xv, yv = np.meshgrid(ref_lons, ref_lats)
points = np.dstack([yv.ravel(), xv.ravel()])[0]
print('Finish building points')
results = search_kdtree(lats, lons, points)
np.save('../../raw_data/lst/projected_indices_lai_500m.npy', results)
def reproject_lst(doy):
print(doy)
fh_ref = Dataset('../../processed_data/lai/500m/20181028.nc', 'r')
fh_in = Dataset('../../raw_data/lst/1km/{}.nc'.format(doy), 'r')
fh_out = Dataset('../../processed_data/lst/500m/{}.nc'.format(doy), 'w')
ref_lats, ref_lons = fh_ref.variables['lat'][:], fh_ref.variables['lon'][:]
n_lat, n_lon = len(ref_lats), len(ref_lons)
for name, dim in fh_ref.dimensions.items():
fh_out.createDimension(name, len(dim))
for v_name, varin in fh_ref.variables.items():
if v_name in ['lat', 'lon']:
outVar = fh_out.createVariable(v_name, varin.datatype, (v_name,))
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
outVar[:] = varin[:]
origi_values = {}
projected_values = {}
for v_name, varin in fh_in.variables.items():
if v_name not in ['lat', 'lon']:
new_name = '_'.join(v_name.lower().split('_')[:-1])
outVar = fh_out.createVariable(new_name, varin.datatype, ('lat', 'lon'))
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
origi_values[new_name] = varin[:]
projected_values[new_name] = np.full((n_lat, n_lon), -9999.9)
projected_indices = np.load('../../raw_data/lst/projected_indices_lai_500m.npy')
projected_i = 0
for i in range(n_lat):
for j in range(n_lon):
for key in origi_values.keys():
proj_i, proj_j = projected_indices[projected_i] // 7797, projected_indices[projected_i] % 7797
if not origi_values[key].mask[proj_i, proj_j]:
projected_values[key][i, j] = origi_values[key][proj_i, proj_j]
projected_i += 1
for key in origi_values.keys():
fh_out.variables[key][:] = ma.masked_equal(projected_values[key], -9999.9)
fh_in.close()
fh_ref.close()
fh_out.close()
if __name__ == '__main__':
# reproject_lat_lon()
for doy in os.listdir('../../raw_data/lst/1km/'):
if doy.endswith('.nc'):
doy = doy[:-3]
date = datetime.datetime.strptime(doy, "%Y%m%d").date()
date_start = datetime.datetime.strptime('20190602', "%Y%m%d").date()
date_end = datetime.datetime.strptime('20190602', "%Y%m%d").date()
if date_start <= date <= date_end:
reproject_lst(doy)
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/rescaling/lst.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from scipy.spatial import cKDTree
import numpy as np
def search_kdtree(lats, lons, points):
mytree = cKDTree(np.dstack([lats.ravel(), lons.ravel()])[0])
print('Finish building KDTree')
dist, indices = mytree.query(points)
return indices
def get_lat_lon_bins(lats, lons):
inter_lat = np.array([(x + y) / 2.0 for x, y in zip(lats[:-1], lats[1:])])
inter_lon = np.array([(x + y) / 2.0 for x, y in zip(lons[:-1], lons[1:])])
lat_bins = np.concatenate([[2 * inter_lat[0] - inter_lat[1]], inter_lat, [2 * inter_lat[-1] - inter_lat[-2]]])
lon_bins = np.concatenate([[2 * inter_lon[0] - inter_lon[1]], inter_lon, [2 * inter_lon[-1] - inter_lon[-2]]])
return lat_bins, lon_bins
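# Hedged usage sketch (illustration only, not part of the original pipeline):
# get_lat_lon_bins() turns N cell centers into N+1 bin edges by taking midpoints and
# extrapolating half a step at both ends, while search_kdtree() returns, for each query
# point, the flattened (row-major) index of its nearest grid cell. The coordinate values
# below are made up for the example.
def _example_rescale_utils():
    lats = np.arange(25.0, 30.0, 1.0)                # 5 cell centers
    lons = np.arange(-100.0, -95.0, 1.0)
    lat_bins, lon_bins = get_lat_lon_bins(lats, lons)
    assert len(lat_bins) == len(lats) + 1            # one more edge than centers
    assert np.allclose(np.diff(lat_bins), 1.0)       # uniform spacing is preserved
    grid_lons, grid_lats = np.meshgrid(lons, lats)   # 2-D grid of shape (5, 5)
    points = np.array([[26.2, -97.7]])               # one (lat, lon) query point
    flat_idx = search_kdtree(grid_lats, grid_lons, points)[0]
    row, col = divmod(flat_idx, grid_lats.shape[1])  # flat index back to (row, col)
    return lats[row], lons[col]                      # nearest cell center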
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/rescaling/rescale_utils.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from netCDF4 import Dataset
import numpy as np
import numpy.ma as ma
import sys
sys.path.append("..")
from data_preprocessing.utils import generate_doy
from data_preprocessing.rescaling.rescale_utils import search_kdtree
def reproject_lat_lon():
fh_sm = Dataset('../../raw_data/soil_moisture/9km/20170101.nc', 'r')
lats, lons = fh_sm.variables['lat'][:], fh_sm.variables['lon'][:]
lons, lats = np.meshgrid(lons, lats)
fh_ref = Dataset('../../processed_data/lai/500m/20181028.nc', 'r')
ref_lats, ref_lons = fh_ref.variables['lat'][:], fh_ref.variables['lon'][:]
xv, yv = np.meshgrid(ref_lons, ref_lats)
points = np.dstack([yv.ravel(), xv.ravel()])[0]
print('Finish building points')
results = search_kdtree(lats, lons, points)
np.save('../../raw_data/soil_moisture/projected_indices_lai_500m.npy', results)
def reproject_sm(doy):
fh_ref = Dataset('../../processed_data/lai/500m/20181028.nc', 'r')
fh_in = Dataset('../../raw_data/soil_moisture/9km/{}.nc'.format(doy), 'r')
fh_out = Dataset('../../processed_data/soil_moisture/9km_500m/{}.nc'.format(doy), 'w')
ref_lats, ref_lons = fh_ref.variables['lat'][:], fh_ref.variables['lon'][:]
n_lat, n_lon = len(ref_lats), len(ref_lons)
for name, dim in fh_ref.dimensions.items():
fh_out.createDimension(name, len(dim))
for v_name, varin in fh_ref.variables.items():
if v_name in ['lat', 'lon']:
outVar = fh_out.createVariable(v_name, varin.datatype, (v_name,))
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
outVar[:] = varin[:]
origi_values = {}
projected_values = {}
for v_name, varin in fh_in.variables.items():
if v_name in ['soil_moisture']:
outVar = fh_out.createVariable(v_name, varin.datatype, ('lat', 'lon'))
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
origi_values[v_name] = varin[:]
projected_values[v_name] = np.full((n_lat, n_lon), -9999.9)
projected_indices = np.load('../../raw_data/soil_moisture/projected_indices_lai_500m.npy')
projected_i = 0
for i in range(n_lat):
for j in range(n_lon):
for key in origi_values.keys():
proj_i, proj_j = projected_indices[projected_i] // 674, projected_indices[projected_i] % 674
if not origi_values[key].mask[proj_i, proj_j]:
projected_values[key][i, j] = origi_values[key][proj_i, proj_j]
projected_i += 1
for key in origi_values.keys():
fh_out.variables[key][:] = ma.masked_equal(projected_values[key], -9999.9)
fh_in.close()
fh_ref.close()
fh_out.close()
if __name__ == '__main__':
# reproject_lat_lon()
for doy in generate_doy('20181002', '20181231', ''):
reproject_sm(doy)
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/rescaling/soil_moisture.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pandas as pd
import csv
from netCDF4 import Dataset
import numpy as np
import numpy.ma as ma
from collections import defaultdict
import os
def get_counties_lat_lon_indices(in_file, out_file, n_pixels):
counties = pd.read_csv(in_file)
counties.columns = ['state', 'county', 'lat', 'lon']
prism_file = Dataset('../../processed_data/prism/monthly/ppt_199901.nc', 'r')
prism_lats, prism_lons = prism_file.variables['lat'][:], prism_file.variables['lon'][:]
prism_file.close()
# output columns: state, county, lat, lon, lat_index0, lat_index1, lon_index0, lon_index1
with open(out_file, 'w') as f:
writer = csv.writer(f, delimiter=',')
writer.writerow(['state', 'county', 'lat', 'lon', 'lat0', 'lat1', 'lon0', 'lon1'])
for c in counties.itertuples():
state, county, lat, lon = c.state, c.county, c.lat, c.lon
lat_indices = sorted(np.argsort(np.abs(prism_lats - lat))[:n_pixels])
lon_indices = sorted(np.argsort(np.abs(prism_lons - lon))[:n_pixels])
line = [state, county, lat, lon, lat_indices[0], lat_indices[-1], lon_indices[0], lon_indices[-1]]
writer.writerow(line)
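# Hedged sketch (illustration only) of the window-selection rule used above: the n_pixels
# grid indices closest to a county centroid are found with argsort on absolute distance
# and then sorted, so the first and last entries bound the county's pixel window.
# The coordinate values below are made up for the example.
def _example_nearest_pixel_window():
    grid_lats = np.arange(40.0, 41.0, 0.1)                        # 10 grid rows
    lat_indices = sorted(np.argsort(np.abs(grid_lats - 40.33))[:4])
    return lat_indices[0], lat_indices[-1]                        # bounds of a 4-row window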
def generate_no_spatial(croptype, start_month, end_month, selected_states=None):
yield_data = pd.read_csv('../../processed_data/crop_yield/{}_1999_2018.csv'.format(croptype))[[
'Year', 'State ANSI', 'County ANSI', 'Value']]
yield_data.columns = ['year', 'state', 'county', 'value']
if yield_data.value.dtype != float:
yield_data['value'] = yield_data['value'].str.replace(',', '')
yield_data = yield_data.astype({'year': int, 'state': int, 'county': int, 'value': float})
counties = pd.read_csv('../../processed_data/counties/prism/us_counties_cro_cvm_locations.csv')
county_dic = {}
for c in counties.itertuples():
state, county, lat0, lat1, lon0, lon1 = c.state, c.county, c.lat0, c.lat1, c.lon0, c.lon1
county_dic[(state, county)] = [lat0, lat1, lon0, lon1]
climate_vars = ["ppt", "tdmean", "tmax", "tmean", "tmin", "vpdmax", "vpdmin"]
csv_header = ['year', 'state', 'county', 'yield']
for month in map(str, range(start_month, end_month+1)):
for climate_var in climate_vars:
csv_header.append(climate_var + "_" + month)
for climate_var in climate_vars:
csv_header.append(climate_var + "_mean")
output_file = '../../experiment_data/no_spatial/{}_{}_{}.csv'.format(croptype, start_month, end_month) \
if not selected_states else '../../experiment_data/no_spatial/{}_{}_{}_major_states.csv'.format(croptype, start_month, end_month)
with open(output_file, 'w') as f:
writer = csv.writer(f, delimiter=',')
writer.writerow(csv_header)
for yd in yield_data.itertuples():
year, state, county, value = yd.year, yd.state, yd.county, yd.value
if selected_states is not None and state not in selected_states:
continue
# no location info
if (state, county) not in county_dic:
continue
lat0, lat1, lon0, lon1 = county_dic[(state, county)]
assert lat1 - lat0 == 9
assert lon1 - lon0 == 9
values = [year, state, county, value]
value_dic = defaultdict(list)
for month in range(start_month, end_month+1):
fh = Dataset('../../processed_data/prism/combined_monthly/{}{}.nc'.format(year, '{0:02}'.format(month)))
for climate_var in climate_vars:
selected_values = fh.variables[climate_var][lat0:lat1+1, lon0:lon1+1]
averaged = ma.mean(selected_values)
values.append(averaged)
value_dic[climate_var].append(averaged)
fh.close()
for climate_var in climate_vars:
values.append(np.mean(value_dic[climate_var]))
writer.writerow(values)
def average_by_year(start_month, end_month):
if not os.path.exists('../../experiment_data/only_spatial/averaged_{}_{}/nc'.format(start_month, end_month)):
os.makedirs('../../experiment_data/only_spatial/averaged_{}_{}/nc'.format(start_month, end_month))
for year in range(1999, 2019):
fh_out = Dataset('../../experiment_data/only_spatial/averaged_{}_{}/nc/{}.nc'.format(start_month, end_month, year), 'w')
first_flag = True
var_lis = defaultdict(list)
for month in range(start_month, end_month+1):
fh_in = Dataset('../../processed_data/prism/combined_monthly/{}{}.nc'.format(year, '{0:02}'.format(month)))
if first_flag:
for name, dim in fh_in.dimensions.items():
fh_out.createDimension(name, len(dim))
for v_name, varin in fh_in.variables.items():
outVar = fh_out.createVariable(v_name, varin.datatype, varin.dimensions)
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
if v_name in ["lat", "lon"]:
outVar[:] = varin[:]
first_flag = False
for v_name, varin in fh_in.variables.items():
if v_name not in ["lat", "lon"]:
var_lis[v_name].append(fh_in.variables[v_name][:])
fh_in.close()
for var in fh_out.variables:
if var != "lat" and var != "lon":
fh_out.variables[var][:] = ma.array(var_lis[var]).mean(axis=0)
fh_out.close()
def generate_only_spatial(croptype, start_month, end_month, selected_states=None):
yield_data = pd.read_csv('../../processed_data/crop_yield/{}_1999_2018.csv'.format(croptype))[[
'Year', 'State ANSI', 'County ANSI', 'Value']]
yield_data.columns = ['year', 'state', 'county', 'value']
if yield_data.value.dtype != float:
yield_data['value'] = yield_data['value'].str.replace(',', '')
yield_data = yield_data.astype({'year': int, 'state': int, 'county': int, 'value': float})
counties = pd.read_csv('../../processed_data/counties/prism/us_counties_cro_cvm_locations.csv')
county_dic = {}
for c in counties.itertuples():
state, county, lat0, lat1, lon0, lon1 = c.state, c.county, c.lat0, c.lat1, c.lon0, c.lon1
county_dic[(state, county)] = [lat0, lat1, lon0, lon1]
climate_vars = ["ppt", "tdmean", "tmax", "tmean", "tmin", "vpdmax", "vpdmin"]
yield_values = []
output_folder = 'counties' if not selected_states else 'counties_major_states'
if not os.path.exists('../../experiment_data/only_spatial/averaged_{}_{}/{}'.format(start_month, end_month, output_folder)):
os.makedirs('../../experiment_data/only_spatial/averaged_{}_{}/{}'.format(start_month, end_month, output_folder))
for yd in yield_data.itertuples():
year, state, county, value = yd.year, yd.state, yd.county, yd.value
if selected_states is not None and state not in selected_states:
continue
# no location info
if (state, county) not in county_dic:
continue
lat0, lat1, lon0, lon1 = county_dic[(state, county)]
assert lat1 - lat0 == 9
assert lon1 - lon0 == 9
values = []
fh = Dataset('../../experiment_data/only_spatial/averaged_{}_{}/nc/{}.nc'.format(start_month, end_month, year))
for climate_var in climate_vars:
values.append(fh.variables[climate_var][lat0:lat1+1, lon0:lon1+1])
values = np.asarray(values)
np.save('../../experiment_data/only_spatial/averaged_{}_{}/{}/{}_{}_{}.npy'.format(start_month, end_month, output_folder,
state, county, year), values)
yield_values.append([year, state, county, value])
assert values.shape == (7, 10, 10), values.shape
fh.close()
np.save('../../experiment_data/only_spatial/averaged_{}_{}/{}/y.npy'.format(start_month, end_month, output_folder),
np.asarray(yield_values))
def combine_by_year(start_month, end_month):
for year in range(1999, 2019):
fh_out = Dataset('../../experiment_data/spatial_temporal/{}_{}/nc/{}.nc'.format(start_month, end_month, year), 'w')
var_list = []
n_t = end_month - start_month + 1
first_flag = True
n_dim = {}
for i_month, month in enumerate(range(start_month, end_month+1)):
fh_in = Dataset('../../processed_data/prism/combined_monthly/{}{}.nc'.format(year, '{0:02}'.format(month)))
if first_flag:
for name, dim in fh_in.dimensions.items():
n_dim[name] = len(dim)
fh_out.createDimension(name, len(dim))
fh_out.createDimension('time', n_t)
outVar = fh_out.createVariable('time', 'int', ("time",))
outVar[:] = range(start_month, end_month + 1)
for v_name, varin in fh_in.variables.items():
if v_name == 'lat' or v_name == 'lon':
outVar = fh_out.createVariable(v_name, varin.datatype, varin.dimensions)
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
outVar[:] = varin[:]
else:
var_list.append(v_name)
outVar = fh_out.createVariable(v_name, varin.datatype, ("time", "lat", "lon",))
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
outVar[:] = np.empty((n_t, n_dim['lat'], n_dim['lon']))
first_flag = False
for vname in var_list:
var_value = fh_in.variables[vname][:]
fh_out.variables[vname][i_month, :, :] = var_value[:]
fh_in.close()
fh_out.close()
def generate_spatial_temporal(croptype, start_month, end_month, selected_states=None):
yield_data = pd.read_csv('../../processed_data/crop_yield/{}_1999_2018.csv'.format(croptype))[[
'Year', 'State ANSI', 'County ANSI', 'Value']]
yield_data.columns = ['year', 'state', 'county', 'value']
if yield_data.value.dtype != float:
yield_data['value'] = yield_data['value'].str.replace(',', '')
yield_data = yield_data.astype({'year': int, 'state': int, 'county': int, 'value': float})
counties = pd.read_csv('../../processed_data/counties/prism/us_counties_cro_cvm_locations.csv')
county_dic = {}
for c in counties.itertuples():
state, county, lat0, lat1, lon0, lon1 = c.state, c.county, c.lat0, c.lat1, c.lon0, c.lon1
county_dic[(state, county)] = [lat0, lat1, lon0, lon1]
climate_vars = ["ppt", "tdmean", "tmax", "tmean", "tmin", "vpdmax", "vpdmin"]
yield_values = []
output_folder = 'counties' if not selected_states else 'counties_major_states'
if not os.path.exists(
'../../experiment_data/spatial_temporal/{}_{}/{}'.format(start_month, end_month, output_folder)):
os.makedirs(
'../../experiment_data/spatial_temporal/{}_{}/{}'.format(start_month, end_month, output_folder))
for yd in yield_data.itertuples():
year, state, county, value = yd.year, yd.state, yd.county, yd.value
if selected_states is not None and state not in selected_states:
continue
# no location info
if (state, county) not in county_dic:
continue
lat0, lat1, lon0, lon1 = county_dic[(state, county)]
assert lat1 - lat0 == 9
assert lon1 - lon0 == 9
values = []
fh = Dataset('../../experiment_data/spatial_temporal/{}_{}/nc/{}.nc'.format(start_month, end_month, year))
for climate_var in climate_vars:
values.append(fh.variables[climate_var][:, lat0:lat1 + 1, lon0:lon1 + 1])
values = np.asarray(values)
np.save('../../experiment_data/spatial_temporal/{}_{}/{}/{}_{}_{}.npy'.format(start_month, end_month,
output_folder, state, county, year),
values)
yield_values.append([year, state, county, value])
assert values.shape == (7, end_month-start_month+1, 10, 10), values.shape
fh.close()
np.save('../../experiment_data/spatial_temporal/{}_{}/{}/y.npy'.format(start_month, end_month, output_folder),
np.asarray(yield_values))
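# Hedged loading sketch (illustration only): each county/year sample saved above is a
# (7, n_months, 10, 10) array of the seven PRISM variables over a 10x10 pixel window,
# and y.npy holds the matching [year, state, county, yield] rows. The paths mirror the
# ones used in generate_spatial_temporal().
def _example_load_spatial_temporal(start_month, end_month, folder, state, county, year):
    x = np.load('../../experiment_data/spatial_temporal/{}_{}/{}/{}_{}_{}.npy'.format(
        start_month, end_month, folder, state, county, year))
    y = np.load('../../experiment_data/spatial_temporal/{}_{}/{}/y.npy'.format(
        start_month, end_month, folder))
    return x, y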
if __name__ == '__main__':
# get_counties_lat_lon_indices('../../processed_data/counties/us_counties_cro_cvm_locations.csv',
# '../../processed_data/counties/prism/us_counties_cro_cvm_locations.csv',
# n_pixels=10)
#
# average_by_year(1, 9)
# combine_by_year(1, 9)
# generate_no_spatial('soybeans', 1, 9)
generate_only_spatial('soybeans', 1, 9)
generate_spatial_temporal('soybeans', 1, 9)
MAJOR_STATES = [5, 17, 18, 19, 20, 27, 29, 31, 38, 39, 46]
# mask_non_major_states('../../experiment_data/only_spatial/averaged_1_9/nc',
# '../../experiment_data/only_spatial/averaged_1_9/nc_major_states',
# MAJOR_STATES)
# mask_non_major_states('../../experiment_data/spatial_temporal/1_9/nc',
# '../../experiment_data/spatial_temporal/1_9/nc_major_states',
# MAJOR_STATES)
# generate_no_spatial('soybeans', 1, 9, selected_states=MAJOR_STATES)
generate_only_spatial('soybeans', 1, 9, selected_states=MAJOR_STATES)
generate_spatial_temporal('soybeans', 1, 9, selected_states=MAJOR_STATES)
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/postprocess/prism.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .combine_multi_vars import mask_non_major_states
from .combine_multi_vars import generate_no_spatial_for_counties
from .combine_multi_vars import obtain_channel_wise_mean_std
__all__ = ['mask_non_major_states',
'generate_no_spatial_for_counties',
'obtain_channel_wise_mean_std']
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/postprocess/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from netCDF4 import Dataset
import numpy as np
import os
import pandas as pd
import csv
from collections import defaultdict
import numpy.ma as ma
import pickle
import sys
sys.path.append("..")
from data_preprocessing import CLIMATE_VARS
from data_preprocessing import STATIC_CLIMATE_VARS
from data_preprocessing import DYNAMIC_CLIMATE_VARS
def get_counties_lat_lon_indices(in_file, out_file, n_pixels):
counties = pd.read_csv(in_file)
counties.columns = ['state', 'county', 'lat', 'lon']
ref_file = Dataset('../../processed_data/lst/monthly_1km/201505.nc', 'r')
ref_lats, ref_lons = ref_file.variables['lat'][:], ref_file.variables['lon'][:]
ref_file.close()
# output columns: state, county, lat, lon, lat_index0, lat_index1, lon_index0, lon_index1
with open(out_file, 'w') as f:
writer = csv.writer(f, delimiter=',')
writer.writerow(['state', 'county', 'lat', 'lon', 'lat0', 'lat1', 'lon0', 'lon1'])
for c in counties.itertuples():
state, county, lat, lon = c.state, c.county, c.lat, c.lon
lat_indices = sorted(np.argsort(np.abs(ref_lats - lat))[:n_pixels])
lon_indices = sorted(np.argsort(np.abs(ref_lons - lon))[:n_pixels])
line = [state, county, lat, lon, lat_indices[0], lat_indices[-1], lon_indices[0], lon_indices[-1]]
writer.writerow(line)
def combine_by_year(start_month, end_month, dir_var_tuple_list):
fill_value_dic = {'ndvi': -0.2, 'evi': -0.2, 'elevation': 0,
'lst_day': 280, 'lst_night': 280, 'sand': 101, 'clay': 101, 'silt': 101}
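    # Non-ppt variables are gap-filled with the defaults above and then re-masked with the
    # ppt mask, so the ppt footprint alone decides which cells count as valid.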
for year in range(2000, 2018):
fh_out = Dataset('../../experiment_data/spatial_temporal/nc_files_unmasked/{}.nc'.format(year), 'w')
var_list = []
n_t = end_month - start_month + 1
first_first_flag = True
first_flag = True
n_dim = {}
ppt_mask = None
for i_month, month in enumerate(range(start_month, end_month+1)):
for (f_dir, selected_vars) in dir_var_tuple_list:
if os.path.isfile(f_dir):
fh_in = Dataset(f_dir, 'r')
else:
fh_in = Dataset('{}/{}{}.nc'.format(f_dir, year, '{0:02}'.format(month)))
if first_first_flag:
for name, dim in fh_in.dimensions.items():
n_dim[name] = len(dim)
fh_out.createDimension(name, len(dim))
fh_out.createDimension('time', n_t)
outVar = fh_out.createVariable('time', 'int', ("time",))
outVar[:] = range(start_month, end_month + 1)
for v_name, varin in fh_in.variables.items():
if v_name == 'lat' or v_name == 'lon':
outVar = fh_out.createVariable(v_name, varin.datatype, varin.dimensions)
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
outVar[:] = varin[:]
first_first_flag = False
if first_flag:
for v_name, varin in fh_in.variables.items():
if v_name in selected_vars:
var_list.append(v_name)
outVar = fh_out.createVariable(v_name, 'f4', ("time", "lat", "lon",))
# outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
outVar[:] = ma.empty((n_t, n_dim['lat'], n_dim['lon']))
if v_name == 'ppt':
ppt_mask = ma.getmaskarray(fh_in.variables['ppt'][:])
assert ppt_mask is not None
for vname in selected_vars:
if vname != 'ppt':
var_value = ma.filled(fh_in.variables[vname][:], fill_value=fill_value_dic[vname])
var_value = ma.array(var_value, mask=ppt_mask)
else:
var_value = fh_in.variables[vname][:]
fh_out.variables[vname][i_month, :, :] = var_value
fh_in.close()
first_flag = False
print(var_list)
fh_out.close()
def generate_no_spatial_for_counties(yield_data_dir, ppt_file, county_location_file, out_dir, img_dir, croptype, start_month, end_month, start_index):
yield_data = pd.read_csv('{}/{}_2000_2018.csv'.format(yield_data_dir, croptype))[[
'Year', 'State ANSI', 'County ANSI', 'Value']]
yield_data.columns = ['year', 'state', 'county', 'value']
if yield_data.value.dtype != float:
yield_data['value'] = yield_data['value'].str.replace(',', '')
yield_data = yield_data.astype({'year': int, 'state': int, 'county': int, 'value': float})
ppt_fh = Dataset(ppt_file, 'r')
v_ppt = ppt_fh.variables['ppt'][0, :, :]
counties = pd.read_csv(county_location_file)
county_dic = {}
for c in counties.itertuples():
state, county, lat0, lat1, lon0, lon1 = c.state, c.county, c.lat0, c.lat1, c.lon0, c.lon1
county_dic[(state, county)] = [lat0, lat1, lon0, lon1]
csv_header = ['year', 'state', 'county', 'yield']
for climate_var in DYNAMIC_CLIMATE_VARS:
for month in map(str, range(start_month, end_month+1)):
csv_header.append(climate_var + "_" + month)
for climate_var in STATIC_CLIMATE_VARS:
csv_header.append(climate_var)
for climate_var in CLIMATE_VARS:
csv_header.append(climate_var + "_mean")
output_file = '{}/{}_{}_{}.csv'.format(out_dir, croptype, start_month, end_month)
n_t = end_month - start_month + 1
with open(output_file, 'w') as f:
writer = csv.writer(f, delimiter=',')
writer.writerow(csv_header)
for yd in yield_data.itertuples():
year, state, county, value = yd.year, yd.state, yd.county, yd.value
# no location info
if (state, county) not in county_dic:
continue
lat0, lat1, lon0, lon1 = county_dic[(state, county)]
assert lat1 - lat0 == 49
assert lon1 - lon0 == 49
selected_ppt = v_ppt[lat0:lat1 + 1, lon0:lon1 + 1]
if ma.count_masked(selected_ppt) != 0:
continue
values = [year, state, county, value]
value_dic = defaultdict(list)
if '{}.nc'.format(year) not in os.listdir(img_dir):
continue
fh = Dataset('{}/{}.nc'.format(img_dir, year))
for climate_var in DYNAMIC_CLIMATE_VARS:
for i_month in range(n_t):
selected_values = fh.variables[climate_var][i_month+start_index, lat0:lat1+1, lon0:lon1+1]
averaged = ma.mean(selected_values)
values.append(averaged)
value_dic[climate_var].append(averaged)
for climate_var in STATIC_CLIMATE_VARS:
selected_values = fh.variables[climate_var][0, lat0:lat1 + 1, lon0:lon1 + 1]
averaged = ma.mean(selected_values)
values.append(averaged)
value_dic[climate_var].append(averaged)
fh.close()
for climate_var in CLIMATE_VARS:
values.append(np.mean(value_dic[climate_var]))
writer.writerow(values)
def mask_non_major_states(in_dir, out_dir, mask_file, major_states):
if not os.path.exists(out_dir):
os.makedirs(out_dir)
fh_mask = Dataset(mask_file, 'r')
state_codes = fh_mask.variables['state_code'][:]
major_state_mask = ~np.in1d(state_codes, major_states).reshape(state_codes.shape)
for nc_file in os.listdir(in_dir):
if nc_file.endswith('.nc'):
fh_in = Dataset('{}/{}'.format(in_dir, nc_file), 'r')
fh_out = Dataset('{}/{}'.format(out_dir, nc_file), 'w')
for name, dim in fh_in.dimensions.items():
fh_out.createDimension(name, len(dim) if not dim.isunlimited() else None)
for v_name, varin in fh_in.variables.items():
outVar = fh_out.createVariable(v_name, varin.datatype, varin.dimensions)
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
if v_name in ['lat', 'lon', 'time']:
outVar[:] = varin[:]
else:
if 'time' not in fh_in.variables:
outVar[:] = ma.array(varin[:], mask=major_state_mask)
else:
outVar[:] = ma.array(varin[:], mask=np.tile(major_state_mask, (varin[:].shape[0], 1)))
fh_in.close()
fh_out.close()
fh_mask.close()
def obtain_channel_wise_mean_std(img_dir):
mean_dic = {}
std_dic = {}
for month_index in range(8):
cv_dic = defaultdict(list)
for year in range(2000, 2014):
fh = Dataset('{}/{}.nc'.format(img_dir, year))
for v_name, varin in fh.variables.items():
if v_name in CLIMATE_VARS:
cv_dic[v_name].append(varin[month_index].compressed())
fh.close()
means = []
stds = []
for cv in CLIMATE_VARS:
values = np.asarray(cv_dic[cv])
means.append(np.mean(values))
stds.append(np.std(values))
mean_dic[month_index] = means
std_dic[month_index] = stds
with open('{}/monthly_channel_wise_mean.pkl'.format(img_dir), 'wb') as f:
pickle.dump(mean_dic, f)
with open('{}/monthly_channel_wise_std.pkl'.format(img_dir), 'wb') as f:
pickle.dump(std_dic, f)
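# Hedged usage sketch (illustration only): the two pickle files written above map a month
# index (0-7) to per-channel means/stds ordered like CLIMATE_VARS, so an image tensor of
# shape (channels, H, W) for that month can be standardized channel-wise.
def _example_apply_channel_stats(img_dir, month_index, image):
    with open('{}/monthly_channel_wise_mean.pkl'.format(img_dir), 'rb') as f:
        mean_dic = pickle.load(f)
    with open('{}/monthly_channel_wise_std.pkl'.format(img_dir), 'rb') as f:
        std_dic = pickle.load(f)
    means = np.asarray(mean_dic[month_index])[:, None, None]
    stds = np.asarray(std_dic[month_index])[:, None, None]
    return (image - means) / stds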
if __name__ == '__main__':
# get_counties_lat_lon_indices('../../processed_data/counties/us_counties_cro_cvm_locations.csv',
# '../../processed_data/counties/lst/us_counties_cro_cvm_locations.csv',
# n_pixels=50)
# combine_by_year(start_month=2,
# end_month=9,
# dir_var_tuple_list=[
# ('../../processed_data/prism/combined_monthly_1km', ['ppt']),
# ('../../processed_data/ndvi/1km', ['ndvi', 'evi']),
# ('../../processed_data/elevation/1km.nc', ['elevation']),
# ('../../processed_data/lst/monthly_1km', ['lst_day', 'lst_night']),
# ('../../processed_data/soil_fraction/soil_fraction_usa_1km.nc', ['sand', 'clay', 'silt'])])
    generate_no_spatial_for_counties(yield_data_dir='../../processed_data/crop_yield',
                                     # ppt_file supplies the 1km validity mask; the exact file is an
                                     # assumption here -- any combined netCDF with a 'ppt' layer works.
                                     ppt_file='../../experiment_data/spatial_temporal/nc_files/2000.nc',
                                     county_location_file='../../processed_data/counties/lst/us_counties_cro_cvm_locations.csv',
out_dir='../../experiment_data/no_spatial',
img_dir='../../experiment_data/spatial_temporal/nc_files',
croptype='soybeans',
start_month=3,
end_month=9,
start_index=1)
MAJOR_STATES = [1, 5, 10, 13, 17, 18, 19, 20, 21, 22, 24, 26, 27, 28, 29, 31, 34, 36, 37, 38, 39, 40, 42, 45,
46, 47, 48, 51, 55, 54, 12]
mask_non_major_states('../../experiment_data/spatial_temporal/nc_files_unmasked',
'../../experiment_data/spatial_temporal/nc_files',
'../../processed_data/counties/lst/us_counties.nc',
MAJOR_STATES)
| Context-Aware-Representation-Crop-Yield-Prediction-main | data_preprocessing/postprocess/combine_multi_vars.py |
"""
"""
from __future__ import print_function
from __future__ import division
import matplotlib
matplotlib.use('agg')
import glob
import os
import torch
import argparse
import warnings
import pandas
from torch.utils.data import Dataset, DataLoader
from utils import *
from metrics import *
from dataloaders import *
from models import GridSearchTrainer
from sklearn.exceptions import UndefinedMetricWarning
from utils.viz import tsne_plot, analysis_plot
from transforms import *
try:
# for python2
import cPickle
except ImportError:
# for python3
import _pickle as cPickle
logger = logging.getLogger(__name__)
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=UndefinedMetricWarning)
warnings.filterwarnings("ignore", category=RuntimeWarning)
# set reasonable pandas dataframe display defaults
pandas.set_option('display.max_rows', 500)
pandas.set_option('display.max_columns', 500)
pandas.set_option('display.width', 1000)
torch.backends.cudnn.deterministic = True
def score(model, data_loader, classes, threshold=0.5, seed=1234, use_cuda=False, topSelection=None):
    """ Generate classification report """
np.random.seed(seed=int(seed))
torch.manual_seed(seed)
if use_cuda:
torch.cuda.manual_seed_all(seed)
model.eval()
y_proba, y_pred = model.predict(data_loader, threshold=threshold, binary=len(classes)==2, return_proba=True, topSelection=topSelection)
print(y_proba)
preds_table = "PID,Y_TRUE,Y_PROBA,Y_PRED\n"
preds_table += "\n".join(["{},{},{},{}".format(data[0], data[1], y_proba[i], y_pred[i]) for i,data in enumerate(data_loader.dataset.get_labels())])
try:
y_true = np.hstack([y.numpy() for x,y in data_loader])
results = classification_summary(y_true, y_pred, classes, y_proba)
preds = {"y_true":y_true, "y_pred":y_pred, "y_proba": y_proba}
return results, preds_table, preds
except:
preds = {"y_true":None, "y_pred":y_pred, "y_proba": y_proba}
return None, preds_table, preds
def load_dataset(args):
"""
Load UKBB datasets
Image centering statistics
/lfs/1/heartmri/coral32/flow_250_tp_AoV_bh_ePAT@c/
max: 192
mean: 27.4613475359
std: 15.8350095314
/lfs/1/heartmri/coral32/flow_250_tp_AoV_bh_ePAT@c_P/
max: 4095
mean: 2045.20689212
std: 292.707986212
/lfs/1/heartmri/coral32/flow_250_tp_AoV_bh_ePAT@c_MAG/
max: 336.0
mean: 24.1274
std: 14.8176
:param args:
:return:
"""
if args.dataset == "UKBB":
if args.cache_data:
DataSet = UKBBCardiacMRICache
elif args.meta_data:
DataSet = UKBBCardiacMRIMeta
else:
DataSet = UKBBCardiacMRI
classes = ("TAV", "BAV")
# Get Preprocessing and Augmentation params
preprocessing, augmentation, postprocessing = get_data_config(args)
print_dict_pairs(preprocessing, title="Data Preprocessing Args")
print_dict_pairs(augmentation, title="Data Augmentation Args")
preprocessing["n_frames"] = args.n_frames
# Preprocessing data should be computed on ALL datasets (train, val,
# and test). This includes:
# - Frame Selection
# - Rescale Intensity
# - Gamma Correction
if (args.series == 3):
preprocess_data = compose_preprocessing_multi(preprocessing)
else:
preprocess_data = compose_preprocessing(preprocessing)
postprocess_data = None
if (postprocessing is not None):
if (args.series == 3):
postprocess_data = compose_postprocessing_multi(postprocessing)
else:
postprocess_data = compose_postprocessing(postprocessing)
test = DataSet(args.labelcsv, args.test,
series=args.series, N=args.n_frames,
image_type=args.image_type,
preprocess=preprocess_data,
postprocess=postprocess_data,
seed=args.data_seed)
return test, classes
else:
logger.error("Dataset name not recognized")
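# Hedged sketch (illustration only) of how the centering statistics listed in the
# load_dataset() docstring could be applied: standardize a frame with the mean/std of
# its series, e.g. 27.46 / 15.84 for flow_250_tp_AoV_bh_ePAT@c. Whether the actual
# preprocessing config uses exactly this form is an assumption here.
def _example_center_frame(frame, mean=27.4613475359, std=15.8350095314):
    return (np.asarray(frame, dtype=float) - mean) / std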
def get_best_model_weights_path(weights_path, model_name, dataset, seed):
weights_path = "{}/{}_{}_{}".format(weights_path, dataset, model_name, str(seed))
paths = glob.glob("{}/{}_BEST".format(weights_path, model_name))
#paths = sorted(paths, key=lambda x: int(x.split('_')[-1]))
print("{}/{}_BEST".format(weights_path, model_name))
print("Weights loaded: {}".format(paths))
return paths[0]
def main(args):
ts = int(time.time())
# ------------------------------------------------------------------------------
# Load Dataset
# ------------------------------------------------------------------------------
test, classes = load_dataset(args)
logger.info("[TEST] {}".format(len(test)))
logger.info("Classes: {}".format(" ".join(classes)))
# ------------------------------------------------------------------------------
# Load Model and Hyperparameter Grid
# ------------------------------------------------------------------------------
args.model_weights_path = get_best_model_weights_path(args.model_weights_path, args.model_name, args.dataset, args.seed)
best_model = torch.load(args.model_weights_path)
model = best_model['model']
args.threshold = best_model['threshold']
print("Threshold: {}".format(args.threshold))
print("Best[DEV]score:")
print("'fit'0.000sec")
checkpoint_dir = "{}/{}_{}_{}".format(args.outdir, args.dataset, args.model_name, args.seed)
if not os.path.exists(args.outdir):
os.makedirs(args.outdir, exist_ok=True)
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir, exist_ok=True)
# ------------------------------------------------------------------------------
# Score and Save Best Model
# ------------------------------------------------------------------------------
test_loader = DataLoader(test, batch_size=args.batch_size, shuffle=False, num_workers=0)
results, preds_table, preds = score(model, test_loader, classes, threshold=args.threshold,
seed=args.seed, use_cuda=args.use_cuda,
topSelection=args.top_selection)
if args.outdir:
if results is not None:
cPickle.dump(results, open("{!s}/results_{!s}.pkl".format(checkpoint_dir, args.seed), "wb"))
open("{}/predictions_{}.csv".format(checkpoint_dir, args.seed), "w").write(preds_table)
# ------------------------------------------------------------------------------
# Generate Plots and Reports
# ------------------------------------------------------------------------------
if args.outdir and args.report:
tsne_plot(model, test_loader, "{}/{}.".format(checkpoint_dir, args.seed),
seed=args.seed, use_cuda=args.use_cuda, threshold=args.threshold, fmt="pdf",
topSelection=args.top_selection, save_coords=args.tsne_save_coords,
pred_only=args.tsne_pred_only, save_embeds=args.save_embeds,
classes=args.tsne_classes)
plot_types=['plt_hist_plot', 'hist_plot', 'roc_curve', 'prc_curve']
#analysis_plot(model=model, data_loader=test_loader,
# outfpath="{}/{}.".format(checkpoint_dir, args.seed),
# types=plot_types, fmt="pdf",
# seed=args.seed, use_cuda=args.use_cuda)
analysis_plot(y_true=preds["y_true"], y_proba=preds["y_proba"],
outfpath="{}/{}.".format(checkpoint_dir, args.seed),
types=plot_types, fmt="pdf")
if __name__ == "__main__":
argparser = argparse.ArgumentParser()
argparser.add_argument("--top_selection", type=int, default=None, help="the number of positive cases to select from the test set")
argparser.add_argument("-d", "--dataset", type=str, default="UKBB", help="dataset name")
argparser.add_argument("--threshold", type=float, default=0.5, help="threshold cutoff to use when evaluating test set")
argparser.add_argument("--model_weights_path", type=str, help="the path to the saved model weights, e.g. `--model_weights_path test` when file is `test/UKBB_Dense4012FrameRNN_14/Dense4012FrameRNN_BEST`.")
argparser.add_argument("--model_name", type=str, help="the name of the model")
argparser.add_argument("-a", "--dconfig", type=str, default=None, help="load data config JSON")
argparser.add_argument("-c", "--config", type=str, default=None, help="load model config JSON")
argparser.add_argument("-L", "--labelcsv", type=str, default="labels.csv", help="dataset labels csv filename")
argparser.add_argument("-o", "--outdir", type=str, default=None, help="save model to outdir")
argparser.add_argument("--test", type=str, default=None, help="test set")
argparser.add_argument("--data_seed", type=int, default=4321, help="random sample seed")
argparser.add_argument("-B", "--batch_size", type=int, default=4, help="batch size")
argparser.add_argument("-H", "--host_device", type=str, default="gpu", help="Host device (GPU|CPU)")
argparser.add_argument("-I", "--image_type", type=str, default='grey', choices=['grey', 'rgb'], help="the image type, grey/rgb")
argparser.add_argument("--use_cuda", action="store_true", help="whether to use GPU(CUDA)")
argparser.add_argument("--cache_data", action="store_true", help="whether to cache data into memory")
argparser.add_argument("--meta_data", action="store_true", help="whether to include meta data in model")
argparser.add_argument("-F", "--n_frames", type=int, default=30, help="number of frames to select from a series")
argparser.add_argument("--series", type=int, default=0, choices=[0, 1, 2, 3], help="which series to load for training")
argparser.add_argument("--report", action="store_true", help="generate summary plots")
argparser.add_argument("--seed", type=int, default=1234, help="random model seed")
argparser.add_argument("--quiet", action="store_true", help="suppress logging")
argparser.add_argument("--verbose", action="store_true", help="print debug information to log")
argparser.add_argument("--tsne_save_coords", action="store_true", help="whether to save coords of tsne.")
argparser.add_argument("--tsne_pred_only", action="store_true", help="whether to plot preds only in tsne.")
    argparser.add_argument("--tsne_classes", default=None, type=int, action='append', help="the classes used to plot tsne plots. default to read from labels Y_TRUE.")
argparser.add_argument("--save_embeds", action="store_true", help="whether to save the embedding of test set.")
args = argparser.parse_args()
if not args.quiet:
logging.basicConfig(format='%(message)s', stream=sys.stdout, level=logging.INFO)
if not torch.cuda.is_available() and args.host_device.lower() == 'gpu':
logger.error("Warning! CUDA not available, defaulting to CPU")
args.host_device = "cpu"
if torch.cuda.is_available():
logger.info("CUDA PyTorch Backends")
logger.info("torch.backends.cudnn.deterministic={}".format(torch.backends.cudnn.deterministic))
# print summary of this run
logger.info("python " + " ".join(sys.argv))
print_key_pairs(args.__dict__.items(), title="Command Line Args")
main(args)
| ukb-cardiac-mri-master | ukb/predict.py |
import argparse
import pandas as pd
from os import makedirs
from os.path import isdir
from ensemble import *
def main(args):
pd.set_option("display.width", 100)
if args.pids_csv is not None:
pids = list(pd.read_csv(args.pids_csv)[args.pids_key])
else:
pids = None
if args.output_dir is None:
output_dir = "{}/ensemble".format(args.results_dir)
else:
output_dir = args.output_dir
if not isdir(output_dir):
makedirs(output_dir)
if args.output_name is None:
output_name = "ensemble"
else:
output_name = args.output_name
experiment = Ensemble.from_folder(args.results_dir, args.dev_dir, pids=pids)
_ = experiment.median_vote(metric=args.metric)
_ = experiment.mv_vote()
if experiment.score:
print(experiment.score_dataframe)
experiment.score_dataframe.to_csv("{}/{}_score.csv".format(output_dir, output_name), index=False)
experiment.proba_dataframe.to_csv("{}/{}_proba.csv".format(output_dir, output_name), index=False)
experiment.pred_dataframe.to_csv("{}/{}_pred.csv".format(output_dir, output_name), index=False)
print("Ensembled results are saved into {}.".format(output_dir))
if __name__ == "__main__":
argparser = argparse.ArgumentParser()
argparser.add_argument("--results_dir", type=str, required=True, help="the folder where the results are")
argparser.add_argument("--dev_dir", type=str, required=True, help="the folder where the devset results are")
argparser.add_argument("--metric", type=str, default="f1_score", help="the metric for tuning threshold")
argparser.add_argument("--pids_csv", type=str, default=None, help="the csv of pids to filter the results")
argparser.add_argument("--pids_key", type=str, default="ID", help="the label for pids in the csv: ID/PID/etc.")
argparser.add_argument("--output_dir", type=str, default=None, help="folder to save the ensembled results")
argparser.add_argument("--output_name", type=str, default=None, help="name used to save the ensembled results")
args = argparser.parse_args()
main(args)
| ukb-cardiac-mri-master | ukb/ensemble.py |
"""
"""
from __future__ import print_function
from __future__ import division
import matplotlib
matplotlib.use('agg')
import os
import torch
import argparse
import warnings
import pandas
from torch.utils.data import Dataset, DataLoader
from utils import *
from metrics import *
from dataloaders import *
from models import GridSearchTrainer
from sklearn.exceptions import UndefinedMetricWarning
from utils.viz import tsne_plot, analysis_plot
from transforms import *
try:
# for python2
import cPickle
except ImportError:
# for python3
import _pickle as cPickle
logger = logging.getLogger(__name__)
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=UndefinedMetricWarning)
warnings.filterwarnings("ignore", category=RuntimeWarning)
# set reasonable pandas dataframe display defaults
pandas.set_option('display.max_rows', 500)
pandas.set_option('display.max_columns', 500)
pandas.set_option('display.width', 1000)
torch.backends.cudnn.deterministic = True
def score(model, data_loader, classes, threshold=0.5, seed=1234, use_cuda=False, topSelection=None):
    """ Generate classification report """
np.random.seed(seed=int(seed))
torch.manual_seed(seed)
if use_cuda:
torch.cuda.manual_seed_all(seed)
y_proba, y_pred = model.predict(data_loader, threshold=threshold, binary=len(classes)==2, return_proba=True, topSelection=topSelection)
print(y_proba)
preds_table = "PID,Y_TRUE,Y_PROBA,Y_PRED\n"
preds_table += "\n".join(["{},{},{},{}".format(data[0], data[1], y_proba[i], y_pred[i]) for i,data in enumerate(data_loader.dataset.get_labels())])
try:
y_true = np.hstack([y.numpy() for x,y in data_loader])
results = classification_summary(y_true, y_pred, classes, y_proba)
preds = {"y_true":y_true, "y_pred":y_pred, "y_proba": y_proba}
return results, preds_table, preds
except:
preds = {"y_true":None, "y_pred":y_pred, "y_proba": y_proba}
return None, preds_table, preds
def load_dataset(args):
"""
Load UKBB or CIFAR10 datasets
Image centering statistics
/lfs/1/heartmri/coral32/flow_250_tp_AoV_bh_ePAT@c/
max: 192
mean: 27.4613475359
std: 15.8350095314
/lfs/1/heartmri/coral32/flow_250_tp_AoV_bh_ePAT@c_P/
max: 4095
mean: 2045.20689212
std: 292.707986212
/lfs/1/heartmri/coral32/flow_250_tp_AoV_bh_ePAT@c_MAG/
max: 336.0
mean: 24.1274
std: 14.8176
:param args:
:return:
"""
if args.dataset == "UKBB":
if args.cache_data:
DataSet = UKBBCardiacMRICache
elif args.meta_data:
DataSet = UKBBCardiacMRIMeta
else:
DataSet = UKBBCardiacMRI
classes = ("TAV", "BAV")
# Get Preprocessing and Augmentation params
preprocessing, augmentation, postprocessing = get_data_config(args)
print_dict_pairs(preprocessing, title="Data Preprocessing Args")
print_dict_pairs(augmentation, title="Data Augmentation Args")
preprocessing["n_frames"] = args.n_frames
# Preprocessing data should be computed on ALL datasets (train, val,
# and test). This includes:
# - Frame Selection
# - Rescale Intensity
# - Gamma Correction
if (args.series == 3):
preprocess_data = compose_preprocessing_multi(preprocessing)
else:
preprocess_data = compose_preprocessing(preprocessing)
# HACK ignore augmentations (for now)
# data augmentations only to be used during training
augment_train = None
if (augmentation is not None):
augment_train = compose_augmentation(augmentation, seed=args.data_seed)
postprocess_data = None
if (postprocessing is not None):
if (args.series == 3):
postprocess_data = compose_postprocessing_multi(postprocessing)
else:
postprocess_data = compose_postprocessing(postprocessing)
train = DataSet(args.labelcsv, args.train,
series=args.series, N=args.n_frames,
image_type=args.image_type,
preprocess=preprocess_data,
augmentation=augment_train,
postprocess=postprocess_data,
rebalance=args.rebalance,
threshold=args.data_threshold,
seed=args.data_seed,
sample=args.sample,
sample_type=args.sample_type,
sample_split=args.sample_split,
n_samples=args.n_samples,
pos_samples=args.pos_samples,
neg_samples=args.neg_samples,
frame_label=args.use_frame_label,
rebalance_strategy=args.rebalance_strategy,
semi=args.semi, semi_dir=args.semi_dir, semi_csv=args.semi_csv)
# randomly split dev into stratified dev/test sets
if args.stratify_dev:
df = stratified_sample_dataset("{}/labels.csv".format(args.dev), args.seed)
dev = DataSet(df["dev"], args.dev,
series=args.series, N=args.n_frames,
image_type=args.image_type,
preprocess=preprocess_data,
postprocess=postprocess_data,
seed=args.data_seed)
test = DataSet(df["test"], args.dev,
series=args.series, N = args.n_frames,
image_type=args.image_type,
preprocess=preprocess_data,
postprocess=postprocess_data,
seed=args.data_seed)
# use manually defined dev/test sets
else:
dev = DataSet(args.devcsv, args.dev,
series=args.series, N=args.n_frames,
image_type=args.image_type,
preprocess=preprocess_data,
postprocess=postprocess_data,
seed=args.data_seed)
if args.test:
test = DataSet(args.testcsv, args.test,
series=args.series, N=args.n_frames,
image_type=args.image_type,
preprocess=preprocess_data,
postprocess=postprocess_data,
seed=args.data_seed)
else:
test = None
return train, dev, test, classes
elif args.dataset == "CIFAR10":
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
data_root = "data/CIFAR10/"
if not os.path.exists(data_root):
os.mkdir(data_root)
num_samples = 500
train = CIFAR10(data_root, split="train", num_samples=num_samples)
dev = CIFAR10(data_root, split="dev", num_samples=num_samples)
test = CIFAR10(data_root, split="test", num_samples=num_samples)
return train, dev, test, classes
else:
logger.error("Dataset name not recognized")
def main(args):
ts = int(time.time())
# ------------------------------------------------------------------------------
# Load Dataset
# ------------------------------------------------------------------------------
train, dev, test, classes = load_dataset(args)
logger.info("[TRAIN] {}".format(len(train)))
logger.info("[DEV] {}".format(len(dev)))
if args.test:
logger.info("[TEST] {}".format(len(test)))
logger.info("Classes: {}".format(" ".join(classes)))
# ------------------------------------------------------------------------------
# Load Model and Hyperparameter Grid
# ------------------------------------------------------------------------------
# - model_class: target model class object
# - model_class_params: params required to initialize model
# - model_param_grid: hyperparameter search space
model_class, model_class_params, model_param_grid = get_model_config(args)
model_class_params["seq_max_seq_len"] = args.n_frames
model_class_params["pretrained"] = args.pretrained
model_class_params["requires_grad"] = args.requires_grad
# ------------------------------------------------------------------------------
# Train Model
# ------------------------------------------------------------------------------
checkpoint_dir = "{}/{}_{}_{}".format(args.outdir, args.dataset, model_class.__name__, args.seed)
if not os.path.exists(args.outdir):
os.makedirs(args.outdir, exist_ok=True)
trainer = GridSearchTrainer(model_class, model_class_params,
model_param_grid, args.n_model_search,
noise_aware=args.noise_aware,
use_cuda=args.use_cuda, seed=args.seed)
num_frames = args.n_frames if model_class.__name__ == "VGG16Net" else None
fit_time, model, _, tuned_threshold = trainer.fit(train, dev, test,
n_epochs=args.n_epochs, checkpoint_burn=args.checkpoint_burn,
update_freq=args.update_freq, checkpoint_freq=args.checkpoint_freq,
checkpoint_dir=checkpoint_dir, num_frames=num_frames,
tune_metric=args.tune_metric, metric=args.early_stopping_metric,
verbose=args.verbose)
# ------------------------------------------------------------------------------
# Score and Save Best Model
# ------------------------------------------------------------------------------
test_loader = DataLoader(test, batch_size=args.batch_size, shuffle=False, num_workers=0)
results, preds_table, preds = score(model, test_loader, classes, threshold=tuned_threshold,
seed=args.seed, use_cuda=args.use_cuda,
topSelection=args.top_selection)
results.update({"time":fit_time})
if args.outdir:
trainer.save(model.state_dict(), checkpoint_dir, "best.{}".format(args.seed))
cPickle.dump(results, open("{!s}/results_{!s}.pkl".format(checkpoint_dir, args.seed), "wb"))
open("{}/predictions_{}.csv".format(checkpoint_dir, args.seed), "w").write(preds_table)
# ------------------------------------------------------------------------------
# Generate Plots and Reports
# ------------------------------------------------------------------------------
if args.outdir and args.report:
tsne_plot(model, test_loader, "{}/{}.".format(checkpoint_dir, args.seed),
seed=args.seed, use_cuda=args.use_cuda, threshold=tuned_threshold, fmt="pdf",
topSelection=args.top_selection, save_coords=args.tsne_save_coords,
pred_only=args.tsne_pred_only, save_embeds=args.save_embeds,
classes=args.tsne_classes)
plot_types=['plt_hist_plot', 'hist_plot', 'roc_curve', 'prc_curve']
#analysis_plot(model=model, data_loader=test_loader,
# outfpath="{}/{}.".format(checkpoint_dir, args.seed),
# types=plot_types, fmt="pdf",
# seed=args.seed, use_cuda=args.use_cuda)
analysis_plot(y_true=preds["y_true"], y_proba=preds["y_proba"],
outfpath="{}/{}.".format(checkpoint_dir, args.seed),
types=plot_types, fmt="pdf")
if __name__ == "__main__":
argparser = argparse.ArgumentParser()
argparser.add_argument("-d", "--dataset", type=str, default="UKBB", help="dataset name")
argparser.add_argument("-L", "--labelcsv", type=str, default="labels.csv", help="dataset labels csv filename")
argparser.add_argument("--devcsv", type=str, default="labels.csv", help="dev set labels csv filename")
argparser.add_argument("--testcsv", type=str, default="labels.csv", help="test set labels csv filename")
argparser.add_argument("--train", type=str, default=None, help="training set")
argparser.add_argument("--dev", type=str, default=None, help="dev (validation) set")
argparser.add_argument("--test", type=str, default=None, help="test set")
argparser.add_argument("--stratify_dev", action="store_true", help="split dev into stratified dev/test")
argparser.add_argument("-c", "--config", type=str, default=None, help="load model config JSON")
argparser.add_argument("-g", "--param_grid", type=str, default=None, help="load manual parameter grid from JSON")
argparser.add_argument("-p", "--params", type=str, default=None, help="load `key=value,...` pairs from command line")
argparser.add_argument("-o", "--outdir", type=str, default=None, help="save model to outdir")
argparser.add_argument("-a", "--dconfig", type=str, default=None, help="load data config JSON")
argparser.add_argument("-R", "--rebalance", action="store_true", help="rebalance training data")
argparser.add_argument("--data_threshold", type=float, default=0.5, help="threshold cutoff to use when sampling patients")
argparser.add_argument("--data_seed", type=int, default=4321, help="random sample seed")
argparser.add_argument("--sample", action="store_true", help="sample training data")
argparser.add_argument("--sample_type", type=int, default=0, choices=[0, 1, 2, 3],
help="sample method to use [1: Random Sample, 1: Threshold Random Sample, 2: Top/Bottom Sample]")
argparser.add_argument("--sample_split", type=float, default=0.5, help="ratio of 'positive' classes wanted")
argparser.add_argument("--n_samples", type=int, default=100, help="number of patients to sample")
argparser.add_argument("--pos_samples", type=int, default=0, help="number of positive patients to sample")
argparser.add_argument("--neg_samples", type=int, default=0, help="number of negative patients to sample")
argparser.add_argument("--rebalance_strategy", type=str, default="oversample", help="over/under sample")
argparser.add_argument("-B", "--batch_size", type=int, default=4, help="batch size")
argparser.add_argument("-N", "--n_model_search", type=int, default=1, help="number of models to search over")
argparser.add_argument("-S", "--early_stopping_metric", type=str, default="roc_auc_score", help="the metric for checkpointing the model")
argparser.add_argument("-T", "--tune_metric", type=str, default="roc_auc_score", help="the metric for "
"tuning the threshold. str-`roc_auc_score` for metric, float-`0.6` for fixed threshold")
argparser.add_argument("-E", "--n_epochs", type=int, default=1, help="number of training epochs")
argparser.add_argument("--checkpoint_burn", type=int, default=1, help="minimum number of training epochs before checkpointing")
argparser.add_argument("-M", "--n_procs", type=int, default=1, help="number processes (per model, CPU only)")
argparser.add_argument("-W", "--n_workers", type=int, default=1, help="number of grid search workers")
argparser.add_argument("-H", "--host_device", type=str, default="gpu", help="Host device (GPU|CPU)")
argparser.add_argument("-U", "--update_freq", type=int, default=5, help="progress bar update frequency")
argparser.add_argument("-C", "--checkpoint_freq", type=int, default=5, help="checkpoint frequency")
argparser.add_argument("-I", "--image_type", type=str, default='grey', choices=['grey', 'rgb'], help="the image type, grey/rgb")
argparser.add_argument("--use_cuda", action="store_true", help="whether to use GPU(CUDA)")
argparser.add_argument("--cache_data", action="store_true", help="whether to cache data into memory")
argparser.add_argument("--meta_data", action="store_true", help="whether to include meta data in model")
argparser.add_argument("--semi", action="store_true", help="whether to use semi model")
argparser.add_argument("--semi_dir", type=str, default='/lfs/1/heartmri/train32', help="path to train folder in semi model")
argparser.add_argument("--semi_csv", type=str, default="labels.csv", help="semi dataset labels csv filename")
argparser.add_argument("-F", "--n_frames", type=int, default=30, help="number of frames to select from a series")
argparser.add_argument("--use_frame_label", action="store_true", help="whether to use frame level labels.")
argparser.add_argument("--pretrained", action="store_true", help="whether to load pre_trained weights.")
argparser.add_argument("--requires_grad", action="store_true", help="whether to fine tuning the pre_trained model.")
argparser.add_argument("--noise_aware", action="store_true", help="whether to train on probability labels.")
argparser.add_argument("--series", type=int, default=0, choices=[0, 1, 2, 3], help="which series to load for training")
argparser.add_argument("--report", action="store_true", help="generate summary plots")
argparser.add_argument("--seed", type=int, default=1234, help="random model seed")
argparser.add_argument("--quiet", action="store_true", help="suppress logging")
argparser.add_argument("--verbose", action="store_true", help="print debug information to log")
argparser.add_argument("--top_selection", type=int, default=None, help="the number of positive cases to select from the test set")
argparser.add_argument("--tsne_save_coords", action="store_true", help="whether to save coords of tsne.")
argparser.add_argument("--tsne_pred_only", action="store_true", help="whether to plot preds only in tsne.")
argparser.add_argument("--tsne_classes", default=None, type=int, action='append', help="the classes used to plot tsne plots. defaultto read from labels Y_TRUE.")
argparser.add_argument("--save_embeds", action="store_true", help="whether to save the embedding of test set.")
args = argparser.parse_args()
if not args.quiet:
logging.basicConfig(format='%(message)s', stream=sys.stdout, level=logging.INFO)
if not torch.cuda.is_available() and args.host_device.lower() == 'gpu':
logger.error("Warning! CUDA not available, defaulting to CPU")
args.host_device = "cpu"
if torch.cuda.is_available():
logger.info("CUDA PyTorch Backends")
logger.info("torch.backends.cudnn.deterministic={}".format(torch.backends.cudnn.deterministic))
# print summary of this run
logger.info("python " + " ".join(sys.argv))
print_key_pairs(args.__dict__.items(), title="Command Line Args")
main(args)
| ukb-cardiac-mri-master | ukb/train.py |
"""
Phase Contrast Cardiac MRI Segmentation
Prepare MRIs for training a CNN model. Given an input directory of numpy image tensors
containing phase contrast cardiac MRIs:
- Generate candidate valve segmentations
- Rank candidates in terms of the most likely atrial valve
- Write segmentation masks to numpy files
- Export 32x32, 48x48 cropped images
@author jason-fries [at] stanford [dot] edu
"""
from __future__ import print_function
import os
import re
import sys
import time
import glob
import logging
import argparse
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from skimage.measure import label
from skimage import filters, segmentation
from skimage.segmentation import clear_border
from skimage.measure import label, regionprops
from skimage.morphology import closing, square, dilation, erosion
from scipy.ndimage.filters import uniform_filter
from skimage.restoration import denoise_wavelet, denoise_nl_means
from skimage.transform import rescale
from skimage.morphology import square, disk
from skimage.filters import threshold_local
from skimage import img_as_float, img_as_ubyte
from utils import *
logger = logging.getLogger(__name__)
def get_centroid(x, y, weights=None):
"""
Compute average of provided points. Optionally weight points (doesn't usually matter).
:param x:
:param y:
:param weights:
:return:
"""
x_mu = np.average(x, weights=weights).astype(int)
y_mu = np.average(y, weights=weights).astype(int)
return [x_mu, y_mu]
def score_segmentations(img, labeled, weighted_centroid=True, min_threshold=2, max_threshold=1000):
"""
    Compute a pixel mask for each labeled segment and calculate its centroid.
Discard masks with more than max_threshold pixels or less than min_threshold.
:param img:
:param labeled:
:param weighted_centroid:
:param min_threshold:
:param max_threshold:
:return:
"""
segments = []
for s_id in range(max(labeled.flatten()) + 1):
# get coordinates of this segment
y, x = np.where(labeled == s_id)
# pixel weights
w = img[labeled == s_id]
num_pixels = len(w.flatten())
if num_pixels >= max_threshold or num_pixels <= min_threshold:
continue
segments.append([np.sum(w), s_id, num_pixels, get_centroid(x, y, weights=w)])
# rank candidates
return rank_valve_cands(sorted(segments, reverse=1))
def rank_valve_cands(segments):
"""
Heuristic for selecting probable atrial valve. Take top 2 weighted segments and
check their spatial orientation. Basic idea is that the atrial valve is *usually*
the largest, highest intensity region located in the lower left region of the MRI image.
2/14/2018 Spot check of 194 examples: 192/194 correct
:param segments:
:return:
"""
assert len(segments) > 0
if len(segments) == 1:
return segments[0:1]
# select top 2 candidates
a = segments[0]
b = segments[1]
    c = segments[2:] if len(segments) > 2 else []
# segments.append([np.sum(w), s_id, num_pixels, get_centroid(x, y, weights=w)])
a_x, a_y = a[-1]
b_x, b_y = b[-1]
a_w = a[0]
b_w = b[0]
# when there is a large disparity between weighted areas, use the largest area
if b_w < 0.50 * a_w:
return segments
# check spatial position of 1st ranked segment vs. 2nd ranked
if (a_x >= b_x and a_y <= b_y) or (a_x <= b_x and a_y <= b_y):
target = [b, a] + c
else:
target = segments
return target
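# Worked example for the heuristic above (hypothetical numbers, added for exposition):
# given two comparable-weight candidates a = [120.0, 3, 40, [30, 10]] (upper region)
# and b = [100.0, 7, 35, [12, 38]] (lower-left region), the weight check passes
# (100 >= 0.5 * 120) and a_y (10) <= b_y (38), so rank_valve_cands([a, b])
# returns [b, a], promoting the lower-left segment to rank 1.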
def get_segmentation_masks(labeled, segments):
"""
n x height x width
1...n segmentation masks
    Each layer is a single region, ranked by likelihood of being the atrial valve
Last layer is the inverse mask (i.e., all non-valve areas)
:param X:
:return:
"""
masks = []
for seg in segments:
_, seg_id, _, _ = seg
mask = np.copy(labeled)
mask[mask != seg_id] = 0
mask[mask == seg_id] = 1
masks.append(mask)
mask = np.copy(labeled)
mask[mask == 0] = 100
mask[mask != 100] = 0
mask[mask == 100] = 1
masks.append(mask)
return np.array(masks, dtype=np.float32)
def get_segmentation_masks_v2(labeled, segments):
"""
Array of masks, each with a unique int id, 1...n
Each "layer" is a single region, ranked by liklihood of being the atrial valve 1..n
0 is the inverse mask (i.e., all non-valve areas)
:param X:
:return:
"""
mask = np.zeros(labeled.shape)
for i,seg in enumerate(segments):
        _, seg_id, _, _ = seg
        mask[np.where(labeled == seg_id)] = i+1
return mask
def crop(img, bbox):
"""
Crop image. Accepts frame data (frames X height X width) or a single 2D image
:param x:
:param bbox:
:return:
"""
assert len(img.shape) >= 2
if len(img.shape) == 3:
return img[...,bbox[0]:bbox[1],bbox[2]:bbox[3]]
else:
return img[bbox[0]:bbox[1], bbox[2]:bbox[3]]
def get_crop_region(x, y, dim=48):
"""
Get bounding box centered on the centroid of the point set x,y.
:param max_dim:
:return:
"""
width = max(x) - min(x)
height = max(y) - min(y)
    x_pad = (dim - width) // 2
    y_pad = (dim - height) // 2
# add pixels as needed
x_slack = 0
y_slack = 0
if (2 * x_pad) + width != dim:
x_slack = dim - ((2 * x_pad) + width)
if (2 * y_pad) + height != dim:
y_slack = dim - ((2 * y_pad) + height)
return [min(x) - x_pad - x_slack, max(x) + x_pad, min(y) - y_pad - y_slack, max(y) + y_pad]
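# Worked example for get_crop_region (hypothetical numbers, added for exposition):
# for a mask spanning x in [20, 30] and y in [22, 26] with dim=48, width=10 gives
# x_pad=19 (no slack) and height=4 gives y_pad=22 (no slack), so the returned
# bbox is [1, 49, 0, 48], i.e. a 48x48 window centered on the segment.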
def localize_aortic_valve(img, pooling="std", outfpath=None, debug=False):
"""
Use a set of heuristics to find the region of the aortic valve.
:return:
"""
# compute pooled pixel intensities
X = np.std(img, axis=0) if pooling == "std" else np.max(img, axis=0)
labeled = segment(X, upscale=1.0, denoise=False)
# rank segment candidates (most likely atrial valve)
segments = score_segmentations(X, labeled)
masks = get_segmentation_masks(labeled, segments)
# debug: save segmentations as a PNG
if debug:
target = segments[0]
cx, cy = target[-1]
plt.figure(figsize=(6, 6))
plt.imshow(labeled, cmap='tab10')
plt.scatter(x=cx, y=cy, c='r', s=20)
plt.savefig(outfpath)
plt.close()
return masks
def segment(X, upscale=1.0, denoise=False):
"""
:param X:
:param upscale:
:param denoise:
:return:
"""
if upscale > 1.0:
X = rescale(X, upscale)
if denoise:
X = denoise_wavelet(X)
thresh = filters.threshold_otsu(X)
bw = closing(X > thresh, square(3))
cleared = clear_border(bw)
cleared = rescale(cleared, 1.0 / upscale)
return label(cleared)
def export_segment(pid, fpath, fpath2, fpath3, outfpath, outfpath2, outfpath3,
dim, pooling="none", mask_type="none", fmt="npy", debug=True):
"""
Given an MRI numpy image of dim: frames X height X width,
generate a segmentation mask for valve candidates.
Segmentation code based on sample from
http://douglasduhaime.com/posts/simple-image-segmentation-with-scikit-image.html
:param fpath:
:param outfpath:
:param dim: crop dimensions
:param fmt: (frames|max_pool|std_pool|video) image format options
:param mask_type: (None|hard|soft) DEFAULT: None
:param debug:
:return:
"""
# 1: LOAD/PREPROCESS IMAGE
img = np.load(fpath)
if len(img.shape) != 3:
raise ValueError('DICOM / numpy array is empty')
# compute pixel intensity SD percentiles
X = np.std(img, axis=0)
# 2: SEGMENTATION
labeled = segment(X, upscale=1.0, denoise=False)
# rank segment candidates (most likely atrial valve)
segments = score_segmentations(X, labeled)
target = segments[0]
cx, cy = target[-1]
# debug: save segmentations as a PNG
if debug:
plt.figure(figsize=(6, 6))
plt.imshow(labeled, cmap='tab10')
plt.scatter(x=cx, y=cy, c='r', s=20)
plt.savefig(outfpath)
plt.close()
# save all valve masks (index 0 is the most likely atrial valve)
masks = get_segmentation_masks(labeled, segments)
# debug: dump each image mask as a PNG
if debug:
for m in range(masks.shape[0]):
plt.figure(figsize=(6, 6))
plt.imshow(masks[m], cmap='tab10')
plt.savefig(outfpath + "_{}".format(m))
plt.close()
# get segment mask points, compute bounding box, and crop original image
px, py = np.where(masks[0] == 1)
print("Patient X :", px)
print("Patient Y :", py)
bbox = get_crop_region(px, py, dim)
print("Bbox :", bbox)
print("X Center :", (bbox[1] + bbox[0])/2)
print("Y Center :", (bbox[3] + bbox[2])/2)
c_img = crop(img, bbox)
# Load Other Series Images and crop based on bbox
img2 = np.load(fpath2)
img3 = np.load(fpath3)
c_img2 = crop(img2, bbox)
c_img3 = crop(img3, bbox)
# mask data: by default, don't mask anything
mask = np.ones((bbox[1] - bbox[0], bbox[3] - bbox[2]), dtype=np.float32)
if mask_type in ["soft", "hard"]:
msk = np.copy(masks[0])
exp_msk = dilation(msk)
exp_msk = crop(exp_msk, bbox)
mask = filters.gaussian(exp_msk, sigma=1.01) if mask_type == "soft" else exp_msk
# 3: EXPORT IMAGE DATA
#img_path = "{}_{}x{}".format(outfpath, dim, dim)
img_path = "{}".format(outfpath)
img_path = "{}_{}pool".format(img_path, pooling) if pooling != "none" else img_path
img_path = "{}_{}".format(img_path, mask_type) if mask_type != "none" else img_path
img_path2 = "{}".format(outfpath2)
img_path2 = "{}_{}pool".format(img_path2, pooling) if pooling != "none" else img_path2
img_path2 = "{}_{}".format(img_path2, mask_type) if mask_type != "none" else img_path2
img_path3 = "{}".format(outfpath3)
img_path3 = "{}_{}pool".format(img_path3, pooling) if pooling != "none" else img_path3
img_path3 = "{}_{}".format(img_path3, mask_type) if mask_type != "none" else img_path3
# pool data
if pooling in ["max", "std", "z_add"]:
if pooling == "max":
c_img = np.max(c_img, axis=0)
c_img2 = np.max(c_img2, axis=0)
c_img3 = np.max(c_img3, axis=0)
elif pooling == "std":
c_img = np.std(c_img, axis=0)
c_img2 = np.std(c_img2, axis=0)
c_img3 = np.std(c_img3, axis=0)
elif pooling == "z_add":
c_img = z_score_normalize(c_img)
c_img = np.sum(c_img, axis=0)
c_img = (c_img - np.min(c_img)) / (np.max(c_img) - np.min(c_img))
c_img2 = z_score_normalize(c_img2)
c_img2 = np.sum(c_img2, axis=0)
c_img2 = (c_img2 - np.min(c_img2)) / (np.max(c_img2) - np.min(c_img2))
c_img3 = z_score_normalize(c_img3)
c_img3 = np.sum(c_img3, axis=0)
c_img3 = (c_img3 - np.min(c_img3)) / (np.max(c_img3) - np.min(c_img3))
c_img = (mask * c_img)
# export format
if fmt == "png":
plt.figure(figsize=(4, 4))
plt.imshow(c_img, cmap='gray')
plt.savefig(outfpath)
plt.figure(figsize=(4, 4))
plt.imshow(c_img2, cmap='gray')
plt.savefig(outfpath2)
plt.figure(figsize=(4, 4))
plt.imshow(c_img3, cmap='gray')
plt.savefig(outfpath3)
elif fmt == "mp4":
seq_to_video(c_img, img_path, width=4, height=4)
seq_to_video(c_img2, img_path2, width=4, height=4)
seq_to_video(c_img3, img_path3, width=4, height=4)
else:
np.save(img_path, c_img)
np.save(img_path2, c_img2)
np.save(img_path3, c_img3)
# save segmentation masks
# np.save("{}_masks".format(outfpath), masks.astype(np.int8))
@timeit
def main(args):
np.random.seed(1234)
# ------------------------------------------------------------------------------
# Load Files
# ------------------------------------------------------------------------------
filelist = glob.glob("{}*.npy".format(args.indir))
if args.cohort or args.patients:
# filter images to only include those in the provided cohort
if args.cohort:
ids = map(lambda x:x.strip(), open(args.cohort,"rU").read().splitlines())
else:
ids = args.patients.strip().split(",")
rgx = "({})".format("|".join(ids))
filelist = [fn for fn in filelist if re.search(rgx, fn)]
filelist = np.random.choice(filelist, args.samples, replace=False) if args.samples and len(filelist)>args.samples else filelist
logger.info("Loaded {} MRIs".format(len(filelist)))
# ------------------------------------------------------------------------------
# Segment MRIs
# ------------------------------------------------------------------------------
errors = []
for fpath in filelist:
try:
pid = fpath.split("/")[-1].split(".")[0] #re.search("^(\d+)[_]", fpath.split("/")[-1]).group(1)
print("PATIENT ID:", pid)
fpath2 = "{}/{}.npy".format(args.indir2, pid)
fpath3 = "{}/{}.npy".format(args.indir3, pid)
outfpath = "{}/{}".format(args.outdir, pid)
outfpath2 = "{}/{}".format(args.outdir2, pid)
outfpath3 = "{}/{}".format(args.outdir3, pid)
#img = np.load(fpath)
#masks = localize_aortic_valve(img)
#bbox = [region for region in regionprops(masks[0])][0].bbox
#bbox = get_crop_region(x, y, dim=48):
export_segment(pid, fpath, fpath2=fpath2, fpath3=fpath3,
dim=args.dim, outfpath=outfpath,
outfpath2=outfpath2, outfpath3=outfpath3,
pooling=args.pooling, mask_type=args.mask,
fmt=args.format, debug=args.debug)
except Exception as e:
logger.error("[{}] segmenting image: {}".format(pid, e))
errors += [pid]
num_errors = len(errors)
if num_errors > 0:
logger.error("{} images failed during segmentation".format(num_errors))
logger.info("{} images sucessfully segmented".format(len(filelist) - num_errors))
if __name__ == "__main__":
argparser = argparse.ArgumentParser()
argparser.add_argument("-i", "--indir", type=str, required=True, help="load MRIs from indir")
argparser.add_argument("-o", "--outdir", type=str, required=True, help="save files to outdir")
argparser.add_argument("--indir2", type=str, required=True, help="location to MRI series 2")
argparser.add_argument("--outdir2", type=str, required=True, help="save files to outdir 2")
argparser.add_argument("--indir3", type=str, required=True, help="location of MRI series 3")
argparser.add_argument("--outdir3", type=str, required=True, help="save files to outdir 3")
argparser.add_argument("-c", "--cohort", type=None, default=None, help="load from list of patient pseudo IDs")
argparser.add_argument("-p", "--patients", type=str, default=None, help="load string of patient pseudo IDs")
argparser.add_argument("-n", "--samples", type=int, default=None, help="sample n MRI sequences")
argparser.add_argument("-D", "--dim", type=int, default=32, help="output dimension - default: 32x32")
argparser.add_argument("-P", "--pooling", action='store', choices=['none', 'max', 'std', 'mean', 'z_add'], default="none",
help="pooling method")
argparser.add_argument("-M", "--mask", action='store', choices=['none', 'hard', 'soft'],
default="none", help="apply segmentation mask to atrial valve")
argparser.add_argument("-F", "--format", action='store', choices=['npy', 'png', 'mp4'],
default="npy", help="export format")
argparser.add_argument("--create", type=int, default=None, help="create random images")
argparser.add_argument("--debug", action="store_true", help="dump debug PNGs of all segmentation masks")
argparser.add_argument("--quiet", action="store_true", help="suppress logging")
args = argparser.parse_args()
# enable logging
if not args.quiet:
FORMAT = '%(levelname)s|%(name)s| %(message)s'
logging.basicConfig(format=FORMAT, stream=sys.stdout, level=logging.INFO)
# generate a random dataset so that we can test data loading
if args.create:
generate_random_dataset(args.outdir, n_samples=args.create, dim=(30, args.dim, args.dim))
sys.exit()
if args.format == "mp4" and args.pooling != "none":
logger.error("pooled data cannot be exported to MP4")
elif args.format == "png" and args.pooling not in ["max", "std", "mean", "z_add"]:
logger.error("un-pooled data cannot be exported to PNG")
sys.exit()
# print all argument variables
print_key_pairs(args.__dict__.items(), title="Command Line Args")
main(args)
| ukb-cardiac-mri-master | ukb/segmentMultiple.py |
"""
Phase Contrast Cardiac MRI Segmentation
Prepare MRIs for training a CNN model. Given an input directory of numpy image tensors
containing phase contrast cardiac MRIs:
- Generate candidate valve segmentations
- Rank candidates in terms of the most likely atrial valve
- Write segmentation masks to numpy files
- Export 32x32, 48x48 cropped images
@author jason-fries [at] stanford [dot] edu
"""
from __future__ import print_function
import os
import re
import sys
import time
import glob
import logging
import argparse
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from skimage.measure import label
from skimage import filters, segmentation
from skimage.segmentation import clear_border
from skimage.measure import label, regionprops
from skimage.morphology import closing, square, dilation, erosion
from scipy.ndimage.filters import uniform_filter
from skimage.restoration import denoise_wavelet, denoise_nl_means
from skimage.transform import rescale
from skimage.morphology import square, disk
from skimage.filters import threshold_local
from skimage import img_as_float, img_as_ubyte
from utils import *
logger = logging.getLogger(__name__)
def get_centroid(x, y, weights=None):
"""
Compute average of provided points. Optionally weight points (doesn't usually matter).
:param x:
:param y:
:param weights:
:return:
"""
x_mu = np.average(x, weights=weights).astype(int)
y_mu = np.average(y, weights=weights).astype(int)
return [x_mu, y_mu]
def score_segmentations(img, labeled, weighted_centroid=True, min_threshold=2, max_threshold=1000):
"""
    Compute a pixel mask for each labeled segment and calculate its centroid.
Discard masks with more than max_threshold pixels or less than min_threshold.
:param img:
:param labeled:
:param weighted_centroid:
:param min_threshold:
:param max_threshold:
:return:
"""
segments = []
for s_id in range(max(labeled.flatten()) + 1):
# get coordinates of this segment
y, x = np.where(labeled == s_id)
# pixel weights
w = img[labeled == s_id]
num_pixels = len(w.flatten())
if num_pixels >= max_threshold or num_pixels <= min_threshold:
continue
segments.append([np.sum(w), s_id, num_pixels, get_centroid(x, y, weights=w)])
# rank candidates
return rank_valve_cands(sorted(segments, reverse=1))
def rank_valve_cands(segments):
"""
Heuristic for selecting probable atrial valve. Take top 2 weighted segments and
check their spatial orientation. Basic idea is that the atrial valve is *usually*
the largest, highest intensity region located in the lower left region of the MRI image.
2/14/2018 Spot check of 194 examples: 192/194 correct
:param segments:
:return:
"""
assert len(segments) > 0
if len(segments) == 1:
return segments[0:1]
# select top 2 candidates
a = segments[0]
b = segments[1]
    c = segments[2:] if len(segments) > 2 else []
# segments.append([np.sum(w), s_id, num_pixels, get_centroid(x, y, weights=w)])
a_x, a_y = a[-1]
b_x, b_y = b[-1]
a_w = a[0]
b_w = b[0]
# when there is a large disparity between weighted areas, use the largest area
if b_w < 0.50 * a_w:
return segments
# check spatial position of 1st ranked segment vs. 2nd ranked
if (a_x >= b_x and a_y <= b_y) or (a_x <= b_x and a_y <= b_y):
target = [b, a] + c
else:
target = segments
return target
def get_segmentation_masks(labeled, segments):
"""
n x height x width
1...n segmentation masks
    Each layer is a single region, ranked by likelihood of being the atrial valve
Last layer is the inverse mask (i.e., all non-valve areas)
:param X:
:return:
"""
masks = []
for seg in segments:
_, seg_id, _, _ = seg
mask = np.copy(labeled)
mask[mask != seg_id] = 0
mask[mask == seg_id] = 1
masks.append(mask)
mask = np.copy(labeled)
mask[mask == 0] = 100
mask[mask != 100] = 0
mask[mask == 100] = 1
masks.append(mask)
return np.array(masks, dtype=np.float32)
def get_segmentation_masks_v2(labeled, segments):
"""
Array of masks, each with a unique int id, 1...n
Each "layer" is a single region, ranked by liklihood of being the atrial valve 1..n
0 is the inverse mask (i.e., all non-valve areas)
:param X:
:return:
"""
mask = np.zeros(labeled.shape)
for i,seg in enumerate(segments):
        _, seg_id, _, _ = seg
        mask[np.where(labeled == seg_id)] = i+1
return mask
def crop(img, bbox):
"""
Crop image. Accepts frame data (frames X height X width) or a single 2D image
:param x:
:param bbox:
:return:
"""
assert len(img.shape) >= 2
if len(img.shape) == 3:
return img[...,bbox[0]:bbox[1],bbox[2]:bbox[3]]
else:
return img[bbox[0]:bbox[1], bbox[2]:bbox[3]]
def get_crop_region(x, y, dim=48):
"""
Get bounding box centered on the centroid of the point set x,y.
:param max_dim:
:return:
"""
width = max(x) - min(x)
height = max(y) - min(y)
    x_pad = (dim - width) // 2
    y_pad = (dim - height) // 2
# add pixels as needed
x_slack = 0
y_slack = 0
if (2 * x_pad) + width != dim:
x_slack = dim - ((2 * x_pad) + width)
if (2 * y_pad) + height != dim:
y_slack = dim - ((2 * y_pad) + height)
return [min(x) - x_pad - x_slack, max(x) + x_pad, min(y) - y_pad - y_slack, max(y) + y_pad]
def localize_aortic_valve(img, pooling="std", outfpath=None, debug=False):
"""
Use a set of heuristics to find the region of the aortic valve.
:return:
"""
# compute pooled pixel intensities
X = np.std(img, axis=0) if pooling == "std" else np.max(img, axis=0)
labeled = segment(X, upscale=1.0, denoise=False)
# rank segment candidates (most likely atrial valve)
segments = score_segmentations(X, labeled)
masks = get_segmentation_masks(labeled, segments)
# debug: save segmentations as a PNG
if debug:
target = segments[0]
cx, cy = target[-1]
plt.figure(figsize=(6, 6))
plt.imshow(labeled, cmap='tab10')
plt.scatter(x=cx, y=cy, c='r', s=20)
plt.savefig(outfpath)
plt.close()
return masks
def segment(X, upscale=1.0, denoise=False):
"""
:param X:
:param upscale:
:param denoise:
:return:
"""
if upscale > 1.0:
X = rescale(X, upscale)
if denoise:
X = denoise_wavelet(X)
thresh = filters.threshold_otsu(X)
bw = closing(X > thresh, square(3))
cleared = clear_border(bw)
cleared = rescale(cleared, 1.0 / upscale)
return label(cleared)
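# Typical usage of the helpers above (illustrative sketch only; export_segment
# below wires the same steps together for the real pipeline):
#   img = np.load("patient.npy")                       # frames x height x width
#   X = np.std(img, axis=0)                            # std-pool over time
#   labeled = segment(X)                               # Otsu -> closing -> clear_border -> label
#   segments = score_segmentations(X, labeled)         # ranked valve candidates
#   masks = get_segmentation_masks(labeled, segments)  # masks[0] ~ most likely valve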
def export_segment(fpath, outfpath, dim, pooling="none", mask_type="none", fmt="npy", debug=True):
"""
Given an MRI numpy image of dim: frames X height X width,
generate a segmentation mask for valve candidates.
Segmentation code based on sample from
http://douglasduhaime.com/posts/simple-image-segmentation-with-scikit-image.html
:param fpath:
:param outfpath:
:param dim: crop dimensions
:param fmt: (frames|max_pool|std_pool|video) image format options
:param mask_type: (None|hard|soft) DEFAULT: None
:param debug:
:return:
"""
# 1: LOAD/PREPROCESS IMAGE
img = np.load(fpath)
if len(img.shape) != 3:
raise ValueError('DICOM / numpy array is empty')
# compute pixel intensity SD percentiles
X = np.std(img, axis=0)
# 2: SEGMENTATION
labeled = segment(X, upscale=1.0, denoise=False)
# rank segment candidates (most likely atrial valve)
segments = score_segmentations(X, labeled)
target = segments[0]
cx, cy = target[-1]
# debug: save segmentations as a PNG
if debug:
plt.figure(figsize=(6, 6))
plt.imshow(labeled, cmap='tab10')
plt.scatter(x=cx, y=cy, c='r', s=20)
plt.savefig(outfpath)
plt.close()
# save all valve masks (index 0 is the most likely atrial valve)
masks = get_segmentation_masks(labeled, segments)
# debug: dump each image mask as a PNG
if debug:
for m in range(masks.shape[0]):
plt.figure(figsize=(6, 6))
plt.imshow(masks[m], cmap='tab10')
plt.savefig(outfpath + "_{}".format(m))
plt.close()
# get segment mask points, compute bounding box, and crop original image
px, py = np.where(masks[0] == 1)
bbox = get_crop_region(px, py, dim)
c_img = crop(img, bbox)
# mask data: by default, don't mask anything
mask = np.ones((bbox[1] - bbox[0], bbox[3] - bbox[2]), dtype=np.float32)
if mask_type in ["soft", "hard"]:
msk = np.copy(masks[0])
exp_msk = dilation(msk)
exp_msk = crop(exp_msk, bbox)
mask = filters.gaussian(exp_msk, sigma=1.01) if mask_type == "soft" else exp_msk
# 3: EXPORT IMAGE DATA
img_path = "{}_{}x{}".format(outfpath, dim, dim)
img_path = "{}_{}pool".format(img_path, pooling) if pooling != "none" else img_path
img_path = "{}_{}".format(img_path, mask_type) if mask_type != "none" else img_path
# pool data
if pooling in ["max", "std", "z_add"]:
if pooling == "max":
c_img = np.max(c_img, axis=0)
elif pooling == "std":
c_img = np.std(c_img, axis=0)
elif pooling == "z_add":
c_img = z_score_normalize(c_img)
c_img = np.sum(c_img, axis=0)
c_img = (c_img - np.min(c_img)) / (np.max(c_img) - np.min(c_img))
c_img = (mask * c_img)
# export format
if fmt == "png":
plt.figure(figsize=(4, 4))
plt.imshow(c_img, cmap='gray')
plt.savefig(outfpath)
elif fmt == "mp4":
seq_to_video(c_img, img_path, width=4, height=4)
else:
np.save(img_path, c_img)
# save segmentation masks
np.save("{}_masks".format(outfpath), masks.astype(np.int8))
@timeit
def main(args):
np.random.seed(1234)
# ------------------------------------------------------------------------------
# Load Files
# ------------------------------------------------------------------------------
filelist = glob.glob("{}*.npy".format(args.indir))
if args.cohort or args.patients:
# filter images to only include those in the provided cohort
if args.cohort:
ids = map(lambda x:x.strip(), open(args.cohort,"rU").read().splitlines())
else:
ids = args.patients.strip().split(",")
rgx = "({})".format("|".join(ids))
filelist = [fn for fn in filelist if re.search(rgx, fn)]
filelist = np.random.choice(filelist, args.samples, replace=False) if args.samples and len(filelist)>args.samples else filelist
logger.info("Loaded {} MRIs".format(len(filelist)))
# ------------------------------------------------------------------------------
# Segment MRIs
# ------------------------------------------------------------------------------
errors = []
for fpath in filelist:
try:
pid = re.search("^(\d+)[_]", fpath.split("/")[-1]).group(1)
outfpath = "{}/{}".format(args.outdir, pid)
#img = np.load(fpath)
#masks = localize_aortic_valve(img)
#bbox = [region for region in regionprops(masks[0])][0].bbox
#bbox = get_crop_region(x, y, dim=48):
export_segment(fpath, dim=args.dim, outfpath=outfpath, pooling=args.pooling,
mask_type=args.mask, fmt=args.format, debug=args.debug)
except Exception as e:
logger.error("[{}] segmenting image: {}".format(pid, e))
errors += [pid]
num_errors = len(errors)
if num_errors > 0:
logger.error("{} images failed during segmentation".format(num_errors))
logger.info("{} images sucessfully segmented".format(len(filelist) - num_errors))
if __name__ == "__main__":
argparser = argparse.ArgumentParser()
argparser.add_argument("-i", "--indir", type=str, default=None, help="load MRIs from indir")
argparser.add_argument("-o", "--outdir", type=str, default=None, help="save files to outdir")
argparser.add_argument("-c", "--cohort", type=None, default=None, help="load from list of patient pseudo IDs")
argparser.add_argument("-p", "--patients", type=str, default=None, help="load string of patient pseudo IDs")
argparser.add_argument("-n", "--samples", type=int, default=None, help="sample n MRI sequences")
argparser.add_argument("-D", "--dim", type=int, default=32, help="output dimension - default: 32x32")
argparser.add_argument("-P", "--pooling", action='store', choices=['none', 'max', 'std', 'mean', 'z_add'], default="none",
help="pooling method")
argparser.add_argument("-M", "--mask", action='store', choices=['none', 'hard', 'soft'],
default="none", help="apply segmentation mask to atrial valve")
argparser.add_argument("-F", "--format", action='store', choices=['npy', 'png', 'mp4'],
default="npy", help="export format")
argparser.add_argument("--create", type=int, default=None, help="create random images")
argparser.add_argument("--debug", action="store_true", help="dump debug PNGs of all segmentation masks")
argparser.add_argument("--quiet", action="store_true", help="suppress logging")
args = argparser.parse_args()
# enable logging
if not args.quiet:
FORMAT = '%(levelname)s|%(name)s| %(message)s'
logging.basicConfig(format=FORMAT, stream=sys.stdout, level=logging.INFO)
# generate a random dataset so that we can test data loading
if args.create:
generate_random_dataset(args.outdir, n_samples=args.create, dim=(30, args.dim, args.dim))
sys.exit()
if args.format == "mp4" and args.pooling != "none":
logger.error("pooled data cannot be exported to MP4")
elif args.format == "png" and args.pooling not in ["max", "std", "mean", "z_add"]:
logger.error("un-pooled data cannot be exported to PNG")
sys.exit()
# print all argument variables
print_key_pairs(args.__dict__.items(), title="Command Line Args")
main(args)
| ukb-cardiac-mri-master | ukb/segment.py |
"""
Hacky script to generate synthetic data to debug model training.
Default configuration generates sequences based on empirical attributes
"""
from __future__ import print_function
import os
import sys
import math
import argparse
import numpy as np
from skimage import draw
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import skimage
from skimage.util import random_noise
from skimage.draw import polygon
from skimage.filters import gaussian
try:
# for python2
import cPickle
except ImportError:
# for python3
import _pickle as cPickle
from utils import *
import logging
logger = logging.getLogger(__name__)
# empirical distributions from 5000K MRI samples
window_counts = {2: 4, 3: 7, 4: 5, 5: 13, 6: 31, 7: 188, 8: 609, 9: 1170, 10: 1216,
11: 876, 12: 479, 13: 233, 14: 85, 15: 49, 16: 14, 17: 9, 18: 1}
start_counts = {0: 228, 1: 3095, 2: 1536, 3: 113, 4: 12}
def get_empirical_dist(counts):
prob = []
w, W = 0.0, float(np.sum(list(counts.values())))
for i in sorted(counts):
w += counts[i]
prob.append(w / W)
return [sorted(counts.keys()), [0.0] + prob]
window_dist = get_empirical_dist(window_counts)
start_dist = get_empirical_dist(start_counts)
def sample_empirical(dist):
"""
:param dist:
:return:
"""
x = np.random.random()
keys, values = dist
    for i in range(len(keys)):
        if x >= values[i] and x < values[i + 1]:
            return keys[i]
    return keys[-1]
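# Illustrative walk-through of the inverse-CDF sampling above (approximate numbers):
#   start_dist = get_empirical_dist({0: 228, 1: 3095, 2: 1536, 3: 113, 4: 12})
#   -> keys = [0, 1, 2, 3, 4], cumulative ~= [0.0, 0.05, 0.67, 0.97, 1.0, 1.0]
#   a uniform draw x = 0.30 falls in [0.05, 0.67), so the sampled start frame is 1.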
def sample_mri_interval():
"""
Use empircal distribution of SD to generate a
peak value: mean/SD: 0.62573123135359987, 0.23739626787134235
NOTE: If this were real, we'd want to sample a SD distribution instead of a peak value
"""
start = sample_empirical(start_dist)
duration = sample_empirical(window_dist)
peak = (duration / 2) + start
peak_value = np.random.normal(0.62573123135359987, 0.23739626787134235)
return start, duration, peak, round(peak_value, 4)
def get_curve(start, end, peak, peak_max):
"""
Fit curve to 3 points (simulate transitions for generating animations)
:param start:
:param end:
:param peak:
:param peak_max:
:return:
"""
points = np.array([(start, 0), (peak, peak_max), (end, 0)])
# get x and y vectors
x = points[:,0]
y = points[:,1]
# calculate polynomial
z = np.polyfit(x, y, 2)
f = np.poly1d(z)
# calculate new x's and y's
x_new = np.linspace(x[0], x[-1], 100)
y_new = f(x_new)
x_new = map(int, x_new)
xy = []
curr = None
for x,y in zip(x_new, y_new):
if x == curr:
continue
curr = x
xy.append((x,y))
x,y = zip(*xy)
y = [abs(round(y_hat/peak_max,3)) for y_hat in y]
return y
def add_noise(img):
"""
Hack to add noise to images
:param img:
:return:
"""
img = gaussian(img, sigma=0.5, preserve_range=True)
img = random_noise(img, mode='gaussian', var=0.01, mean=0.01)
img = random_noise(img, mode='speckle', var=0.00001)
img = gaussian(img, sigma=0.7)
img = random_noise(img, mode='speckle', var=0.00001)
img = random_noise(img, mode='gaussian', var=0.001, mean=0.0001)
return img
def sample_mri_class(bav_prob=0.5, num_frames=30, width=48, height=48):
"""
:param bav_prob:
:param num_frames:
:param width:
:param height:
:return:
"""
start, duration, peak, peak_max = sample_mri_interval()
curve = get_curve(start, start + duration, peak, peak_max)
curve = ([0.] * start) + curve
curve = curve + [0.] * (num_frames - len(curve))
curve = list(map(lambda x: x ** 2, curve))
size = np.random.uniform(7.0, 9.25)
r_radius, c_radius = np.random.uniform(0.9, 1.0), np.random.uniform(0.9, 1.0)
class_type = False if np.random.random() >= bav_prob else True
r_radius = r_radius * size
c_radius = c_radius * size
seq = np.zeros((num_frames, width, height))
for i in range(num_frames):
cx,cy = [width/2, height/2]
cx += np.random.randint(-1, 1)
cy += np.random.randint(-1, 1)
if i >= start and i < start + duration:
rr, cc = draw.ellipse(cx, cy, r_radius * curve[i] if class_type else r_radius,
c_radius if class_type else c_radius * curve[i])
seq[i][rr, cc] = 1.0 * math.sqrt(curve[i])
seq[i] = add_noise(seq[i])
return seq, class_type
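# Note (added for exposition): the synthetic positive class modulates the ellipse's
# row radius with the intensity curve while keeping the column radius fixed, and the
# negative class does the opposite, so the two classes differ only in how the bright
# "valve" region deforms over the simulated cardiac cycle.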
def random_color(prob=0.5, width=48, height=48):
"""
Generate random class from:
1) Black image
2) White image
:param prob:
:param width:
:param height:
:return:
"""
class_type = False if np.random.random() >= prob else True
img = np.zeros((1, width, height)) if class_type else np.ones((1, width, height))
return img, class_type
def random_shape(prob=0.5, width=48, height=48):
"""
Sample random class from:
1) 1 circle
2) 2 circles
:param prob:
:param width:
:param height:
:return:
"""
class_type = False if np.random.random() >= prob else True
col = 0.0 if class_type else 1.0
w = width * np.random.uniform(0.4, 0.8)
if class_type:
img = np.zeros((1, width, height))
mx, my = width / 2, height / 2
cx = mx * np.random.rand()
cy = height * np.random.rand()
rr, cc = draw.circle(cx, cy, radius=np.random.random() * width / 6.0 , shape=img.shape[1:3])
img[0, rr, cc] = 1.0
cx = (width - mx) * np.random.rand()
cy = height * np.random.rand()
rr, cc = draw.circle(cx, cy, radius=np.random.random() * width / 6.0, shape=img.shape[1:3])
img[0, rr, cc] = 1.0
else:
img = np.zeros((1, width, height))
cx, cy = [width / 2, height / 2]
cx += np.random.randint(-width / 4, height / 4)
cy += np.random.randint(-width / 4, height / 4)
rr,cc = draw.circle(cx, cy, radius=np.random.random() * 15, shape=img.shape[1:3])
img[0, rr, cc] = 1.0
img[0] = gaussian(img[0], sigma=0.75, preserve_range=True)
return img, class_type
def generate_random_dataset(outdir, start_id, instance_generator, n_samples=100, prob=0.5, num_frames=15, width=48, height=48, debug=False):
"""
Create random numpy matrices in the same format as our MRI images.
Generate some simple circle shapes to test segmentation.
:param n_samples:
:param dim:
:return:
"""
labels = {}
start_id += 1000000
for i in range(start_id, start_id + n_samples):
fpath = "{}/{}".format(outdir, i)
if instance_generator == "mri":
X, y = sample_mri_class(bav_prob=prob, num_frames=num_frames, width=width, height=height)
elif instance_generator == "bw":
X, y = random_color(prob=prob, width=args.dim, height=args.dim)
else:
X, y = random_shape(prob=prob, width=args.dim, height=args.dim)
X = seq_as_ubyte(X)
if debug:
seq_to_video(X, fpath, width=4, height=4)
np.save(fpath, X)
labels[i] = y
with open("{}/labels.csv".format(outdir),"w") as fp:
fp.write("ID,LABEL\n")
for pid in sorted(labels):
fp.write("{},{}\n".format(pid, int(labels[pid])))
def main(args):
np.random.seed(args.seed)
if not os.path.exists(args.outdir):
logger.error("{} does not exist!".format(args.outdir))
return
if args.full:
for i, dirname in enumerate(["train","dev","test"]):
fpath = "{}/{}".format(args.outdir, dirname)
if not os.path.exists(fpath):
os.mkdir(fpath)
generate_random_dataset(fpath, i * args.samples, instance_generator=args.generator, n_samples=args.samples,
prob=args.prob, num_frames=args.num_frames, width=args.dim, height=args.dim,
debug=args.debug)
else:
generate_random_dataset(args.outdir, 0, instance_generator=args.generator, n_samples=args.samples, prob=args.prob,
num_frames=args.num_frames, width = args.dim, height = args.dim, debug=args.debug)
if __name__ == "__main__":
argparser = argparse.ArgumentParser()
argparser.add_argument("-o", "--outdir", type=str, default=None, help="save files to outdir")
argparser.add_argument("-n", "--samples", type=int, default=None, help="create n MRI sequences")
argparser.add_argument("--full", action="store_true", help="create full train/validation/test splits")
argparser.add_argument("-I", "--generator", type=str, default="bw", help="bw / shape / MRI sequences")
argparser.add_argument("-F", "--num_frames", type=int, default=30, help="output frame length - default: 30")
argparser.add_argument("-D", "--dim", type=int, default=32, help="output dimension - default: 32x32")
argparser.add_argument("-P", "--prob", type=float, default=0.5, help="prob")
argparser.add_argument("--debug", action="store_true", help="export debug sequences as MP4")
argparser.add_argument("--seed", type=int, default=1234, help="random seed")
FORMAT = '%(levelname)s|%(name)s| %(message)s'
logging.basicConfig(format=FORMAT, stream=sys.stdout, level=logging.INFO)
args = argparser.parse_args()
print_key_pairs(args.__dict__, title="Parameters")
main(args)
| ukb-cardiac-mri-master | ukb/create_synthetic_data.py |
from __future__ import print_function
from __future__ import division
from sklearn.metrics import fbeta_score as sk_fbeta
from functools import wraps
__all__ = ['f05_score',
'f04_score']
def fbeta_create(beta=1.0):
def fbeta_wrapper(func):
@wraps(func)
def fbeta_score(*args, **kwargs):
kwargs.update({"beta": beta})
return sk_fbeta(*args, **kwargs)
return fbeta_score
return fbeta_wrapper
@fbeta_create(beta=0.5)
def f05_score(*args, **kwargs):
pass
@fbeta_create(beta=0.4)
def f04_score(*args, **kwargs):
pass
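# Example usage (illustrative): both wrappers accept the same arguments as
# sklearn.metrics.fbeta_score minus `beta`; beta < 1 favors precision over recall,
# with f04_score doing so more strongly than f05_score, e.g.
#   f05_score(y_true, y_pred)   # equivalent to fbeta_score(y_true, y_pred, beta=0.5)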
| ukb-cardiac-mri-master | ukb/metrics/fbeta.py |
from .base import *
from .fbeta import *
| ukb-cardiac-mri-master | ukb/metrics/__init__.py |
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import f1_score, precision_score, recall_score, \
accuracy_score, confusion_matrix, classification_report, log_loss, \
roc_auc_score, roc_curve, precision_recall_curve, auc
__all__ = ["binary_scores_from_counts",
"print_metricatk",
"print_scores",
"classification_summary",
"prc_auc_score",
"dcg_score",
"ndcg_score",
"ndcg_score2",
"f1_score",
"precision_score",
"recall_score",
"accuracy_score",
"confusion_matrix",
"classification_report",
"log_loss",
"roc_auc_score",
"roc_curve",
"precision_recall_curve",
"auc"]
# wrappers for using data loaders to compute standard metrics
def binary_scores_from_counts(ntp, nfp, ntn, nfn):
"""
Precision, recall, and F1 scores from counts of TP, FP, TN, FN.
Example usage:
p, r, f1 = binary_scores_from_counts(*map(len, error_sets))
"""
prec = ntp / float(ntp + nfp) if ntp + nfp > 0 else 0.0
rec = ntp / float(ntp + nfn) if ntp + nfn > 0 else 0.0
f1 = (2 * prec * rec) / (prec + rec) if prec + rec > 0 else 0.0
return prec, rec, f1
def print_metricatk(y_true, y_pred, y_proba):
"""
print out the F1/Precision/Recall at k=5,10..
"""
sorted_indexes = np.argsort(y_proba)
print("========================================")
print("Metric at K (5, 10, ...)")
print("========================================")
for k in range(5, y_true.shape[0], 5):
target = sorted_indexes[-k:]
prec = y_true[target].sum()/float(k)
rec = y_true[target].sum()/float(y_true.sum())
f1 = (2 * prec * rec) / (prec + rec) if prec + rec > 0 else 0.0
print("At {:>4}: | Precision: {:5.1f} | Recall: {:5.1f} | F1: {:5.1f}".format(k, prec*100.0, rec*100.0, f1*100.0))
if rec == 1:
break
def print_scores(ntp, nfp, ntn, nfn,
pos_acc, neg_acc,
prec, rec, f1, roc, prc,
ndcg, title='Scores'):
print("========================================")
print(title)
print("========================================")
print("Pos. class accuracy: {:2.1f}".format(pos_acc * 100))
print("Neg. class accuracy: {:2.1f}".format(neg_acc * 100))
print("----------------------------------------")
print("AUC: {:2.1f}".format(roc * 100))
print("PRC: {:2.1f}".format(prc * 100))
print("NDCG: {:2.1f}".format(ndcg * 100))
print("----------------------------------------")
print("Precision: {:2.1f}".format(prec * 100))
print("Recall: {:2.1f}".format(rec * 100))
print("F1: {:2.1f}".format(f1 * 100))
print("----------------------------------------")
print("TP: {} | FP: {} | TN: {} | FN: {}".format(ntp, nfp, ntn, nfn))
print("========================================\n")
def classification_summary(y_true, y_pred, classes, y_proba, verbose=True):
"""
Assumes binary classification
:param model:
:param data_loader:
:return:
"""
#print_metricatk(y_true, y_pred, y_proba)
roc = roc_auc_score(y_true, y_proba)
prc = prc_auc_score(y_true, y_proba)
if len(classes) <= 2:
tn, fp, fn, tp = confusion_matrix(y_true, y_pred, labels=[0,1]).ravel()
# compute metrics
prec, rec, f1 = binary_scores_from_counts(tp, fp, tn, fn)
pos_acc = tp / float(tp + fn) if tp + fn > 0 else 0.0
neg_acc = tn / float(tn + fp) if tn + fp > 0 else 0.0
ndcg = ndcg_score(y_true, y_proba)
if verbose:
print_scores(tp, fp, tn, fn, pos_acc, neg_acc, prec, rec, f1, roc, prc, ndcg)
header = ["ndcg", "roc", "prc", "precision", "recall", "f1", "pos_acc", "neg_acc", "tp", "fp", "tn", "fn"]
return dict(zip(header,(ndcg, roc, prc, prec, rec, f1, pos_acc, neg_acc, tp, fp, tn, fn)))
else:
print(classification_report(y_true, y_pred, target_names=classes, digits=3))
return {}
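# Example usage (illustrative; class names are hypothetical):
#   metrics = classification_summary(y_true, y_pred, classes=["TAV", "BAV"], y_proba=y_proba)
#   metrics["roc"], metrics["f1"]   # scalar scores keyed by the header defined above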
def prc_auc_score(y_true, y_prob):
"""
Precision-Recall-Curve Area-Under-Score
"""
precision, recall, _ = precision_recall_curve(y_true, y_prob)
prc_auc = auc(recall, precision)
return prc_auc
def dcg_score(y_true, y_score, k=None):
"""
Function for Discounted Cumulative Gain
"""
k = len(y_true) if k is None else k
order = np.argsort(y_score)[::-1]
y_true = np.take(y_true, order[:k])
gain = 2 ** y_true - 1
discounts = np.log2(np.arange(len(y_true)) + 2)
return np.sum(gain / discounts)
def ndcg_score(y_true, y_score, k=None):
"""
Function for Normalized Discounted Cumulative Gain
"""
y_true, y_score = np.squeeze(y_true), np.squeeze(y_score)
k = len(y_true) if k is None else k
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shapes.")
IDCG = dcg_score(y_true, y_true)
DCG = dcg_score(y_true, y_score)
return DCG/IDCG
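# Worked example (added for exposition): for y_true = [1, 0, 1] and
# y_score = [0.9, 0.8, 0.1], the score-ranked relevances are [1, 0, 1], giving
# DCG = 1/log2(2) + 0/log2(3) + 1/log2(4) = 1.5, while the ideal ordering gives
# IDCG = 1/log2(2) + 1/log2(3) ~= 1.63, so ndcg_score returns ~0.92.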
def ndcg_score2(y_true, y_score, k=2):
"""
Function for Normalized Discounted Cumulative Gain
Only accepts if y_score is shaped [n_samples, n_classes]
"""
y_true, y_score = np.array(y_true), np.array(y_score)
if y_true.ndim == 1:
y_true = np.expand_dims(y_true, axis=-1)
enc = OneHotEncoder(sparse=False)
oneHot_y_true = enc.fit_transform(y_true)
if oneHot_y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different value ranges")
scores = []
# Iterate over each y_value_true and compute the DCG score
for y_value_true, y_value_score in zip(oneHot_y_true, y_score):
actual = dcg_score(y_value_true, y_value_score, k)
best = dcg_score(y_value_true, y_value_true, k)
scores.append(actual / best)
return np.mean(scores)
| ukb-cardiac-mri-master | ukb/metrics/base.py |