repo_name | hexsha | file_path | code | apis |
---|---|---|---|---|
binhna/ESIM | [
"135894d13a14894111e826114596e2317008ccda"
]
| [
"torch_util.py"
]
| [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport numpy as np\n\n\"\"\"\nthis code from https://github.com/easonnie/ResEncoder\n\"\"\"\ndef pad_1d(t, pad_l):\n l = t.size(0)\n if l >= pad_l:\n return t\n else:\n pad_seq = Variable(t.data.new(pad_l - l, *t.size()[1:]).zero_())\n return torch.cat([t, pad_seq], dim=0)\n\ndef pad(t, length, batch_first=False):\n \"\"\"\n Padding the sequence to a fixed length.\n :param t: [B * T * D] or [B * T] if batch_first else [T * B * D] or [T * B]\n :param length: [B]\n :param batch_first:\n :return:\n \"\"\"\n if batch_first:\n # [B * T * D]\n if length <= t.size(1):\n return t\n else:\n batch_size = t.size(0)\n pad_seq = Variable(t.data.new(batch_size, length - t.size(1), *t.size()[2:]).zero_())\n # [B * T * D]\n return torch.cat([t, pad_seq], dim=1)\n else:\n # [T * B * D]\n if length <= t.size(0):\n return t\n else:\n return torch.cat([t, Variable(t.data.new(length - t.size(0), *t.size()[1:]).zero_())])\n\ndef batch_first2time_first(inputs):\n \"\"\"\n Convert input from batch_first to time_first:\n [B * T * D] -> [T * B * D]\n\n :param inputs:\n :return:\n \"\"\"\n return torch.transpose(inputs, 0, 1)\n\ndef time_first2batch_first(inputs):\n \"\"\"\n Convert input from batch_first to time_first:\n [T * B * D] -> [B * T * D]\n\n :param inputs:\n :return:\n \"\"\"\n\n return torch.transpose(inputs, 0, 1)\n\ndef get_state_shape(rnn: nn.RNN, batch_size, bidirectional=False):\n \"\"\"\n Return the state shape of a given RNN. This is helpful when you want to create a init state for RNN.\n\n Example:\n h0 = Variable(src_seq_p.data.new(*get_state_shape(self.encoder, 3, bidirectional)).zero_())\n\n :param rnn:\n :param batch_size:\n :param bidirectional:\n :return:\n \"\"\"\n if bidirectional:\n return rnn.num_layers * 2, batch_size, rnn.hidden_size\n else:\n return rnn.num_layers, batch_size, rnn.hidden_size\n\ndef pack_list_sequence(inputs, l, batch_first=False):\n \"\"\"\n Pack a batch of Tensor into one Tensor.\n :param inputs:\n :param l:\n :return:\n \"\"\"\n batch_list = []\n max_l = max(list(l))\n batch_size = len(inputs)\n\n for b_i in range(batch_size):\n batch_list.append(pad(inputs[b_i], max_l))\n pack_batch_list = torch.stack(batch_list, dim=1) if not batch_first \\\n else torch.stack(batch_list, dim=0)\n return pack_batch_list\n\ndef pack_for_rnn_seq(inputs, lengths, batch_first=False):\n \"\"\"\n :param inputs: [T * B * D]\n :param lengths: [B]\n :return:\n \"\"\"\n if not batch_first:\n _, sorted_indices = lengths.sort()\n '''\n Reverse to decreasing order\n '''\n r_index = reversed(list(sorted_indices))\n\n s_inputs_list = []\n lengths_list = []\n reverse_indices = np.zeros(lengths.size(0), dtype=np.int64)\n\n for j, i in enumerate(r_index):\n s_inputs_list.append(inputs[:, i, :].unsqueeze(1))\n lengths_list.append(lengths[i])\n reverse_indices[i] = j\n\n reverse_indices = list(reverse_indices)\n\n s_inputs = torch.cat(s_inputs_list, 1)\n packed_seq = nn.utils.rnn.pack_padded_sequence(s_inputs, lengths_list)\n\n return packed_seq, reverse_indices\n\n else:\n _, sorted_indices = lengths.sort()\n '''\n Reverse to decreasing order\n '''\n r_index = reversed(list(sorted_indices))\n\n s_inputs_list = []\n lengths_list = []\n reverse_indices = np.zeros(lengths.size(0), dtype=np.int64)\n\n for j, i in enumerate(r_index):\n s_inputs_list.append(inputs[i, :, :])\n lengths_list.append(lengths[i])\n reverse_indices[i] = j\n\n reverse_indices = list(reverse_indices)\n\n s_inputs = 
torch.stack(s_inputs_list, dim=0)\n # print(s_inputs)\n # print(lengths_list)\n packed_seq = nn.utils.rnn.pack_padded_sequence(s_inputs, lengths_list, batch_first=batch_first)\n\n return packed_seq, reverse_indices\n\ndef unpack_from_rnn_seq(packed_seq, reverse_indices, batch_first=False):\n unpacked_seq, _ = nn.utils.rnn.pad_packed_sequence(packed_seq, batch_first=batch_first)\n s_inputs_list = []\n\n if not batch_first:\n for i in reverse_indices:\n s_inputs_list.append(unpacked_seq[:, i, :].unsqueeze(1))\n return torch.cat(s_inputs_list, 1)\n else:\n for i in reverse_indices:\n s_inputs_list.append(unpacked_seq[i, :, :].unsqueeze(0))\n return torch.cat(s_inputs_list, 0)\n\ndef auto_rnn(rnn: nn.RNN, seqs, lengths, batch_first=True):\n batch_size = seqs.size(0) if batch_first else seqs.size(1)\n state_shape = get_state_shape(rnn, batch_size, rnn.bidirectional)\n\n h0 = c0 = Variable(seqs.data.new(*state_shape).zero_())\n\n packed_pinputs, r_index = pack_for_rnn_seq(seqs, lengths, batch_first)\n output, (hn, cn) = rnn(packed_pinputs, (h0, c0))\n output = unpack_from_rnn_seq(output, r_index, batch_first)\n\n return output\n\ndef pack_seqence_for_linear(inputs, lengths, batch_first=True):\n \"\"\"\n :param inputs: [B * T * D] if batch_first\n :param lengths: [B]\n :param batch_first:\n :param partition: this is the new batch_size for parallel matrix process.\n :param chuck: partition the output into equal size chucks\n :return:\n \"\"\"\n batch_list = []\n if batch_first:\n for i, l in enumerate(lengths):\n batch_list.append(inputs[i, :l])\n packed_sequence = torch.cat(batch_list, 0)\n # if chuck:\n # return list(torch.chunk(packed_sequence, chuck, dim=0))\n # else:\n return packed_sequence\n\n else:\n raise NotImplemented()\n\ndef chucked_forward(inputs, net, chuck=None):\n if not chuck:\n return net(inputs)\n else:\n output_list = [net(chuck) for chuck in torch.chunk(inputs, chuck, dim=0)]\n return torch.cat(output_list, dim=0)\n\ndef unpack_seqence_for_linear(inputs, lengths, batch_first=True):\n batch_list = []\n max_l = max(lengths)\n\n if not isinstance(inputs, list):\n inputs = [inputs]\n inputs = torch.cat(inputs)\n\n if batch_first:\n start = 0\n for l in lengths:\n end = start + l\n batch_list.append(pad_1d(inputs[start:end], max_l))\n start = end\n return torch.stack(batch_list)\n else:\n raise NotImplemented()\n\ndef auto_rnn_bilstm(lstm: nn.LSTM, seqs, lengths):\n batch_size = seqs.size(1)\n\n state_shape = lstm.num_layers * 2, batch_size, lstm.hidden_size\n\n h0 = c0 = Variable(seqs.data.new(*state_shape).zero_())\n\n packed_pinputs, r_index = pack_for_rnn_seq(seqs, lengths)\n\n output, (hn, cn) = lstm(packed_pinputs, (h0, c0))\n\n output = unpack_from_rnn_seq(output, r_index)\n\n return output\n\ndef auto_rnn_bigru(gru: nn.GRU, seqs, lengths):\n batch_size = seqs.size(1)\n\n state_shape = gru.num_layers * 2, batch_size, gru.hidden_size\n\n h0 = Variable(seqs.data.new(*state_shape).zero_())\n\n packed_pinputs, r_index = pack_for_rnn_seq(seqs, lengths)\n\n output, hn = gru(packed_pinputs, h0)\n\n output = unpack_from_rnn_seq(output, r_index)\n\n return output\n\ndef select_last(inputs, lengths, hidden_size):\n \"\"\"\n :param inputs: [T * B * D] D = 2 * hidden_size\n :param lengths: [B]\n :param hidden_size: dimension\n :return: [B * D]\n \"\"\"\n batch_size = inputs.size(1)\n batch_out_list = []\n for b in range(batch_size):\n batch_out_list.append(torch.cat((inputs[lengths[b] - 1, b, :hidden_size],\n inputs[0, b, hidden_size:])\n )\n )\n\n out = torch.stack(batch_out_list)\n 
return out\n\ndef matching_matrix(s1, s2):\n \"\"\"\n :param s1: [T * B * D]\n :param s2: [T * B * D]\n :return: [B * T * T]\n \"\"\"\n b_s1 = s1.transpose(0, 1) # [B * T * D]\n b_s2 = s2.transpose(0, 1) # [B * T * D]\n\n matrix = torch.bmm(b_s1, b_s2.transpose(1, 2))\n return matrix\n\ndef sequence_matrix_cross_alignment(s1, s2, l1, l2, matrix):\n \"\"\"\n :param s1: [T * B * D]\n :param s2: [T * B * D]\n :param l1: [B]\n :param l2: [B]\n :param matrix: [B * T * T]\n :return:\n \"\"\"\n\n b_s1 = s1.transpose(0, 1) # [B * T * D]\n b_s2 = s2.transpose(0, 1) # [B * T * D]\n dim = b_s1.size(2)\n batch_size = b_s1.size(0)\n\n b_wsum_a2to1_list = []\n b_wsum_a1to2_list = []\n\n for b in range(batch_size):\n b_m = matrix[b]\n ba_s1 = b_s1[b]\n ba_s2 = b_s2[b]\n\n # # align s2 to s1 _a2to1\n exp_b_m_a2to1 = F.softmax(b_m[:l1[b], :l2[b]]) # [t1 * t2]\n\n b_weight_a2to1 = exp_b_m_a2to1.unsqueeze(2).expand(l1[b], l2[b], dim) # [t1 * t2] -> [t1 * t2 * dim]\n b_seq_a2to1 = ba_s2[:l2[b]].unsqueeze(0).expand(l1[b], l2[b], dim) # [t2 * dim] -> [t1 * t2 * dim]\n\n b_wsum_a2to1 = torch.sum(b_weight_a2to1 * b_seq_a2to1, dim=1).squeeze(1) # [t1 * d]\n b_wsum_a2to1_list.append(b_wsum_a2to1)\n\n # # align s1 to s2 _a1to2\n exp_b_m_a1to2 = F.softmax(b_m[:l1[b], :l2[b]].transpose(0, 1)) # [t2 * t1]\n\n b_weight_a1to2 = exp_b_m_a1to2.unsqueeze(2).expand(l2[b], l1[b], dim) # [t2 * t1] -> [t2 * t1 * d]\n b_seq_a1to2 = ba_s1[:l1[b]].unsqueeze(0).expand(l2[b], l1[b], dim) # [t1 * dim] -> [t2 * t1 * dim]\n\n b_wsum_a1to2 = torch.sum(b_weight_a1to2 * b_seq_a1to2, dim=1).squeeze(1) # [t2 * d]\n b_wsum_a1to2_list.append(b_wsum_a1to2)\n # print(b_wsum_a1to2)\n\n align_s2_to_s1 = pack_list_sequence(b_wsum_a2to1_list, l1)\n align_s1_to_s2 = pack_list_sequence(b_wsum_a1to2_list, l2)\n\n return align_s2_to_s1, align_s1_to_s2\n\ndef channel_weighted_sum(s, w, l, sharpen=None):\n batch_size = w.size(1)\n result_list = []\n for b_i in range(batch_size):\n if sharpen:\n b_w = w[:l[b_i], b_i, :] * sharpen\n else:\n b_w = w[:l[b_i], b_i, :]\n b_s = s[:l[b_i], b_i, :] # T, D\n soft_b_w = F.softmax(b_w.transpose(0, 1)).transpose(0, 1)\n # print(soft_b_w)\n # print('soft:', )\n # print(soft_b_w)\n result_list.append(torch.sum(soft_b_w * b_s, dim=0)) # [T, D] -> [1, D]\n return torch.cat(result_list, dim=0)\n\ndef topk_weighted_sum(s, w, k, l):\n batch_size = w.size(1)\n result_list = []\n for b_i in range(batch_size):\n # print(w.size())\n # print(l[b_i])\n b_w = w[:l[b_i], b_i, :]\n b_s = s[:l[b_i], b_i, :] # T, D\n if l[b_i] == 1:\n b_topk, b_topk_indices = b_s.max(dim=0)\n elif l[b_i] < k:\n b_topk, b_topk_indices = torch.topk(b_s, l[b_i], dim=0)\n else:\n b_topk, b_topk_indices = torch.topk(b_s, k, dim=0)\n\n b_topk_w = torch.gather(b_w, 0, b_topk_indices)\n soft_b_topk_w = F.softmax(b_topk_w.transpose(0, 1)).transpose(0, 1)\n result_list.append(torch.sum(soft_b_topk_w * b_topk, dim=0))\n return torch.cat(result_list, dim=0)\n\ndef topk_dp_weighted_sum(s, w, l):\n batch_size = w.size(1)\n result_list = []\n for b_i in range(batch_size):\n # print(w.size())\n # print(l[b_i])\n\n # Dynamic pooling\n\n k = (int(l[b_i] - 1) // 10) + 1\n\n b_w = w[:l[b_i], b_i, :]\n b_s = s[:l[b_i], b_i, :] # T, D\n if l[b_i] == 1:\n b_topk, b_topk_indices = b_s.max(dim=0)\n elif l[b_i] < k:\n b_topk, b_topk_indices = torch.topk(b_s, l[b_i], dim=0)\n else:\n b_topk, b_topk_indices = torch.topk(b_s, k, dim=0)\n\n b_topk_w = torch.gather(b_w, 0, b_topk_indices)\n soft_b_topk_w = F.softmax(b_topk_w.transpose(0, 1)).transpose(0, 1)\n 
result_list.append(torch.sum(soft_b_topk_w * b_topk, dim=0))\n return torch.cat(result_list, dim=0)\n\ndef pack_to_matching_matrix(s1, s2, cat_only=[False, False]):\n t1 = s1.size(0)\n t2 = s2.size(0)\n batch_size = s1.size(1)\n d = s1.size(2)\n\n expanded_p_s1 = s1.expand(t2, t1, batch_size, d)\n\n expanded_p_s2 = s2.view(t2, 1, batch_size, d)\n expanded_p_s2 = expanded_p_s2.expand(t2, t1, batch_size, d)\n\n if not cat_only[0] and not cat_only[1]:\n matrix = torch.cat((expanded_p_s1, expanded_p_s2), dim=3)\n elif not cat_only[0] and cat_only[1]:\n matrix = torch.cat((expanded_p_s1, expanded_p_s2, expanded_p_s1 * expanded_p_s2), dim=3)\n else:\n matrix = torch.cat((expanded_p_s1,\n expanded_p_s2,\n torch.abs(expanded_p_s1 - expanded_p_s2),\n expanded_p_s1 * expanded_p_s2), dim=3)\n\n # matrix = torch.cat((expanded_p_s1,\n # expanded_p_s2), dim=3)\n\n return matrix\n\ndef max_matching(gram_matrix, l1, l2):\n batch_size = gram_matrix.size(2)\n dim = gram_matrix.size(3)\n in_d = dim // 4\n\n t1_seq = []\n t2_seq = []\n for b_i in range(batch_size):\n b_m = gram_matrix[:l2[b_i], :l1[b_i], b_i, :]\n max_t1_a, _ = torch.max(b_m, dim=0)\n max_t2_a, _ = torch.max(b_m, dim=1)\n\n t1_seq.append(max_t1_a.view(l1[b_i], -1)) # [T1, B, 4D]\n t2_seq.append(max_t2_a.view(l2[b_i], -1)) # [T2, B, 4D]\n\n s1_seq = pack_list_sequence(t1_seq, l1)\n s2_seq = pack_list_sequence(t2_seq, l2)\n filp_l = [s2_seq[:, :, in_d:in_d * 2], s2_seq[:, :, :in_d], s2_seq[:, :, in_d * 2:]]\n s2_seq = torch.cat(filp_l, dim=2)\n\n return s1_seq, s2_seq\n\ndef max_over_grammatrix(inputs, l1, l2):\n \"\"\"\n :param inputs: [T2 * T1 * B * D]\n :param l1:\n :param l2:\n :return:\n \"\"\"\n batch_size = inputs.size(2)\n max_out_list = []\n for b in range(batch_size):\n b_gram_matrix = inputs[:l2[b], :l1[b], b, :]\n dim = b_gram_matrix.size(-1)\n\n b_max, _ = torch.max(b_gram_matrix.contiguous().view(-1, dim), dim=0)\n\n max_out_list.append(b_max)\n\n max_out = torch.cat(max_out_list, dim=0)\n return max_out\n\ndef comparing_conv(matrices, l1, l2, conv_filter: nn.Linear, k_size, dropout=None,\n padding=True, list_in=False):\n \"\"\"\n :param conv_filter: [k * k * input_d]\n :param k_size:\n :param dropout:\n :return:\n \"\"\"\n k = k_size\n\n if list_in is False:\n batch_size = matrices.size(2)\n windows = []\n for b in range(batch_size):\n b_matrix = matrices[:l2[b], :l1[b], b, :]\n\n if not padding:\n if l2[b] - k + 1 <= 0 or l1[b] - k + 1 <= 0:\n raise Exception('Kernel size error k={0}, matrix=({1},{2})'.format(k, l2[b], l1[b]))\n\n for i in range(l2[b] - k + 1):\n for j in range(l1[b] - k + 1):\n window = b_matrix[i:i + k, j:j + k, :]\n window_d = window.size(-1)\n windows.append(window.contiguous().view(k * k * window_d))\n else:\n ch_d = b_matrix.size(-1)\n padding_n = (k - 1) // 2\n row_pad = Variable(torch.zeros(padding_n, l1[b], ch_d))\n\n if torch.cuda.is_available():\n row_pad = row_pad.cuda()\n # print(b_matrix)\n # print(row_pad)\n after_row_pad = torch.cat([row_pad, b_matrix, row_pad], dim=0)\n col_pad = Variable(torch.zeros(l2[b] + 2 * padding_n, padding_n, ch_d))\n if torch.cuda.is_available():\n col_pad = col_pad.cuda()\n after_col_pad = torch.cat([col_pad, after_row_pad, col_pad], dim=1)\n\n for i in range(padding_n, padding_n + l2[b]):\n for j in range(padding_n, padding_n + l1[b]):\n i_start = i - padding_n\n j_start = j - padding_n\n window = after_col_pad[i_start:i_start + k, j_start:j_start + k, :]\n windows.append(window.contiguous().view(k * k * ch_d))\n\n windows = torch.stack(windows)\n else:\n batch_size = 
len(matrices)\n windows = []\n for b in range(batch_size):\n b_matrix = matrices[b]\n b_l2 = b_matrix.size(0)\n b_l1 = b_matrix.size(1)\n\n if not padding:\n if l1 is not None and l2 is not None and (l2[b] != b_l2 or l1[b] != b_l1):\n raise Exception('Possible input matrices size error!')\n\n if b_l2 - k + 1 <= 0 or b_l1 - k + 1 <= 0:\n raise Exception('Kernel size error k={0}, matrix=({1},{2})'.format(k, l2[b], l1[b]))\n\n for i in range(b_l2 - k + 1):\n for j in range(b_l1 - k + 1):\n window = b_matrix[i:i + k, j:j + k, :]\n window_d = window.size(-1)\n windows.append(window.contiguous().view(k * k * window_d))\n else:\n if l1 is not None and l2 is not None and (l2[b] != b_l2 or l1[b] != b_l1):\n raise Exception('Possible input matrices size error!')\n\n ch_d = b_matrix.size(-1)\n padding_n = (k - 1) // 2\n row_pad = Variable(torch.zeros(padding_n, b_l1, ch_d))\n if torch.cuda.is_available():\n row_pad = row_pad.cuda()\n after_row_pad = torch.cat([row_pad, b_matrix, row_pad], dim=0)\n col_pad = Variable(torch.zeros(b_l2 + 2 * padding_n, padding_n, ch_d))\n if torch.cuda.is_available():\n col_pad = col_pad.cuda()\n after_col_pad = torch.cat([col_pad, after_row_pad, col_pad], dim=1)\n\n for i in range(padding_n, padding_n + b_l2):\n for j in range(padding_n, padding_n + b_l1):\n i_start = i - padding_n\n j_start = j - padding_n\n window = after_col_pad[i_start:i_start + k, j_start:j_start + k, :]\n windows.append(window.contiguous().view(k * k * ch_d))\n\n windows = torch.stack(windows)\n\n if dropout:\n dropout(windows)\n\n # print(windows)\n\n out_windows = conv_filter(windows)\n a, b = torch.chunk(out_windows, 2, dim=1)\n out = a * F.sigmoid(b)\n\n out_list = []\n max_out_list = []\n i = 0\n for b in range(batch_size):\n\n if not padding:\n c_l2 = l2[b] - k + 1\n c_l1 = l1[b] - k + 1\n else:\n c_l2 = l2[b]\n c_l1 = l1[b]\n\n b_end = i + c_l2 * c_l1\n b_matrix = out[i:b_end, :]\n\n max_out, _ = b_matrix.max(dim=0)\n max_out_list.append(max_out.squeeze())\n\n dim = b_matrix.size(-1)\n out_list.append(b_matrix.view(c_l2, c_l1, dim))\n i = b_end\n\n max_out = torch.stack(max_out_list)\n # for out in out_list:\n # max_out = torch.max(out.view(1, -1))\n\n return out_list, max_out\n\ndef max_along_time(inputs, lengths, list_in=False, batch_first=False):\n \"\"\"\n :param inputs: [T * B * D]\n :param lengths: [B]\n :return: [B * D] max_along_time\n \"\"\"\n ls = list(lengths)\n\n if not batch_first:\n if not list_in:\n b_seq_max_list = []\n for i, l in enumerate(ls):\n seq_i = inputs[:l, i, :]\n seq_i_max, _ = seq_i.max(dim=0)\n seq_i_max = seq_i_max.squeeze()\n b_seq_max_list.append(seq_i_max)\n\n return torch.stack(b_seq_max_list)\n else:\n b_seq_max_list = []\n for i, l in enumerate(ls):\n seq_i = inputs[i]\n seq_i_max, _ = seq_i.max(dim=0)\n seq_i_max = seq_i_max.squeeze()\n b_seq_max_list.append(seq_i_max)\n\n return torch.stack(b_seq_max_list)\n else:\n b_seq_max_list = []\n for i, l in enumerate(ls):\n seq_i = inputs[i, :l, :]\n seq_i_max, _ = seq_i.max(dim=0)\n seq_i_max = seq_i_max.squeeze()\n b_seq_max_list.append(seq_i_max)\n\n return torch.stack(b_seq_max_list)\n\ndef topk_along_time(inputs, k, lengths):\n \"\"\"\n :param inputs: [T * B * D]\n :param lengths: [B]\n :return: [B * D] max_along_time\n \"\"\"\n ls = list(lengths)\n d = inputs.size(-1)\n pad_z = Variable(inputs.data.new(1, d).zero_())\n\n b_seq_max_list = []\n for i, l in enumerate(ls):\n seq_i = inputs[:l, i, :]\n if l == 1:\n seq_i = torch.cat((seq_i, pad_z), dim=0)\n seq_i_topk, _ = torch.topk(seq_i, k, dim=0)\n 
b_seq_max_list.append(seq_i_topk.view(1, -1))\n\n return torch.cat(b_seq_max_list)\n\ndef topk_avg_along_time(inputs, k, lengths, list_in=False):\n ls = list(lengths)\n\n b_seq_max_list = []\n for i, l in enumerate(ls):\n seq_i = inputs[:l, i, :] if not list_in else inputs[i]\n if l == 1:\n seq_i_topk_avg, _ = seq_i.max(dim=0)\n elif k > l:\n seq_i_topk, _ = torch.topk(seq_i, l, dim=0)\n seq_i_topk_avg = torch.sum(seq_i_topk, dim=0) / l\n else:\n seq_i_topk, _ = torch.topk(seq_i, k, dim=0)\n seq_i_topk_avg = torch.sum(seq_i_topk, dim=0) / k\n b_seq_max_list.append(seq_i_topk_avg)\n\n return torch.cat(b_seq_max_list)\n\ndef comparing_conv_m(inputs, l1, l2, conv_layer: nn.Conv2d, mask_2d):\n batch_size = inputs.size(0)\n unit_d = conv_layer.out_channels // 2\n conv_out = conv_layer(inputs)\n\n a, b = torch.chunk(conv_out, 2, dim=1)\n gated_conv_out = a * F.sigmoid(b) * mask_2d[:, :unit_d, :, :]\n\n max_out_list = []\n for b_i in range(batch_size):\n b_conv_out = gated_conv_out[b_i, :, :l2[b_i], :l1[b_i]]\n max_out, _ = torch.max(b_conv_out.contiguous().view(unit_d, -1), dim=1)\n # print(b_conv_out.size())\n max_out_list.append(max_out.squeeze(1))\n max_out = torch.stack(max_out_list)\n\n return gated_conv_out, max_out\n\ndef text_conv1d(inputs, l1, conv_filter: nn.Linear, k_size, dropout=None, list_in=False,\n gate_way=True):\n \"\"\"\n :param inputs: [T * B * D]\n :param l1: [B]\n :param conv_filter: [k * D_in, D_out * 2]\n :param k_size:\n :param dropout:\n :param padding:\n :param list_in:\n :return:\n \"\"\"\n k = k_size\n batch_size = l1.size(0)\n d_in = inputs.size(2) if not list_in else inputs[0].size(1)\n unit_d = conv_filter.out_features // 2\n pad_n = (k - 1) // 2\n\n zeros_padding = Variable(inputs[0].data.new(pad_n, d_in).zero_())\n\n batch_list = []\n input_list = []\n for b_i in range(batch_size):\n masked_in = inputs[:l1[b_i], b_i, :] if not list_in else inputs[b_i]\n if gate_way:\n input_list.append(masked_in)\n\n b_inputs = torch.cat([zeros_padding, masked_in, zeros_padding], dim=0)\n for i in range(l1[b_i]):\n # print(b_inputs[i:i+k])\n batch_list.append(b_inputs[i:i + k].view(k * d_in))\n\n batch_in = torch.stack(batch_list, dim=0)\n a, b = torch.chunk(conv_filter(batch_in), 2, 1)\n out = a * F.sigmoid(b)\n\n out_list = []\n start = 0\n for b_i in range(batch_size):\n if gate_way:\n out_list.append(torch.cat((input_list[b_i], out[start:start + l1[b_i]]), dim=1))\n else:\n out_list.append(out[start:start + l1[b_i]])\n\n start = start + l1[b_i]\n\n # max_out_list = []\n # for b_i in range(batch_size):\n # max_out, _ = torch.max(out_list[b_i], dim=0)\n # max_out_list.append(max_out)\n # max_out = torch.cat(max_out_list, 0)\n #\n # print(out_list)\n\n return out_list\n\n# Test something"
]
| [
[
"torch.zeros",
"torch.nn.functional.sigmoid",
"torch.cat",
"torch.stack",
"torch.gather",
"torch.max",
"torch.sum",
"torch.abs",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.cuda.is_available",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.nn.functional.softmax",
"torch.transpose",
"torch.chunk",
"torch.topk"
]
]
|
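The `torch_util.py` code above predates PyTorch 0.4: it wraps tensors in the legacy `Variable` API, calls `F.softmax` without a `dim` argument, and sorts/unsorts each batch by hand around `pack_padded_sequence`. As a minimal sketch (not from the repo; the `lstm`, `seqs`, and `lengths` names are illustrative), current PyTorch can delegate that reordering bookkeeping to `enforce_sorted=False`, which covers what `pack_for_rnn_seq`, `unpack_from_rnn_seq`, and `auto_rnn` implement manually:

```python
import torch
import torch.nn as nn

# Toy setup: batch of 3 zero-padded sequences, max length 5, feature dim 8.
lstm = nn.LSTM(input_size=8, hidden_size=16, bidirectional=True, batch_first=True)
seqs = torch.randn(3, 5, 8)        # [B * T * D]
lengths = torch.tensor([5, 3, 2])  # true length of each sequence

# enforce_sorted=False makes pack_padded_sequence do the sort/unsort
# bookkeeping that pack_for_rnn_seq/unpack_from_rnn_seq do by hand.
packed = nn.utils.rnn.pack_padded_sequence(
    seqs, lengths, batch_first=True, enforce_sorted=False)
packed_out, _ = lstm(packed)  # h0/c0 default to zeros, as in auto_rnn
output, _ = nn.utils.rnn.pad_packed_sequence(packed_out, batch_first=True)
print(output.shape)  # torch.Size([3, 5, 32]); 32 = 2 * hidden_size (biLSTM)
```

Because `pad_packed_sequence` restores the original batch order, no `reverse_indices` pass is needed.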
ethanlighter/crnn_ctc_tf2 | [
"2995dfb9fd07e2e3a9a0cb8563b3159323a2e757"
]
| [
"tools/demo.py"
]
| [
"\nfrom dataset.tf_data_handler import tf_data_handler\nimport tensorflow as tf\nfrom config import Config\nimport time\nimport os\nfrom model.model import vgg_crnn\nfrom tools.utils import ctc_decode\nfrom tools.utils import map_to_text\nimport cv2\n# tf_config = tf.ConfigProto()\n# tf_config.gpu_options.per_process_gpu_memory_fraction = 0.9 # 分配50%\n# tf_config.gpu_options.allow_growth = True # 自适应\n# session = tf.Session(config=tf_config)\n# os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\nmodel_file = \"/home/ethony/github_work/crnn_ctc_tf2/checkpoint/epoch_20_model\"\nmodel = vgg_crnn()\nmodel.load_weights(model_file)\ndef demo(img_path):\n img = tf.io.read_file(img_path)\n img = tf.io.decode_jpeg(img, channels=1)\n\n img_shape = tf.shape(img)\n scale_factor = Config.des_img_shape[0] / img_shape[0]\n img_width = scale_factor * tf.cast(img_shape[1], tf.float64)\n img_width = tf.cast(img_width, tf.int32)\n img = tf.image.resize(img, (Config.des_img_shape[0], img_width)) / 255.0\n img = tf.expand_dims(img,axis=0)\n pred = model(img)\n pre_index = ctc_decode(pred)\n text = map_to_text(pre_index[0])\n print(text)\nif __name__ == \"__main__\":\n test_path = \"/home/ethony/github_work/crnn_ctc_tf2/temp/ture_test_imgs\"\n for item in os.listdir(test_path)[:100]:\n if item.endswith(\"jpg\"):\n img_path = os.path.join(test_path,item)\n item_img = cv2.imread(img_path)\n cv2.imshow(\"item_img\",item_img)\n # start_time = time.time()\n # print(img_path)\n demo(img_path)\n cv2.waitKey(0)\n # print(time.time() - start_time)"
]
| [
[
"tensorflow.io.decode_jpeg",
"tensorflow.shape",
"tensorflow.expand_dims",
"tensorflow.io.read_file",
"tensorflow.image.resize",
"tensorflow.cast"
]
]
|
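`demo()` above resizes each input to a fixed height while scaling the width proportionally, then adds a batch axis before calling the model. The sketch below isolates that preprocessing as a standalone function; `target_height` is a stand-in for `Config.des_img_shape[0]`, whose definition is not part of this row:

```python
import tensorflow as tf

def load_for_crnn(img_path, target_height=32):
    # Read the file and decode to a single-channel (grayscale) tensor.
    img = tf.io.read_file(img_path)
    img = tf.io.decode_jpeg(img, channels=1)

    # Scale the width by the same factor that brings the height to
    # target_height, preserving the aspect ratio as demo() does.
    shape = tf.shape(img)
    scale = target_height / shape[0]
    new_width = tf.cast(scale * tf.cast(shape[1], tf.float64), tf.int32)
    img = tf.image.resize(img, (target_height, new_width)) / 255.0

    # Add a batch axis: [1, target_height, W, 1], ready for model(img).
    return tf.expand_dims(img, axis=0)
```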
dv-123/Smart-Road-Assistant | [
"9b6222f23f030dd89e03e1c38030cd9e90eaffd4"
]
| [
"YOLO_with_image.py"
]
| [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Aug 20 23:45:13 2019\r\n\r\n@author: bhaik\r\n\"\"\"\r\n\r\n#%%\r\n# importing required libraries\r\n\r\nimport argparse\r\nimport os\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.pyplot import imshow\r\nimport scipy.io\r\nimport scipy.misc\r\nimport numpy as np\r\nimport pandas as pd\r\nimport PIL\r\nimport cv2\r\nimport tensorflow as tf\r\nfrom keras import backend as K\r\nfrom keras.layers import Input, Lambda, Conv2D\r\nfrom keras.models import load_model, Model\r\nfrom yolo_utils import read_classes, read_anchors, generate_colors, preprocess_image, draw_boxes, scale_boxes\r\nfrom yad2k.models.keras_yolo import yolo_head, yolo_boxes_to_corners, preprocess_true_boxes, yolo_loss, yolo_body\r\n\r\n#%%\r\n\r\n# creating yolo filter boxes\r\n\r\ndef yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = .6):\r\n \r\n '''\r\n Filters the classified YOLO boxes by thresholding on the objects classified and on the basis of box_confidence\r\n \r\n Returns values of the filter:\r\n scores -- tensor of shape (None,), containing the class probability score for selected boxes\r\n boxes -- tensor of shape (None, 4), containing (b_x, b_y, b_h, b_w) coordinates of selected boxes\r\n classes -- tensor of shape (None,), containing the index of the class detected by the selected boxes\r\n \r\n Note: \"None\" is here because we don't know the exact number of selected boxes, as it depends on the threshold. \r\n For example, the actual output size of scores would be (10,) if there are 10 boxes.\r\n \r\n '''\r\n \r\n # Computing the box scores\r\n box_scores = np.multiply(box_confidence,box_class_probs)\r\n \r\n # Finding the box_classes thanks to the max box_scores, keep track of the corresponding score\r\n box_classes = K.argmax(box_scores, axis=-1) # gives the index of the maximum number in the obtained box_score matrix/array (1D)\r\n box_class_scores = K.max(box_scores, axis=-1) # gives the maximum value from the box_score\r\n \r\n # Create a filtering mask based on \"box_class_scores\" by using \"threshold\". 
The mask should have the\r\n # same dimension as box_class_scores, and the boxes with probability >= threshold are classified as True or 1 and remaining False or 0\r\n filtering_mask = K.greater_equal(box_class_scores, threshold)\r\n \r\n # Applying the mask to scores, boxes and classes\r\n scores = tf.boolean_mask(box_class_scores,filtering_mask) # the scores grater than corresponding threshold are selected\r\n boxes = tf.boolean_mask(boxes,filtering_mask) # the boxes grater than corresponding threshold are selected\r\n classes = tf.boolean_mask(box_classes,filtering_mask) # the classes grater than corresponding threshold are selected\r\n \r\n return scores, boxes, classes\r\n#%%\r\n\r\n# Creating Intersection / Union Function --> IOU\r\n\r\ndef iou(box1,box2):\r\n ''' This function implements Intersection Over Union\r\n \r\n The corresponding dimenssions of box1 and box2 are:\r\n box1 --> (x1, y1, x2, y2)\r\n box2 --> (x1, y1, x2, y2)\r\n '''\r\n # calculation the area of intersection\r\n xi1 = max(box1[0],box2[0])\r\n yi1 = max(box1[1],box2[1])\r\n xi2 = min(box1[2],box2[2])\r\n yi2 = min(box1[3],box2[3])\r\n inter_area = max((yi2-yi1),0)* max((xi2-xi1),0)\r\n \r\n # the max between 0 and the actual area is taken because we will not consider the negative values.\r\n \r\n # Calculating the Union area by using Formula: Union(A,B) = A + B - Inter(A,B)\r\n box1_area = (box1[3]-box1[1])*(box1[2]-box1[0])\r\n box2_area = (box2[3]-box2[1])*(box2[2]-box2[0])\r\n union_area = (box1_area+box2_area)-inter_area\r\n \r\n # computing the iou\r\n iou = inter_area/union_area\r\n \r\n return iou\r\n#%%\r\n \r\n# making the YOLO non-max supression function to remove the multiple uotputs of the same detected object\r\n\r\ndef yolo_non_max_suppression(scores, boxes, classes, max_boxes = 10, iou_threshold = 0.6):\r\n \r\n '''\r\n Returns:\r\n scores -- tensor of shape (, None), predicted score for each box\r\n boxes -- tensor of shape (4, None), predicted box coordinates\r\n classes -- tensor of shape (, None), predicted class for each box\r\n \r\n these will be the final classified boxes\r\n \r\n '''\r\n \r\n max_boxes_tensor = K.variable(max_boxes, dtype='int32') # tensor to be used in tf.image.non_max_suppression()\r\n K.get_session().run(tf.variables_initializer([max_boxes_tensor])) # initialize variable max_boxes_tensor\r\n \r\n # Using predefined function tf.image.non_max_suppression() to get the list of indices corresponding to boxes you keep\r\n nms_indices = tf.image.non_max_suppression(boxes,scores,max_boxes_tensor,iou_threshold=iou_threshold)\r\n \r\n # Using K.gather() to select only nms_indices from scores, boxes and classes\r\n scores = K.gather(scores,nms_indices)\r\n boxes = K.gather(boxes,nms_indices)\r\n classes = K.gather(classes,nms_indices)\r\n \r\n return scores, boxes, classes\r\n#%%\r\n \r\n# creating the yolo exaluation function\r\n\r\ndef yolo_eval(yolo_outputs, image_shape = (720., 1280.), max_boxes=10, score_threshold=.6, iou_threshold=.5):\r\n '''\r\n This function converts the output of YOLO encoding (a lot of boxes) to our predicted boxes along with their scores, box coordinates and classes.\r\n \r\n One main argument to keep in mind is fron the yolo trained model -->\r\n yolo_outputs -- output of the encoding model (for image_shape of (608, 608, 3)), contains 4 tensors:\r\n box_confidence: tensor of shape (None, 19, 19, 5, 1)\r\n box_xy: tensor of shape (None, 19, 19, 5, 2)\r\n box_wh: tensor of shape (None, 19, 19, 5, 2)\r\n \r\n the finction will return following values 
-->\r\n cores -- tensor of shape (None, ), predicted score for each box\r\n boxes -- tensor of shape (None, 4), predicted box coordinates\r\n classes -- tensor of shape (None,), predicted class for each box\r\n '''\r\n \r\n # first we will be retriving the outputs from the yolu outputs of the model\r\n box_confidence, box_xy, box_wh, box_class_probs = yolo_outputs\r\n \r\n # Converting boxes to be ready for filtering functions \r\n boxes = yolo_boxes_to_corners(box_xy, box_wh)\r\n # this function is imported from yad2k.models.keras_yolo\r\n \r\n # filtering the boxes\r\n scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = score_threshold)\r\n \r\n # scaling the boxes back to original image shape\r\n boxes = scale_boxes(boxes, image_shape)\r\n # this is imported from yolo_utils\r\n \r\n # using the non-max supression on the filtered bosex\r\n scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes, max_boxes = max_boxes, iou_threshold = iou_threshold)\r\n \r\n return scores, boxes, classes\r\n#%%\r\n \r\n# initialising the session\r\nsess = K.get_session()\r\n\r\n# gathering the class_names, anchors from model_data\r\nclass_names = read_classes(\"model_data/coco_classes.txt\")\r\nanchors = read_anchors(\"model_data/yolo_anchors.txt\")\r\n\r\n# specifying the image shape\r\nimage_shape = (720., 1280.)\r\n\r\n# loading the yolo pretrained model\r\nprint(\"loading YOLOV2 weights/model\")\r\nyolo_model = load_model(\"model_data/yolo.h5\")\r\nprint(\"model loaded\")\r\n\r\n# model summary\r\nyolo_model.summary()\r\n\r\n# classifying the yolo outputs from the model with class names\r\nyolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))\r\n\r\n# applying the yolo_eval functions on yolo_outputs\r\nscores, boxes, classes = yolo_eval(yolo_outputs, image_shape)\r\n#%%\r\n\r\n# making the predict function\r\ndef predict(sess, image_file):\r\n \r\n '''\r\n The function will run the graph stored in \"sess\" to predict boxes for \"image_file\". Prints and plots the preditions.\r\n '''\r\n \r\n # processing the image data\r\n image, image_data = preprocess_image(\"images/\" + image_file, model_image_size = (608, 608))\r\n \r\n # Runing the session with the correct tensors and choose the correct placeholders in the feed_dict.\r\n # We will need to use feed_dict={yolo_model.input: ... , K.learning_phase(): 0})\r\n out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes], feed_dict={yolo_model.input: image_data, K.learning_phase(): 0})\r\n \r\n # Print predictions info\r\n print('Found {} boxes for {}'.format(len(out_boxes), image_file))\r\n \r\n # Generating colors for drawing bounding boxes.\r\n colors = generate_colors(class_names)\r\n \r\n # Draw bounding boxes on the image file\r\n draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)\r\n \r\n # saving the image\r\n image.save(os.path.join(\"out\", image_file), quality=90)\r\n \r\n # displaying the image\r\n img = cv2.imread(os.path.join(\"out\", \"test.jpg\"),1)\r\n cv2.imshow('output',img)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n \r\n return out_scores, out_boxes, out_classes\r\n \r\n#%%\r\n# running the predict function\r\nout_scores, out_boxes, out_classes = predict(sess, \"test.jpg\")\r\n\r\n\r\n#%%\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n "
]
| [
[
"numpy.multiply",
"tensorflow.variables_initializer",
"tensorflow.boolean_mask",
"tensorflow.image.non_max_suppression"
]
]
|
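This script targets the TF1-style Keras session API (`K.get_session()`, `feed_dict`, `tf.variables_initializer`). The score-thresholding step inside `yolo_filter_boxes`, though, is just an argmax/max over class scores followed by `tf.boolean_mask`; as an illustrative sketch with made-up numbers, it runs unchanged in eager TF2:

```python
import tensorflow as tf

# Toy class-probability scores for 4 candidate boxes over 3 classes.
box_scores = tf.constant([[0.10, 0.70, 0.20],
                          [0.30, 0.20, 0.10],
                          [0.05, 0.10, 0.90],
                          [0.40, 0.30, 0.20]])
box_classes = tf.argmax(box_scores, axis=-1)           # best class per box
box_class_scores = tf.reduce_max(box_scores, axis=-1)  # its score

# Keep only boxes whose best score clears the threshold, the same
# masking yolo_filter_boxes builds with K.greater_equal.
mask = box_class_scores >= 0.6
print(tf.boolean_mask(box_class_scores, mask).numpy())  # [0.7 0.9]
print(tf.boolean_mask(box_classes, mask).numpy())       # [1 2]
```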
Salompas/pandas | [
"2ebab98cf1f36a678256436e7a4f5149536436c9"
]
| [
"pandas/core/arrays/datetimelike.py"
]
| [
"from datetime import datetime, timedelta\nimport operator\nfrom typing import Any, Sequence, Type, Union, cast\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import NaT, NaTType, Timestamp, algos, iNaT, lib\nfrom pandas._libs.tslibs.c_timestamp import maybe_integer_op_deprecated\nfrom pandas._libs.tslibs.period import DIFFERENT_FREQ, IncompatibleFrequency, Period\nfrom pandas._libs.tslibs.timedeltas import Timedelta, delta_to_nanoseconds\nfrom pandas._libs.tslibs.timestamps import RoundTo, round_nsint64\nfrom pandas.compat.numpy import function as nv\nfrom pandas.errors import AbstractMethodError, NullFrequencyError, PerformanceWarning\nfrom pandas.util._decorators import Appender, Substitution\nfrom pandas.util._validators import validate_fillna_kwargs\n\nfrom pandas.core.dtypes.common import (\n is_categorical_dtype,\n is_datetime64_any_dtype,\n is_datetime64_dtype,\n is_datetime64tz_dtype,\n is_datetime_or_timedelta_dtype,\n is_dtype_equal,\n is_extension_array_dtype,\n is_float_dtype,\n is_integer_dtype,\n is_list_like,\n is_object_dtype,\n is_offsetlike,\n is_period_dtype,\n is_string_dtype,\n is_timedelta64_dtype,\n is_unsigned_integer_dtype,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.generic import (\n ABCDataFrame,\n ABCIndexClass,\n ABCPeriodArray,\n ABCSeries,\n)\nfrom pandas.core.dtypes.inference import is_array_like\nfrom pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna\n\nfrom pandas._typing import DatetimeLikeScalar\nfrom pandas.core import missing, nanops\nfrom pandas.core.algorithms import checked_add_with_arr, take, unique1d, value_counts\nimport pandas.core.common as com\nfrom pandas.core.ops.invalid import make_invalid_op\n\nfrom pandas.tseries import frequencies\nfrom pandas.tseries.offsets import DateOffset, Tick\n\nfrom .base import ExtensionArray, ExtensionOpsMixin\n\n\nclass AttributesMixin:\n _data = None # type: np.ndarray\n\n @classmethod\n def _simple_new(cls, values, **kwargs):\n raise AbstractMethodError(cls)\n\n @property\n def _scalar_type(self) -> Type[DatetimeLikeScalar]:\n \"\"\"The scalar associated with this datelike\n\n * PeriodArray : Period\n * DatetimeArray : Timestamp\n * TimedeltaArray : Timedelta\n \"\"\"\n raise AbstractMethodError(self)\n\n def _scalar_from_string(\n self, value: str\n ) -> Union[Period, Timestamp, Timedelta, NaTType]:\n \"\"\"\n Construct a scalar type from a string.\n\n Parameters\n ----------\n value : str\n\n Returns\n -------\n Period, Timestamp, or Timedelta, or NaT\n Whatever the type of ``self._scalar_type`` is.\n\n Notes\n -----\n This should call ``self._check_compatible_with`` before\n unboxing the result.\n \"\"\"\n raise AbstractMethodError(self)\n\n def _unbox_scalar(self, value: Union[Period, Timestamp, Timedelta, NaTType]) -> int:\n \"\"\"\n Unbox the integer value of a scalar `value`.\n\n Parameters\n ----------\n value : Union[Period, Timestamp, Timedelta]\n\n Returns\n -------\n int\n\n Examples\n --------\n >>> self._unbox_scalar(Timedelta('10s')) # DOCTEST: +SKIP\n 10000000000\n \"\"\"\n raise AbstractMethodError(self)\n\n def _check_compatible_with(\n self, other: Union[Period, Timestamp, Timedelta, NaTType]\n ) -> None:\n \"\"\"\n Verify that `self` and `other` are compatible.\n\n * DatetimeArray verifies that the timezones (if any) match\n * PeriodArray verifies that the freq matches\n * Timedelta has no verification\n\n In each case, NaT is considered compatible.\n\n Parameters\n ----------\n other\n\n Raises\n ------\n Exception\n \"\"\"\n raise 
AbstractMethodError(self)\n\n\nclass DatelikeOps:\n \"\"\"\n Common ops for DatetimeIndex/PeriodIndex, but not TimedeltaIndex.\n \"\"\"\n\n @Substitution(\n URL=\"https://docs.python.org/3/library/datetime.html\"\n \"#strftime-and-strptime-behavior\"\n )\n def strftime(self, date_format):\n \"\"\"\n Convert to Index using specified date_format.\n\n Return an Index of formatted strings specified by date_format, which\n supports the same string format as the python standard library. Details\n of the string format can be found in `python string format\n doc <%(URL)s>`__.\n\n Parameters\n ----------\n date_format : str\n Date format string (e.g. \"%%Y-%%m-%%d\").\n\n Returns\n -------\n ndarray\n NumPy ndarray of formatted strings.\n\n See Also\n --------\n to_datetime : Convert the given argument to datetime.\n DatetimeIndex.normalize : Return DatetimeIndex with times to midnight.\n DatetimeIndex.round : Round the DatetimeIndex to the specified freq.\n DatetimeIndex.floor : Floor the DatetimeIndex to the specified freq.\n\n Examples\n --------\n >>> rng = pd.date_range(pd.Timestamp(\"2018-03-10 09:00\"),\n ... periods=3, freq='s')\n >>> rng.strftime('%%B %%d, %%Y, %%r')\n Index(['March 10, 2018, 09:00:00 AM', 'March 10, 2018, 09:00:01 AM',\n 'March 10, 2018, 09:00:02 AM'],\n dtype='object')\n \"\"\"\n return self._format_native_types(date_format=date_format).astype(object)\n\n\nclass TimelikeOps:\n \"\"\"\n Common ops for TimedeltaIndex/DatetimeIndex, but not PeriodIndex.\n \"\"\"\n\n _round_doc = \"\"\"\n Perform {op} operation on the data to the specified `freq`.\n\n Parameters\n ----------\n freq : str or Offset\n The frequency level to {op} the index to. Must be a fixed\n frequency like 'S' (second) not 'ME' (month end). See\n :ref:`frequency aliases <timeseries.offset_aliases>` for\n a list of possible `freq` values.\n ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'\n Only relevant for DatetimeIndex:\n\n - 'infer' will attempt to infer fall dst-transition hours based on\n order\n - bool-ndarray where True signifies a DST time, False designates\n a non-DST time (note that this flag is only applicable for\n ambiguous times)\n - 'NaT' will return NaT where there are ambiguous times\n - 'raise' will raise an AmbiguousTimeError if there are ambiguous\n times\n\n .. versionadded:: 0.24.0\n\n nonexistent : 'shift_forward', 'shift_backward', 'NaT', timedelta, \\\ndefault 'raise'\n A nonexistent time does not exist in a particular timezone\n where clocks moved forward due to DST.\n\n - 'shift_forward' will shift the nonexistent time forward to the\n closest existing time\n - 'shift_backward' will shift the nonexistent time backward to the\n closest existing time\n - 'NaT' will return NaT where there are nonexistent times\n - timedelta objects will shift nonexistent times by the timedelta\n - 'raise' will raise an NonExistentTimeError if there are\n nonexistent times\n\n .. 
versionadded:: 0.24.0\n\n Returns\n -------\n DatetimeIndex, TimedeltaIndex, or Series\n Index of the same type for a DatetimeIndex or TimedeltaIndex,\n or a Series with the same index for a Series.\n\n Raises\n ------\n ValueError if the `freq` cannot be converted.\n\n Examples\n --------\n **DatetimeIndex**\n\n >>> rng = pd.date_range('1/1/2018 11:59:00', periods=3, freq='min')\n >>> rng\n DatetimeIndex(['2018-01-01 11:59:00', '2018-01-01 12:00:00',\n '2018-01-01 12:01:00'],\n dtype='datetime64[ns]', freq='T')\n \"\"\"\n\n _round_example = \"\"\">>> rng.round('H')\n DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',\n '2018-01-01 12:00:00'],\n dtype='datetime64[ns]', freq=None)\n\n **Series**\n\n >>> pd.Series(rng).dt.round(\"H\")\n 0 2018-01-01 12:00:00\n 1 2018-01-01 12:00:00\n 2 2018-01-01 12:00:00\n dtype: datetime64[ns]\n \"\"\"\n\n _floor_example = \"\"\">>> rng.floor('H')\n DatetimeIndex(['2018-01-01 11:00:00', '2018-01-01 12:00:00',\n '2018-01-01 12:00:00'],\n dtype='datetime64[ns]', freq=None)\n\n **Series**\n\n >>> pd.Series(rng).dt.floor(\"H\")\n 0 2018-01-01 11:00:00\n 1 2018-01-01 12:00:00\n 2 2018-01-01 12:00:00\n dtype: datetime64[ns]\n \"\"\"\n\n _ceil_example = \"\"\">>> rng.ceil('H')\n DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',\n '2018-01-01 13:00:00'],\n dtype='datetime64[ns]', freq=None)\n\n **Series**\n\n >>> pd.Series(rng).dt.ceil(\"H\")\n 0 2018-01-01 12:00:00\n 1 2018-01-01 12:00:00\n 2 2018-01-01 13:00:00\n dtype: datetime64[ns]\n \"\"\"\n\n def _round(self, freq, mode, ambiguous, nonexistent):\n # round the local times\n values = _ensure_datetimelike_to_i8(self)\n result = round_nsint64(values, mode, freq)\n result = self._maybe_mask_results(result, fill_value=NaT)\n\n dtype = self.dtype\n if is_datetime64tz_dtype(self):\n dtype = None\n return self._ensure_localized(\n self._simple_new(result, dtype=dtype), ambiguous, nonexistent\n )\n\n @Appender((_round_doc + _round_example).format(op=\"round\"))\n def round(self, freq, ambiguous=\"raise\", nonexistent=\"raise\"):\n return self._round(freq, RoundTo.NEAREST_HALF_EVEN, ambiguous, nonexistent)\n\n @Appender((_round_doc + _floor_example).format(op=\"floor\"))\n def floor(self, freq, ambiguous=\"raise\", nonexistent=\"raise\"):\n return self._round(freq, RoundTo.MINUS_INFTY, ambiguous, nonexistent)\n\n @Appender((_round_doc + _ceil_example).format(op=\"ceil\"))\n def ceil(self, freq, ambiguous=\"raise\", nonexistent=\"raise\"):\n return self._round(freq, RoundTo.PLUS_INFTY, ambiguous, nonexistent)\n\n\nclass DatetimeLikeArrayMixin(ExtensionOpsMixin, AttributesMixin, ExtensionArray):\n \"\"\"\n Shared Base/Mixin class for DatetimeArray, TimedeltaArray, PeriodArray\n\n Assumes that __new__/__init__ defines:\n _data\n _freq\n\n and that the inheriting class has methods:\n _generate_range\n \"\"\"\n\n @property\n def _box_func(self):\n \"\"\"\n box function to get object from internal representation\n \"\"\"\n raise AbstractMethodError(self)\n\n def _box_values(self, values):\n \"\"\"\n apply box func to passed values\n \"\"\"\n return lib.map_infer(values, self._box_func)\n\n def __iter__(self):\n return (self._box_func(v) for v in self.asi8)\n\n @property\n def asi8(self) -> np.ndarray:\n \"\"\"\n Integer representation of the values.\n\n Returns\n -------\n ndarray\n An ndarray with int64 dtype.\n \"\"\"\n # do not cache or you'll create a memory leak\n return self._data.view(\"i8\")\n\n @property\n def _ndarray_values(self):\n return self._data\n\n # 
----------------------------------------------------------------\n # Rendering Methods\n\n def _format_native_types(self, na_rep=\"NaT\", date_format=None):\n \"\"\"\n Helper method for astype when converting to strings.\n\n Returns\n -------\n ndarray[str]\n \"\"\"\n raise AbstractMethodError(self)\n\n def _formatter(self, boxed=False):\n # TODO: Remove Datetime & DatetimeTZ formatters.\n return \"'{}'\".format\n\n # ----------------------------------------------------------------\n # Array-Like / EA-Interface Methods\n\n @property\n def nbytes(self):\n return self._data.nbytes\n\n def __array__(self, dtype=None):\n # used for Timedelta/DatetimeArray, overwritten by PeriodArray\n if is_object_dtype(dtype):\n return np.array(list(self), dtype=object)\n return self._data\n\n @property\n def size(self) -> int:\n \"\"\"The number of elements in this array.\"\"\"\n return np.prod(self.shape)\n\n def __len__(self):\n return len(self._data)\n\n def __getitem__(self, key):\n \"\"\"\n This getitem defers to the underlying array, which by-definition can\n only handle list-likes, slices, and integer scalars\n \"\"\"\n\n is_int = lib.is_integer(key)\n if lib.is_scalar(key) and not is_int:\n raise IndexError(\n \"only integers, slices (`:`), ellipsis (`...`), \"\n \"numpy.newaxis (`None`) and integer or boolean \"\n \"arrays are valid indices\"\n )\n\n getitem = self._data.__getitem__\n if is_int:\n val = getitem(key)\n return self._box_func(val)\n\n if com.is_bool_indexer(key):\n key = np.asarray(key, dtype=bool)\n if key.all():\n key = slice(0, None, None)\n else:\n key = lib.maybe_booleans_to_slice(key.view(np.uint8))\n\n is_period = is_period_dtype(self)\n if is_period:\n freq = self.freq\n else:\n freq = None\n if isinstance(key, slice):\n if self.freq is not None and key.step is not None:\n freq = key.step * self.freq\n else:\n freq = self.freq\n elif key is Ellipsis:\n # GH#21282 indexing with Ellipsis is similar to a full slice,\n # should preserve `freq` attribute\n freq = self.freq\n\n result = getitem(key)\n if result.ndim > 1:\n # To support MPL which performs slicing with 2 dim\n # even though it only has 1 dim by definition\n if is_period:\n return self._simple_new(result, dtype=self.dtype, freq=freq)\n return result\n\n return self._simple_new(result, dtype=self.dtype, freq=freq)\n\n def __setitem__(\n self,\n key: Union[int, Sequence[int], Sequence[bool], slice],\n value: Union[NaTType, Any, Sequence[Any]],\n ) -> None:\n # I'm fudging the types a bit here. \"Any\" above really depends\n # on type(self). For PeriodArray, it's Period (or stuff coercible\n # to a period in from_sequence). 
For DatetimeArray, it's Timestamp...\n # I don't know if mypy can do that, possibly with Generics.\n # https://mypy.readthedocs.io/en/latest/generics.html\n if lib.is_scalar(value) and not isna(value):\n value = com.maybe_box_datetimelike(value)\n\n if is_list_like(value):\n is_slice = isinstance(key, slice)\n\n if lib.is_scalar(key):\n raise ValueError(\"setting an array element with a sequence.\")\n\n if not is_slice:\n key = cast(Sequence, key)\n if len(key) != len(value) and not com.is_bool_indexer(key):\n msg = (\n \"shape mismatch: value array of length '{}' does \"\n \"not match indexing result of length '{}'.\"\n )\n raise ValueError(msg.format(len(key), len(value)))\n elif not len(key):\n return\n\n value = type(self)._from_sequence(value, dtype=self.dtype)\n self._check_compatible_with(value)\n value = value.asi8\n elif isinstance(value, self._scalar_type):\n self._check_compatible_with(value)\n value = self._unbox_scalar(value)\n elif is_valid_nat_for_dtype(value, self.dtype):\n value = iNaT\n else:\n msg = (\n \"'value' should be a '{scalar}', 'NaT', or array of those. \"\n \"Got '{typ}' instead.\"\n )\n raise TypeError(\n msg.format(scalar=self._scalar_type.__name__, typ=type(value).__name__)\n )\n self._data[key] = value\n self._maybe_clear_freq()\n\n def _maybe_clear_freq(self):\n # inplace operations like __setitem__ may invalidate the freq of\n # DatetimeArray and TimedeltaArray\n pass\n\n def astype(self, dtype, copy=True):\n # Some notes on cases we don't have to handle here in the base class:\n # 1. PeriodArray.astype handles period -> period\n # 2. DatetimeArray.astype handles conversion between tz.\n # 3. DatetimeArray.astype handles datetime -> period\n from pandas import Categorical\n\n dtype = pandas_dtype(dtype)\n\n if is_object_dtype(dtype):\n return self._box_values(self.asi8)\n elif is_string_dtype(dtype) and not is_categorical_dtype(dtype):\n return self._format_native_types()\n elif is_integer_dtype(dtype):\n # we deliberately ignore int32 vs. int64 here.\n # See https://github.com/pandas-dev/pandas/issues/24381 for more.\n values = self.asi8\n\n if is_unsigned_integer_dtype(dtype):\n # Again, we ignore int32 vs. 
int64\n values = values.view(\"uint64\")\n\n if copy:\n values = values.copy()\n return values\n elif (\n is_datetime_or_timedelta_dtype(dtype)\n and not is_dtype_equal(self.dtype, dtype)\n ) or is_float_dtype(dtype):\n # disallow conversion between datetime/timedelta,\n # and conversions for any datetimelike to float\n msg = \"Cannot cast {name} to dtype {dtype}\"\n raise TypeError(msg.format(name=type(self).__name__, dtype=dtype))\n elif is_categorical_dtype(dtype):\n return Categorical(self, dtype=dtype)\n else:\n return np.asarray(self, dtype=dtype)\n\n def view(self, dtype=None):\n if dtype is None or dtype is self.dtype:\n return type(self)(self._data, dtype=self.dtype)\n return self._data.view(dtype=dtype)\n\n # ------------------------------------------------------------------\n # ExtensionArray Interface\n\n def unique(self):\n result = unique1d(self.asi8)\n return type(self)(result, dtype=self.dtype)\n\n def _validate_fill_value(self, fill_value):\n \"\"\"\n If a fill_value is passed to `take` convert it to an i8 representation,\n raising ValueError if this is not possible.\n\n Parameters\n ----------\n fill_value : object\n\n Returns\n -------\n fill_value : np.int64\n\n Raises\n ------\n ValueError\n \"\"\"\n raise AbstractMethodError(self)\n\n def take(self, indices, allow_fill=False, fill_value=None):\n if allow_fill:\n fill_value = self._validate_fill_value(fill_value)\n\n new_values = take(\n self.asi8, indices, allow_fill=allow_fill, fill_value=fill_value\n )\n\n return type(self)(new_values, dtype=self.dtype)\n\n @classmethod\n def _concat_same_type(cls, to_concat):\n dtypes = {x.dtype for x in to_concat}\n assert len(dtypes) == 1\n dtype = list(dtypes)[0]\n\n values = np.concatenate([x.asi8 for x in to_concat])\n return cls(values, dtype=dtype)\n\n def copy(self):\n values = self.asi8.copy()\n return type(self)._simple_new(values, dtype=self.dtype, freq=self.freq)\n\n def _values_for_factorize(self):\n return self.asi8, iNaT\n\n @classmethod\n def _from_factorized(cls, values, original):\n return cls(values, dtype=original.dtype)\n\n def _values_for_argsort(self):\n return self._data\n\n # ------------------------------------------------------------------\n # Additional array methods\n # These are not part of the EA API, but we implement them because\n # pandas assumes they're there.\n\n def searchsorted(self, value, side=\"left\", sorter=None):\n \"\"\"\n Find indices where elements should be inserted to maintain order.\n\n Find the indices into a sorted array `self` such that, if the\n corresponding elements in `value` were inserted before the indices,\n the order of `self` would be preserved.\n\n Parameters\n ----------\n value : array_like\n Values to insert into `self`.\n side : {'left', 'right'}, optional\n If 'left', the index of the first suitable location found is given.\n If 'right', return the last such index. If there is no suitable\n index, return either 0 or N (where N is the length of `self`).\n sorter : 1-D array_like, optional\n Optional array of integer indices that sort `self` into ascending\n order. 
They are typically the result of ``np.argsort``.\n\n Returns\n -------\n indices : array of ints\n Array of insertion points with the same shape as `value`.\n \"\"\"\n if isinstance(value, str):\n value = self._scalar_from_string(value)\n\n if not (isinstance(value, (self._scalar_type, type(self))) or isna(value)):\n raise ValueError(\n \"Unexpected type for 'value': {valtype}\".format(valtype=type(value))\n )\n\n self._check_compatible_with(value)\n if isinstance(value, type(self)):\n value = value.asi8\n else:\n value = self._unbox_scalar(value)\n\n return self.asi8.searchsorted(value, side=side, sorter=sorter)\n\n def repeat(self, repeats, *args, **kwargs):\n \"\"\"\n Repeat elements of an array.\n\n See Also\n --------\n numpy.ndarray.repeat\n \"\"\"\n nv.validate_repeat(args, kwargs)\n values = self._data.repeat(repeats)\n return type(self)(values.view(\"i8\"), dtype=self.dtype)\n\n def value_counts(self, dropna=False):\n \"\"\"\n Return a Series containing counts of unique values.\n\n Parameters\n ----------\n dropna : boolean, default True\n Don't include counts of NaT values.\n\n Returns\n -------\n Series\n \"\"\"\n from pandas import Series, Index\n\n if dropna:\n values = self[~self.isna()]._data\n else:\n values = self._data\n\n cls = type(self)\n\n result = value_counts(values, sort=False, dropna=dropna)\n index = Index(\n cls(result.index.view(\"i8\"), dtype=self.dtype), name=result.index.name\n )\n return Series(result.values, index=index, name=result.name)\n\n def map(self, mapper):\n # TODO(GH-23179): Add ExtensionArray.map\n # Need to figure out if we want ExtensionArray.map first.\n # If so, then we can refactor IndexOpsMixin._map_values to\n # a standalone function and call from here..\n # Else, just rewrite _map_infer_values to do the right thing.\n from pandas import Index\n\n return Index(self).map(mapper).array\n\n # ------------------------------------------------------------------\n # Null Handling\n\n def isna(self):\n return self._isnan\n\n @property # NB: override with cache_readonly in immutable subclasses\n def _isnan(self):\n \"\"\"\n return if each value is nan\n \"\"\"\n return self.asi8 == iNaT\n\n @property # NB: override with cache_readonly in immutable subclasses\n def _hasnans(self):\n \"\"\"\n return if I have any nans; enables various perf speedups\n \"\"\"\n return bool(self._isnan.any())\n\n def _maybe_mask_results(self, result, fill_value=iNaT, convert=None):\n \"\"\"\n Parameters\n ----------\n result : a ndarray\n fill_value : object, default iNaT\n convert : string/dtype or None\n\n Returns\n -------\n result : ndarray with values replace by the fill_value\n\n mask the result if needed, convert to the provided dtype if its not\n None\n\n This is an internal routine.\n \"\"\"\n\n if self._hasnans:\n if convert:\n result = result.astype(convert)\n if fill_value is None:\n fill_value = np.nan\n result[self._isnan] = fill_value\n return result\n\n def fillna(self, value=None, method=None, limit=None):\n # TODO(GH-20300): remove this\n # Just overriding to ensure that we avoid an astype(object).\n # Either 20300 or a `_values_for_fillna` would avoid this duplication.\n if isinstance(value, ABCSeries):\n value = value.array\n\n value, method = validate_fillna_kwargs(value, method)\n\n mask = self.isna()\n\n if is_array_like(value):\n if len(value) != len(self):\n raise ValueError(\n \"Length of 'value' does not match. 
Got ({}) \"\n \" expected {}\".format(len(value), len(self))\n )\n value = value[mask]\n\n if mask.any():\n if method is not None:\n if method == \"pad\":\n func = missing.pad_1d\n else:\n func = missing.backfill_1d\n\n values = self._data\n if not is_period_dtype(self):\n # For PeriodArray self._data is i8, which gets copied\n # by `func`. Otherwise we need to make a copy manually\n # to avoid modifying `self` in-place.\n values = values.copy()\n\n new_values = func(values, limit=limit, mask=mask)\n if is_datetime64tz_dtype(self):\n # we need to pass int64 values to the constructor to avoid\n # re-localizing incorrectly\n new_values = new_values.view(\"i8\")\n new_values = type(self)(new_values, dtype=self.dtype)\n else:\n # fill with value\n new_values = self.copy()\n new_values[mask] = value\n else:\n new_values = self.copy()\n return new_values\n\n # ------------------------------------------------------------------\n # Frequency Properties/Methods\n\n @property\n def freq(self):\n \"\"\"\n Return the frequency object if it is set, otherwise None.\n \"\"\"\n return self._freq\n\n @freq.setter\n def freq(self, value):\n if value is not None:\n value = frequencies.to_offset(value)\n self._validate_frequency(self, value)\n\n self._freq = value\n\n @property\n def freqstr(self):\n \"\"\"\n Return the frequency object as a string if its set, otherwise None\n \"\"\"\n if self.freq is None:\n return None\n return self.freq.freqstr\n\n @property # NB: override with cache_readonly in immutable subclasses\n def inferred_freq(self):\n \"\"\"\n Tryies to return a string representing a frequency guess,\n generated by infer_freq. Returns None if it can't autodetect the\n frequency.\n \"\"\"\n try:\n return frequencies.infer_freq(self)\n except ValueError:\n return None\n\n @property # NB: override with cache_readonly in immutable subclasses\n def _resolution(self):\n return frequencies.Resolution.get_reso_from_freq(self.freqstr)\n\n @property # NB: override with cache_readonly in immutable subclasses\n def resolution(self):\n \"\"\"\n Returns day, hour, minute, second, millisecond or microsecond\n \"\"\"\n return frequencies.Resolution.get_str(self._resolution)\n\n @classmethod\n def _validate_frequency(cls, index, freq, **kwargs):\n \"\"\"\n Validate that a frequency is compatible with the values of a given\n Datetime Array/Index or Timedelta Array/Index\n\n Parameters\n ----------\n index : DatetimeIndex or TimedeltaIndex\n The index on which to determine if the given frequency is valid\n freq : DateOffset\n The frequency to validate\n \"\"\"\n if is_period_dtype(cls):\n # Frequency validation is not meaningful for Period Array/Index\n return None\n\n inferred = index.inferred_freq\n if index.size == 0 or inferred == freq.freqstr:\n return None\n\n try:\n on_freq = cls._generate_range(\n start=index[0], end=None, periods=len(index), freq=freq, **kwargs\n )\n if not np.array_equal(index.asi8, on_freq.asi8):\n raise ValueError\n except ValueError as e:\n if \"non-fixed\" in str(e):\n # non-fixed frequencies are not meaningful for timedelta64;\n # we retain that error message\n raise e\n # GH#11587 the main way this is reached is if the `np.array_equal`\n # check above is False. 
This can also be reached if index[0]\n # is `NaT`, in which case the call to `cls._generate_range` will\n # raise a ValueError, which we re-raise with a more targeted\n # message.\n raise ValueError(\n \"Inferred frequency {infer} from passed values \"\n \"does not conform to passed frequency {passed}\".format(\n infer=inferred, passed=freq.freqstr\n )\n )\n\n # monotonicity/uniqueness properties are called via frequencies.infer_freq,\n # see GH#23789\n\n @property\n def _is_monotonic_increasing(self):\n return algos.is_monotonic(self.asi8, timelike=True)[0]\n\n @property\n def _is_monotonic_decreasing(self):\n return algos.is_monotonic(self.asi8, timelike=True)[1]\n\n @property\n def _is_unique(self):\n return len(unique1d(self.asi8)) == len(self)\n\n # ------------------------------------------------------------------\n # Arithmetic Methods\n\n # pow is invalid for all three subclasses; TimedeltaArray will override\n # the multiplication and division ops\n __pow__ = make_invalid_op(\"__pow__\")\n __rpow__ = make_invalid_op(\"__rpow__\")\n __mul__ = make_invalid_op(\"__mul__\")\n __rmul__ = make_invalid_op(\"__rmul__\")\n __truediv__ = make_invalid_op(\"__truediv__\")\n __rtruediv__ = make_invalid_op(\"__rtruediv__\")\n __floordiv__ = make_invalid_op(\"__floordiv__\")\n __rfloordiv__ = make_invalid_op(\"__rfloordiv__\")\n __mod__ = make_invalid_op(\"__mod__\")\n __rmod__ = make_invalid_op(\"__rmod__\")\n __divmod__ = make_invalid_op(\"__divmod__\")\n __rdivmod__ = make_invalid_op(\"__rdivmod__\")\n\n def _add_datetimelike_scalar(self, other):\n # Overriden by TimedeltaArray\n raise TypeError(\n \"cannot add {cls} and {typ}\".format(\n cls=type(self).__name__, typ=type(other).__name__\n )\n )\n\n _add_datetime_arraylike = _add_datetimelike_scalar\n\n def _sub_datetimelike_scalar(self, other):\n # Overridden by DatetimeArray\n assert other is not NaT\n raise TypeError(\n \"cannot subtract a datelike from a {cls}\".format(cls=type(self).__name__)\n )\n\n _sub_datetime_arraylike = _sub_datetimelike_scalar\n\n def _sub_period(self, other):\n # Overriden by PeriodArray\n raise TypeError(\n \"cannot subtract Period from a {cls}\".format(cls=type(self).__name__)\n )\n\n def _add_offset(self, offset):\n raise AbstractMethodError(self)\n\n def _add_delta(self, other):\n \"\"\"\n Add a timedelta-like, Tick or TimedeltaIndex-like object\n to self, yielding an int64 numpy array\n\n Parameters\n ----------\n delta : {timedelta, np.timedelta64, Tick,\n TimedeltaIndex, ndarray[timedelta64]}\n\n Returns\n -------\n result : ndarray[int64]\n\n Notes\n -----\n The result's name is set outside of _add_delta by the calling\n method (__add__ or __sub__), if necessary (i.e. 
for Indexes).\n \"\"\"\n if isinstance(other, (Tick, timedelta, np.timedelta64)):\n new_values = self._add_timedeltalike_scalar(other)\n elif is_timedelta64_dtype(other):\n # ndarray[timedelta64] or TimedeltaArray/index\n new_values = self._add_delta_tdi(other)\n\n return new_values\n\n def _add_timedeltalike_scalar(self, other):\n \"\"\"\n Add a delta of a timedeltalike\n return the i8 result view\n \"\"\"\n if isna(other):\n # i.e np.timedelta64(\"NaT\"), not recognized by delta_to_nanoseconds\n new_values = np.empty(len(self), dtype=\"i8\")\n new_values[:] = iNaT\n return new_values\n\n inc = delta_to_nanoseconds(other)\n new_values = checked_add_with_arr(self.asi8, inc, arr_mask=self._isnan).view(\n \"i8\"\n )\n new_values = self._maybe_mask_results(new_values)\n return new_values.view(\"i8\")\n\n def _add_delta_tdi(self, other):\n \"\"\"\n Add a delta of a TimedeltaIndex\n return the i8 result view\n \"\"\"\n if len(self) != len(other):\n raise ValueError(\"cannot add indices of unequal length\")\n\n if isinstance(other, np.ndarray):\n # ndarray[timedelta64]; wrap in TimedeltaIndex for op\n from pandas.core.arrays import TimedeltaArray\n\n other = TimedeltaArray._from_sequence(other)\n\n self_i8 = self.asi8\n other_i8 = other.asi8\n new_values = checked_add_with_arr(\n self_i8, other_i8, arr_mask=self._isnan, b_mask=other._isnan\n )\n if self._hasnans or other._hasnans:\n mask = (self._isnan) | (other._isnan)\n new_values[mask] = iNaT\n return new_values.view(\"i8\")\n\n def _add_nat(self):\n \"\"\"\n Add pd.NaT to self\n \"\"\"\n if is_period_dtype(self):\n raise TypeError(\n \"Cannot add {cls} and {typ}\".format(\n cls=type(self).__name__, typ=type(NaT).__name__\n )\n )\n\n # GH#19124 pd.NaT is treated like a timedelta for both timedelta\n # and datetime dtypes\n result = np.zeros(len(self), dtype=np.int64)\n result.fill(iNaT)\n return type(self)(result, dtype=self.dtype, freq=None)\n\n def _sub_nat(self):\n \"\"\"\n Subtract pd.NaT from self\n \"\"\"\n # GH#19124 Timedelta - datetime is not in general well-defined.\n # We make an exception for pd.NaT, which in this case quacks\n # like a timedelta.\n # For datetime64 dtypes by convention we treat NaT as a datetime, so\n # this subtraction returns a timedelta64 dtype.\n # For period dtype, timedelta64 is a close-enough return dtype.\n result = np.zeros(len(self), dtype=np.int64)\n result.fill(iNaT)\n return result.view(\"timedelta64[ns]\")\n\n def _sub_period_array(self, other):\n \"\"\"\n Subtract a Period Array/Index from self. This is only valid if self\n is itself a Period Array/Index, raises otherwise. 
Both objects must\n have the same frequency.\n\n Parameters\n ----------\n other : PeriodIndex or PeriodArray\n\n Returns\n -------\n result : np.ndarray[object]\n Array of DateOffset objects; nulls represented by NaT.\n \"\"\"\n if not is_period_dtype(self):\n raise TypeError(\n \"cannot subtract {dtype}-dtype from {cls}\".format(\n dtype=other.dtype, cls=type(self).__name__\n )\n )\n\n if len(self) != len(other):\n raise ValueError(\"cannot subtract arrays/indices of unequal length\")\n if self.freq != other.freq:\n msg = DIFFERENT_FREQ.format(\n cls=type(self).__name__, own_freq=self.freqstr, other_freq=other.freqstr\n )\n raise IncompatibleFrequency(msg)\n\n new_values = checked_add_with_arr(\n self.asi8, -other.asi8, arr_mask=self._isnan, b_mask=other._isnan\n )\n\n new_values = np.array([self.freq.base * x for x in new_values])\n if self._hasnans or other._hasnans:\n mask = (self._isnan) | (other._isnan)\n new_values[mask] = NaT\n return new_values\n\n def _addsub_int_array(self, other, op):\n \"\"\"\n Add or subtract array-like of integers equivalent to applying\n `_time_shift` pointwise.\n\n Parameters\n ----------\n other : Index, ExtensionArray, np.ndarray\n integer-dtype\n op : {operator.add, operator.sub}\n\n Returns\n -------\n result : same class as self\n \"\"\"\n # _addsub_int_array is overriden by PeriodArray\n assert not is_period_dtype(self)\n assert op in [operator.add, operator.sub]\n\n if self.freq is None:\n # GH#19123\n raise NullFrequencyError(\"Cannot shift with no freq\")\n\n elif isinstance(self.freq, Tick):\n # easy case where we can convert to timedelta64 operation\n td = Timedelta(self.freq)\n return op(self, td * other)\n\n # We should only get here with DatetimeIndex; dispatch\n # to _addsub_offset_array\n assert not is_timedelta64_dtype(self)\n return op(self, np.array(other) * self.freq)\n\n def _addsub_offset_array(self, other, op):\n \"\"\"\n Add or subtract array-like of DateOffset objects\n\n Parameters\n ----------\n other : Index, np.ndarray\n object-dtype containing pd.DateOffset objects\n op : {operator.add, operator.sub}\n\n Returns\n -------\n result : same class as self\n \"\"\"\n assert op in [operator.add, operator.sub]\n if len(other) == 1:\n return op(self, other[0])\n\n warnings.warn(\n \"Adding/subtracting array of DateOffsets to \"\n \"{cls} not vectorized\".format(cls=type(self).__name__),\n PerformanceWarning,\n )\n\n # For EA self.astype('O') returns a numpy array, not an Index\n left = lib.values_from_object(self.astype(\"O\"))\n\n res_values = op(left, np.array(other))\n kwargs = {}\n if not is_period_dtype(self):\n kwargs[\"freq\"] = \"infer\"\n return self._from_sequence(res_values, **kwargs)\n\n def _time_shift(self, periods, freq=None):\n \"\"\"\n Shift each value by `periods`.\n\n Note this is different from ExtensionArray.shift, which\n shifts the *position* of each element, padding the end with\n missing values.\n\n Parameters\n ----------\n periods : int\n Number of periods to shift by.\n freq : pandas.DateOffset, pandas.Timedelta, or string\n Frequency increment to shift by.\n \"\"\"\n if freq is not None and freq != self.freq:\n if isinstance(freq, str):\n freq = frequencies.to_offset(freq)\n offset = periods * freq\n result = self + offset\n return result\n\n if periods == 0:\n # immutable so OK\n return self.copy()\n\n if self.freq is None:\n raise NullFrequencyError(\"Cannot shift with no freq\")\n\n start = self[0] + periods * self.freq\n end = self[-1] + periods * self.freq\n\n # Note: in the DatetimeTZ case, 
_generate_range will infer the\n # appropriate timezone from `start` and `end`, so tz does not need\n # to be passed explicitly.\n return self._generate_range(start=start, end=end, periods=None, freq=self.freq)\n\n def __add__(self, other):\n other = lib.item_from_zerodim(other)\n if isinstance(other, (ABCSeries, ABCDataFrame, ABCIndexClass)):\n return NotImplemented\n\n # scalar others\n elif other is NaT:\n result = self._add_nat()\n elif isinstance(other, (Tick, timedelta, np.timedelta64)):\n result = self._add_delta(other)\n elif isinstance(other, DateOffset):\n # specifically _not_ a Tick\n result = self._add_offset(other)\n elif isinstance(other, (datetime, np.datetime64)):\n result = self._add_datetimelike_scalar(other)\n elif lib.is_integer(other):\n # This check must come after the check for np.timedelta64\n # as is_integer returns True for these\n if not is_period_dtype(self):\n maybe_integer_op_deprecated(self)\n result = self._time_shift(other)\n\n # array-like others\n elif is_timedelta64_dtype(other):\n # TimedeltaIndex, ndarray[timedelta64]\n result = self._add_delta(other)\n elif is_offsetlike(other):\n # Array/Index of DateOffset objects\n result = self._addsub_offset_array(other, operator.add)\n elif is_datetime64_dtype(other) or is_datetime64tz_dtype(other):\n # DatetimeIndex, ndarray[datetime64]\n return self._add_datetime_arraylike(other)\n elif is_integer_dtype(other):\n if not is_period_dtype(self):\n maybe_integer_op_deprecated(self)\n result = self._addsub_int_array(other, operator.add)\n elif is_float_dtype(other):\n # Explicitly catch invalid dtypes\n raise TypeError(\n \"cannot add {dtype}-dtype to {cls}\".format(\n dtype=other.dtype, cls=type(self).__name__\n )\n )\n elif is_period_dtype(other):\n # if self is a TimedeltaArray and other is a PeriodArray with\n # a timedelta-like (i.e. 
Tick) freq, this operation is valid.\n # Defer to the PeriodArray implementation.\n # In remaining cases, this will end up raising TypeError.\n return NotImplemented\n elif is_extension_array_dtype(other):\n # Categorical op will raise; defer explicitly\n return NotImplemented\n else: # pragma: no cover\n return NotImplemented\n\n if is_timedelta64_dtype(result) and isinstance(result, np.ndarray):\n from pandas.core.arrays import TimedeltaArray\n\n # TODO: infer freq?\n return TimedeltaArray(result)\n return result\n\n def __radd__(self, other):\n # alias for __add__\n return self.__add__(other)\n\n def __sub__(self, other):\n other = lib.item_from_zerodim(other)\n if isinstance(other, (ABCSeries, ABCDataFrame, ABCIndexClass)):\n return NotImplemented\n\n # scalar others\n elif other is NaT:\n result = self._sub_nat()\n elif isinstance(other, (Tick, timedelta, np.timedelta64)):\n result = self._add_delta(-other)\n elif isinstance(other, DateOffset):\n # specifically _not_ a Tick\n result = self._add_offset(-other)\n elif isinstance(other, (datetime, np.datetime64)):\n result = self._sub_datetimelike_scalar(other)\n elif lib.is_integer(other):\n # This check must come after the check for np.timedelta64\n # as is_integer returns True for these\n if not is_period_dtype(self):\n maybe_integer_op_deprecated(self)\n result = self._time_shift(-other)\n\n elif isinstance(other, Period):\n result = self._sub_period(other)\n\n # array-like others\n elif is_timedelta64_dtype(other):\n # TimedeltaIndex, ndarray[timedelta64]\n result = self._add_delta(-other)\n elif is_offsetlike(other):\n # Array/Index of DateOffset objects\n result = self._addsub_offset_array(other, operator.sub)\n elif is_datetime64_dtype(other) or is_datetime64tz_dtype(other):\n # DatetimeIndex, ndarray[datetime64]\n result = self._sub_datetime_arraylike(other)\n elif is_period_dtype(other):\n # PeriodIndex\n result = self._sub_period_array(other)\n elif is_integer_dtype(other):\n if not is_period_dtype(self):\n maybe_integer_op_deprecated(self)\n result = self._addsub_int_array(other, operator.sub)\n elif isinstance(other, ABCIndexClass):\n raise TypeError(\n \"cannot subtract {cls} and {typ}\".format(\n cls=type(self).__name__, typ=type(other).__name__\n )\n )\n elif is_float_dtype(other):\n # Explicitly catch invalid dtypes\n raise TypeError(\n \"cannot subtract {dtype}-dtype from {cls}\".format(\n dtype=other.dtype, cls=type(self).__name__\n )\n )\n elif is_extension_array_dtype(other):\n # Categorical op will raise; defer explicitly\n return NotImplemented\n else: # pragma: no cover\n return NotImplemented\n\n if is_timedelta64_dtype(result) and isinstance(result, np.ndarray):\n from pandas.core.arrays import TimedeltaArray\n\n # TODO: infer freq?\n return TimedeltaArray(result)\n return result\n\n def __rsub__(self, other):\n if is_datetime64_any_dtype(other) and is_timedelta64_dtype(self):\n # ndarray[datetime64] cannot be subtracted from self, so\n # we need to wrap in DatetimeArray/Index and flip the operation\n if not isinstance(other, DatetimeLikeArrayMixin):\n # Avoid down-casting DatetimeIndex\n from pandas.core.arrays import DatetimeArray\n\n other = DatetimeArray(other)\n return other - self\n elif (\n is_datetime64_any_dtype(self)\n and hasattr(other, \"dtype\")\n and not is_datetime64_any_dtype(other)\n ):\n # GH#19959 datetime - datetime is well-defined as timedelta,\n # but any other type - datetime is not well-defined.\n raise TypeError(\n \"cannot subtract {cls} from {typ}\".format(\n cls=type(self).__name__, 
typ=type(other).__name__\n )\n )\n elif is_period_dtype(self) and is_timedelta64_dtype(other):\n # TODO: Can we simplify/generalize these cases at all?\n raise TypeError(\n \"cannot subtract {cls} from {dtype}\".format(\n cls=type(self).__name__, dtype=other.dtype\n )\n )\n return -(self - other)\n\n # FIXME: DTA/TDA/PA inplace methods should actually be inplace, GH#24115\n def __iadd__(self, other):\n # alias for __add__\n return self.__add__(other)\n\n def __isub__(self, other):\n # alias for __sub__\n return self.__sub__(other)\n\n # --------------------------------------------------------------\n # Comparison Methods\n\n def _ensure_localized(\n self, arg, ambiguous=\"raise\", nonexistent=\"raise\", from_utc=False\n ):\n \"\"\"\n Ensure that we are re-localized.\n\n This is for compat as we can then call this on all datetimelike\n arrays generally (ignored for Period/Timedelta)\n\n Parameters\n ----------\n arg : Union[DatetimeLikeArray, DatetimeIndexOpsMixin, ndarray]\n ambiguous : str, bool, or bool-ndarray, default 'raise'\n nonexistent : str, default 'raise'\n from_utc : bool, default False\n If True, localize the i8 ndarray to UTC first before converting to\n the appropriate tz. If False, localize directly to the tz.\n\n Returns\n -------\n localized array\n \"\"\"\n\n # reconvert to local tz\n tz = getattr(self, \"tz\", None)\n if tz is not None:\n if not isinstance(arg, type(self)):\n arg = self._simple_new(arg)\n if from_utc:\n arg = arg.tz_localize(\"UTC\").tz_convert(self.tz)\n else:\n arg = arg.tz_localize(\n self.tz, ambiguous=ambiguous, nonexistent=nonexistent\n )\n return arg\n\n # --------------------------------------------------------------\n # Reductions\n\n def _reduce(self, name, axis=0, skipna=True, **kwargs):\n op = getattr(self, name, None)\n if op:\n return op(skipna=skipna, **kwargs)\n else:\n return super()._reduce(name, skipna, **kwargs)\n\n def min(self, axis=None, skipna=True, *args, **kwargs):\n \"\"\"\n Return the minimum value of the Array or minimum along\n an axis.\n\n See Also\n --------\n numpy.ndarray.min\n Index.min : Return the minimum value in an Index.\n Series.min : Return the minimum value in a Series.\n \"\"\"\n nv.validate_min(args, kwargs)\n nv.validate_minmax_axis(axis)\n\n result = nanops.nanmin(self.asi8, skipna=skipna, mask=self.isna())\n if isna(result):\n # Period._from_ordinal does not handle np.nan gracefully\n return NaT\n return self._box_func(result)\n\n def max(self, axis=None, skipna=True, *args, **kwargs):\n \"\"\"\n Return the maximum value of the Array or maximum along\n an axis.\n\n See Also\n --------\n numpy.ndarray.max\n Index.max : Return the maximum value in an Index.\n Series.max : Return the maximum value in a Series.\n \"\"\"\n # TODO: skipna is broken with max.\n # See https://github.com/pandas-dev/pandas/issues/24265\n nv.validate_max(args, kwargs)\n nv.validate_minmax_axis(axis)\n\n mask = self.isna()\n if skipna:\n values = self[~mask].asi8\n elif mask.any():\n return NaT\n else:\n values = self.asi8\n\n if not len(values):\n # short-circuit for empty max / min\n return NaT\n\n result = nanops.nanmax(values, skipna=skipna)\n # Don't have to worry about NA `result`, since no NA went in.\n return self._box_func(result)\n\n def mean(self, skipna=True):\n \"\"\"\n Return the mean value of the Array.\n\n .. 
versionadded:: 0.25.0\n\n Parameters\n ----------\n skipna : bool, default True\n Whether to ignore any NaT elements\n\n Returns\n -------\n scalar (Timestamp or Timedelta)\n\n See Also\n --------\n numpy.ndarray.mean\n Series.mean : Return the mean value in a Series.\n\n Notes\n -----\n mean is only defined for Datetime and Timedelta dtypes, not for Period.\n \"\"\"\n if is_period_dtype(self):\n # See discussion in GH#24757\n raise TypeError(\n \"mean is not implemented for {cls} since the meaning is \"\n \"ambiguous. An alternative is \"\n \"obj.to_timestamp(how='start').mean()\".format(cls=type(self).__name__)\n )\n\n mask = self.isna()\n if skipna:\n values = self[~mask]\n elif mask.any():\n return NaT\n else:\n values = self\n\n if not len(values):\n # short-circuit for empty max / min\n return NaT\n\n result = nanops.nanmean(values.view(\"i8\"), skipna=skipna)\n # Don't have to worry about NA `result`, since no NA went in.\n return self._box_func(result)\n\n\n# -------------------------------------------------------------------\n# Shared Constructor Helpers\n\n\ndef validate_periods(periods):\n \"\"\"\n If a `periods` argument is passed to the Datetime/Timedelta Array/Index\n constructor, cast it to an integer.\n\n Parameters\n ----------\n periods : None, float, int\n\n Returns\n -------\n periods : None or int\n\n Raises\n ------\n TypeError\n if periods is not a number\n \"\"\"\n if periods is not None:\n if lib.is_float(periods):\n periods = int(periods)\n elif not lib.is_integer(periods):\n raise TypeError(\n \"periods must be a number, got {periods}\".format(periods=periods)\n )\n return periods\n\n\ndef validate_endpoints(closed):\n \"\"\"\n Check that the `closed` argument is among [None, \"left\", \"right\"]\n\n Parameters\n ----------\n closed : {None, \"left\", \"right\"}\n\n Returns\n -------\n left_closed : bool\n right_closed : bool\n\n Raises\n ------\n ValueError : if argument is not among valid values\n \"\"\"\n left_closed = False\n right_closed = False\n\n if closed is None:\n left_closed = True\n right_closed = True\n elif closed == \"left\":\n left_closed = True\n elif closed == \"right\":\n right_closed = True\n else:\n raise ValueError(\"Closed has to be either 'left', 'right' or None\")\n\n return left_closed, right_closed\n\n\ndef validate_inferred_freq(freq, inferred_freq, freq_infer):\n \"\"\"\n If the user passes a freq and another freq is inferred from passed data,\n require that they match.\n\n Parameters\n ----------\n freq : DateOffset or None\n inferred_freq : DateOffset or None\n freq_infer : bool\n\n Returns\n -------\n freq : DateOffset or None\n freq_infer : bool\n\n Notes\n -----\n We assume at this point that `maybe_infer_freq` has been called, so\n `freq` is either a DateOffset object or None.\n \"\"\"\n if inferred_freq is not None:\n if freq is not None and freq != inferred_freq:\n raise ValueError(\n \"Inferred frequency {inferred} from passed \"\n \"values does not conform to passed frequency \"\n \"{passed}\".format(inferred=inferred_freq, passed=freq.freqstr)\n )\n elif freq is None:\n freq = inferred_freq\n freq_infer = False\n\n return freq, freq_infer\n\n\ndef maybe_infer_freq(freq):\n \"\"\"\n Comparing a DateOffset to the string \"infer\" raises, so we need to\n be careful about comparisons. 
Make a dummy variable `freq_infer` to\n signify the case where the given freq is \"infer\" and set freq to None\n to avoid comparison trouble later on.\n\n Parameters\n ----------\n freq : {DateOffset, None, str}\n\n Returns\n -------\n freq : {DateOffset, None}\n freq_infer : bool\n \"\"\"\n freq_infer = False\n if not isinstance(freq, DateOffset):\n # if a passed freq is None, don't infer automatically\n if freq != \"infer\":\n freq = frequencies.to_offset(freq)\n else:\n freq_infer = True\n freq = None\n return freq, freq_infer\n\n\ndef _ensure_datetimelike_to_i8(other, to_utc=False):\n \"\"\"\n Helper for coercing an input scalar or array to i8.\n\n Parameters\n ----------\n other : 1d array\n to_utc : bool, default False\n If True, convert the values to UTC before extracting the i8 values\n If False, extract the i8 values directly.\n\n Returns\n -------\n i8 1d array\n \"\"\"\n from pandas import Index\n\n if lib.is_scalar(other) and isna(other):\n return iNaT\n elif isinstance(other, (ABCPeriodArray, ABCIndexClass, DatetimeLikeArrayMixin)):\n # convert tz if needed\n if getattr(other, \"tz\", None) is not None:\n if to_utc:\n other = other.tz_convert(\"UTC\")\n else:\n other = other.tz_localize(None)\n else:\n try:\n return np.array(other, copy=False).view(\"i8\")\n except TypeError:\n # period array cannot be coerced to int\n other = Index(other)\n return other.asi8\n"
]
| [
[
"pandas.core.dtypes.common.is_string_dtype",
"numpy.array_equal",
"pandas.core.dtypes.common.is_unsigned_integer_dtype",
"pandas._libs.tslibs.c_timestamp.maybe_integer_op_deprecated",
"pandas.core.algorithms.value_counts",
"pandas._libs.tslibs.timedeltas.delta_to_nanoseconds",
"pandas.compat.numpy.function.validate_max",
"pandas.core.dtypes.common.is_datetime64_any_dtype",
"pandas._libs.tslibs.period.IncompatibleFrequency",
"pandas._libs.lib.is_float",
"pandas.core.dtypes.common.is_float_dtype",
"pandas.core.dtypes.common.is_datetime64_dtype",
"pandas._libs.lib.is_scalar",
"pandas._libs.lib.is_integer",
"numpy.concatenate",
"pandas.core.dtypes.missing.isna",
"pandas.core.algorithms.take",
"pandas._libs.tslibs.timestamps.round_nsint64",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"pandas.core.dtypes.common.is_extension_array_dtype",
"pandas.core.nanops.nanmax",
"pandas.core.dtypes.inference.is_array_like",
"pandas.core.dtypes.common.pandas_dtype",
"pandas.compat.numpy.function.validate_minmax_axis",
"numpy.prod",
"pandas.core.dtypes.common.is_offsetlike",
"pandas.core.common.is_bool_indexer",
"pandas.core.dtypes.common.is_object_dtype",
"pandas.compat.numpy.function.validate_repeat",
"pandas.core.arrays.TimedeltaArray",
"pandas._libs.lib.item_from_zerodim",
"pandas.core.dtypes.missing.is_valid_nat_for_dtype",
"pandas.util._validators.validate_fillna_kwargs",
"pandas.core.dtypes.common.is_integer_dtype",
"pandas.util._decorators.Substitution",
"pandas.core.dtypes.common.is_dtype_equal",
"pandas.errors.AbstractMethodError",
"numpy.array",
"pandas.core.arrays.TimedeltaArray._from_sequence",
"pandas.core.dtypes.common.is_period_dtype",
"pandas.tseries.frequencies.Resolution.get_str",
"pandas.core.algorithms.checked_add_with_arr",
"pandas.core.algorithms.unique1d",
"pandas.errors.NullFrequencyError",
"pandas.core.common.maybe_box_datetimelike",
"pandas.tseries.frequencies.Resolution.get_reso_from_freq",
"pandas.core.dtypes.common.is_list_like",
"pandas.tseries.frequencies.infer_freq",
"pandas.core.ops.invalid.make_invalid_op",
"pandas._libs.tslibs.timedeltas.Timedelta",
"pandas.compat.numpy.function.validate_min",
"pandas._libs.lib.map_infer",
"pandas._libs.algos.is_monotonic",
"pandas.tseries.frequencies.to_offset",
"pandas.Index",
"numpy.asarray",
"pandas.core.arrays.DatetimeArray",
"pandas.Categorical",
"pandas.core.dtypes.common.is_datetime_or_timedelta_dtype",
"pandas.Series",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"pandas.core.dtypes.common.is_categorical_dtype"
]
]
|
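For reference, a minimal sketch of the behavior the datetime-like array code in this row implements, using only the public pandas API (pandas ~0.25 assumed): timedelta addition routes through _add_delta, and a passed frequency that conflicts with the values is rejected by _validate_frequency.

import pandas as pd

idx = pd.date_range("2019-01-01", periods=3, freq="D")
print(idx + pd.Timedelta(days=1))  # values shifted by one day via _add_delta

try:
    pd.DatetimeIndex(idx, freq="H")  # inferred freq "D" conflicts with "H"
except ValueError as err:
    print(err)  # "Inferred frequency D ... does not conform to passed frequency H"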
ZJU-ZhangY/OpenAttack | [
"c1d095b595257caa226e902b2c89b36845164f6a"
]
| [
"OpenAttack/utils/albert_model.py"
]
| [
"import numpy as np \nimport pickle, os\nfrom ..classifier import Classifier\n\nclass AlbertModel():\n def __init__(self, model_path, num_labels, max_len = 100, device=\"cpu\"):\n import transformers\n self.tokenizer = transformers.AlbertTokenizer.from_pretrained(model_path)\n self.device = device\n self.model = transformers.AlbertForSequenceClassification.from_pretrained(model_path, num_labels=num_labels,output_hidden_states=False)\n self.model.eval()\n self.model.to(self.device)\n self.hook = self.model.albert.embeddings.word_embeddings.register_forward_hook(self.__hook_fn)\n self.max_len = max_len\n \n self.word2id = pickle.load(open(os.path.join(model_path, \"albert_word2id.pkl\"), \"rb\"))\n \n def to(self, device):\n self.device = device\n self.model.to(self.device)\n return self\n\n def __hook_fn(self, module, input_, output_):\n self.curr_embedding = output_\n output_.retain_grad()\n\n def tokenize_corpus(self,corpus):\n tokenized_list = []\n attention_masks = []\n sent_lens = []\n for i in range(len(corpus)):\n sentence = corpus[i]\n result = self.tokenizer.encode_plus(sentence,max_length = self.max_len,pad_to_max_length = True,return_attention_mask = True, truncation=True)\n sent_lens.append( sum(result[\"attention_mask\"]) - 2 )\n sentence_ids = result['input_ids']\n mask = result['attention_mask']\n attention_masks.append(mask)\n tokenized_list.append(sentence_ids)\n return np.array(tokenized_list),np.array(attention_masks), sent_lens\n\n def predict(self,sen_list, labels=None, tokenize=True):\n import torch\n if tokenize:\n tokeinzed_sen, attentions, sent_lens = self.tokenize_corpus(sen_list)\n else:\n sen_list = [\n sen[:self.max_len - 2] for sen in sen_list\n ]\n sent_lens = [ len(sen) for sen in sen_list ]\n attentions = np.array([\n [1] * (len(sen) + 2) + [0] * (self.max_len - 2 - len(sen))\n for sen in sen_list\n ], dtype='int64')\n sen_list = [\n [self.word2id[token] if token in self.word2id else self.word2id[\"[UNK]\"] for token in sen]\n + [self.word2id[\"[PAD]\"]] * (self.max_len - 2 - len(sen))\n for sen in sen_list\n ]\n tokeinzed_sen = np.array([\n [self.word2id[\"[CLS]\"]] + sen + [self.word2id[\"[SEP]\"]]\n for sen in sen_list\n ], dtype='int64')\n\n result = []\n result_grad = []\n \n if labels is None:\n labels = [0] * len(sen_list)\n labels = torch.LongTensor(labels).to(self.device)\n\n for i in range(len(tokeinzed_sen)):\n curr_sen = tokeinzed_sen[i]\n curr_mask = attentions[i]\n xs = torch.LongTensor([curr_sen]).to(self.device)\n masks = torch.LongTensor([curr_mask]).to(self.device)\n \n loss, logits = self.model(input_ids = xs,attention_mask = masks, labels=labels[i:i+1])\n logits = torch.nn.functional.softmax(logits,dim=-1)\n loss = - loss\n loss.backward()\n result_grad.append(self.curr_embedding.grad[0].clone())\n result.append(logits.cpu().detach().numpy()[0])\n self.curr_embedding.grad.zero_()\n\n max_len = max(sent_lens)\n result = np.array(result)\n result_grad = torch.stack(result_grad).cpu().numpy()[:, 1:1 + max_len]\n return result, result_grad\n\nclass AlbertClassifier(Classifier):\n def __init__(self, model_path, num_labels, max_len = 100, device=\"cpu\"):\n self.__model = AlbertModel(model_path, num_labels, max_len, device)\n self.word2id = self.__model.word2id\n self.embedding = self.__model.model.albert.embeddings.word_embeddings.weight.detach().cpu().numpy()\n \n def to(self, device):\n self.__model.to(device)\n return self\n \n def get_prob(self, input_):\n return self.__model.predict(input_, [0] * len(input_))[0]\n \n def get_grad(self, 
input_, labels):\n return self.__model.predict(input_, labels, tokenize=False)"
]
| [
[
"numpy.array",
"torch.LongTensor",
"torch.stack",
"torch.nn.functional.softmax"
]
]
|
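A minimal usage sketch for the AlbertClassifier defined in this row. The model directory name is hypothetical; it must contain the fine-tuned ALBERT weights plus the albert_word2id.pkl vocabulary file the constructor loads.

# hypothetical directory with a fine-tuned 2-class ALBERT model
clf = AlbertClassifier("./albert_sst2", num_labels=2, max_len=100, device="cpu")
# softmax probabilities, shape (n_sentences, num_labels)
probs = clf.get_prob(["a moving and heartfelt film", "a dull, lifeless mess"])
# gradients w.r.t. token embeddings for pre-tokenized input
probs2, grads = clf.get_grad([["a", "moving", "film"]], labels=[1])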
Timo1997/Simplex | [
"239e79f60faa6a6cf46b6cce4d5798118c4630ed"
]
| [
"simplex_algorithm.py"
]
| [
"\nimport pandas as pd\nimport numpy as np\nimport copy\nimport sympy as sp\nfrom sympy import sympify\n\n\ndef get_pivotzeile(copy_tableau, pivot_spalte, anzahl_zeilen):\n # soll original Tableau nicht ändern\n copy_tableau = copy.deepcopy(copy_tableau)\n # wähle Ressourcenverbrauchskoeffizienten der Pivotspalte\n pivot_spalte_values = copy_tableau.iloc[copy_tableau.index.difference([0, 1, (anzahl_zeilen-1), (anzahl_zeilen-2)]), pivot_spalte]\n # wähle Menge der Restriktionen\n quantity = copy_tableau.iloc[copy_tableau.index.difference([0, 1, (anzahl_zeilen-1), (anzahl_zeilen-2)]), 2]\n #verhinden von teilen durch negative Zahlen und 0\n pivot_spalte_values.mask(pivot_spalte_values <= 0 , np.nan, inplace = True)\n #Hilfsmatrix zum ermitteln der Pivotspalte\n copy_tableau = quantity / pivot_spalte_values\n #übergabe der Zeilenid mit dem kleinsten Wert\n return copy_tableau.astype(float).idxmin(skipna=True)\n\n \n \ndef get_pivotspalte(copy_tableau, infinite):\n # soll original Tableau nicht ändern\n copy_tableau = copy.deepcopy(copy_tableau)\n #Schleife über alle Spalten\n for column in copy_tableau:\n #nur Zeilen mit Ressourcenverbrauchskoeffizienten werden angesehen\n if column != 0 and column != 1 and column != 2:\n #zum Berechnen der größten cj-zj Zeile muss wenn nötig M durch ansatzweise unendlich ersetzt werden\n if isinstance(copy_tableau.iloc[-1,column], sp.Basic): # Filtern der Felder mit M\n copy_tableau.iloc[-1,column] = copy_tableau.iloc[-1,column].subs(infinite, 9999)\n copy_tableau.iloc[-1,column] = int(copy_tableau.iloc[-1,column])\n #bestimmen des Spaltenid, welche den größten Wert enthält\n pivot_spalte = copy_tableau.iloc[-1,3:].astype(float).idxmax(axis=0)\n return pivot_spalte\n\n#-----------------------------------------------------------------------------\n\ndef update_simplex_tableau(copy_tableau, pivot_zeile, pivot_spalte, anzahl_zeilen):\n #Pivotelelement wird auf Wert 1 gebracht indem man die Zeile durch das Pivotelement teilt\n copy_tableau.iloc[pivot_zeile, 2:] = (copy_tableau.iloc[pivot_zeile, 2:] / copy_tableau.iloc[pivot_zeile,pivot_spalte])\n #neue Basisvariable wird durch alte getauscht\n copy_tableau = update_pivotzeile(copy_tableau, pivot_zeile, pivot_spalte)\n #aktualisiere die restlichen Restritkionsmengen und die Ressourenverbrauchskoeffizienten\n copy_tableau = update_basis_variables(copy_tableau, pivot_zeile, pivot_spalte, anzahl_zeilen)\n return copy_tableau\n\ndef update_pivotzeile(copy_tableau, alte_basis_var, neue_basis_var):\n #aktualisiere den cj Wert der neuen Basisvariable\n copy_tableau.iloc[alte_basis_var, 0] = copy_tableau.iloc[0, neue_basis_var] \n #aktualisiere den Namen der neuen Basisvariable\n copy_tableau.iloc[alte_basis_var, 1] = copy_tableau.iloc[1, neue_basis_var]\n return copy_tableau\n\ndef update_basis_variables(copy_tableau, pivot_zeile, pivot_spalte, anzahl_zeilen): \n for index in copy_tableau.index:\n #wähle jede Zeile der gleich bleibenden Basisvariablen und bringen die Pivotspalte auf 0\n if index != pivot_zeile and index != 0 and index != 1 and index != anzahl_zeilen-1 and index != anzahl_zeilen-2: \n copy_tableau.iloc[index, copy_tableau.columns.difference([0, 1], sort=False)] = copy_tableau.iloc[index, copy_tableau.columns.difference([0,1], sort=False)] - ((copy_tableau.iloc[pivot_zeile, copy_tableau.columns.difference([0, 1], sort=False)] * copy_tableau.iloc[index, pivot_spalte])) \n \n return copy_tableau\n\n#----------------------------------------------------------------------------\ndef get_cj_zj(copy_tableau):\n 
#print(anzahl_zeilen)\n anzahl_zeilen = len(copy_tableau.index)\n #berechne Zeile zj \n for column in range(0, len(copy_tableau.columns)):\n if column != 0 and column != 1:\n cj_basisvar = copy_tableau.iloc[copy_tableau.index.difference([0,1, anzahl_zeilen-1, anzahl_zeilen-2], sort=False ), 0]\n restr_var = copy_tableau.iloc[copy_tableau.index.difference([0,1, anzahl_zeilen-1, anzahl_zeilen-2], sort=False ), column] \n temp = cj_basisvar * restr_var\n copy_tableau.iloc[-2, column] = temp.sum()\n \n \n \n \n #berechne Zeile cj-zj\n copy_tableau.iloc[-1, copy_tableau.columns.difference([0, 1, 2], sort=False )] = copy_tableau.iloc[0, copy_tableau.columns.difference([0 ,1 ,2], sort=False )] - copy_tableau.iloc[-2, copy_tableau.columns.difference([0, 1,2], sort=False )]\n return copy_tableau\n\n#Berechne maximalen cj-zj Wert\ndef get_max_cj_zj(copy_tableau, infinite):\n copy_tableau = copy.deepcopy(copy_tableau)\n for column in copy_tableau:\n if column != 0 and column != 1 and column != 2:\n if isinstance(copy_tableau.iloc[-1,column], sp.Expr):\n copy_tableau.iloc[-1,column] = copy_tableau.iloc[-1,column].subs(infinite, 9999)\n copy_tableau.iloc[-1,column] = int(copy_tableau.iloc[-1,column])\n max_value = copy_tableau.iloc[-1,3:].astype(float).max(axis=0)\n return max_value\n\n\n#Prüfe auf Ausführbarkeit \ndef check_infeasibility(last_tableau, liste_meldungen, finished):\n #Wenn in der finalen Lösungsmenge ein M ist, ist auch eine künstliche Variable in der Lösung\n #prüfe ob M vorhanden ist und ob eine Lösung gefunden wurde\n if isinstance(last_tableau.iloc[-2,2], sp.Basic) and finished: \n liste_meldungen.append(\"Spezialfall: Unausführbarkeit (Infeasibility) -> Falls ein optimales Tableau eine künstliche Variable enthält, ist das Problem unlösbar („infeasible“).\")\n\n \n#Prüfe auf unbeschraenkten Lösungsraum\ndef check_unbeschraenkter_loesungsraum(check, liste_meldungen):\n #Wenn die Pivotzeile keine Zahl enthält wurde konnte kein Wert berechnet werden\n if np.isnan(check):\n liste_meldungen.append(\"Spezialfall: Unbeschränkter Lösungsraum -> keine zulässige Pivotzeile => Lösungsraum unbeschränkt.\")\n return True\n else:\n return False\n \ndef simplex_algorithm(tableau, counter_limit, infinite):\n anzahl_zeilen = len(tableau.index)\n counter = 0 #Zähler für die Anzahl an Iterationen bis abgebrochen wird\n ende = False #Überprüfung ob der Simplex ein Ergebnis gefunden hat\n Meldungen = [] # Liste für die Fehlermeldung wird erzeugt\n list_pivot_elements = []\n list_tableaus = [copy.deepcopy(tableau.fillna(''))] # Anfangstableau wird in eine liste kopiert\n \n #Solange cj-zj noch einen positiven Wert hat, wird der Simplex Algorithmus ausgeführt\n while get_max_cj_zj(tableau, infinite) > 0 :\n Meldungen.append([]) #erzeuge eine Liste für Meldunge (bezieht sich auf vorheriges Tableau)\n Pivotspalte = get_pivotspalte(tableau, infinite)\n Pivotzeile = get_pivotzeile(tableau, Pivotspalte, anzahl_zeilen)\n list_pivot_elements.append([Pivotzeile, Pivotspalte])\n if check_unbeschraenkter_loesungsraum(Pivotzeile, Meldungen[counter]):\n #wenn der Lösungsraum unbeschränkt ist, wird abgebrochen\n break\n \n update_simplex_tableau(tableau, Pivotzeile, Pivotspalte, anzahl_zeilen)\n tableau = get_cj_zj(tableau)\n\n tableau = tableau.fillna('') #alle unnötigen Felder werden geleert\n list_tableaus.append(copy.deepcopy(tableau)) #füge das neue Tableau wieder in die Liste hinzu\n\n counter += 1\n if counter == counter_limit:\n break\n\n if get_max_cj_zj(tableau, infinite) <= 0:\n #Überprüfung ob ein Ergebnis 
gefunden wurde\n ende = True\n\n #Meldungen für das letzte Tableau \n Meldungen.append([])\n list_pivot_elements.append([None,None])\n\n # kontrolliere Lösbarkeit\n check_infeasibility(list_tableaus[-1], Meldungen[-1], ende )\n \n return list_tableaus, Meldungen, list_pivot_elements\n \n"
]
| [
[
"numpy.isnan"
]
]
|
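The ratio test performed by get_pivotzeile in this row, restated as a standalone numpy sketch with hypothetical numbers: divide the restriction quantities by the pivot-column coefficients, exclude non-positive coefficients, and take the row with the smallest ratio.

import numpy as np

pivot_col = np.array([2.0, -1.0, 4.0])  # pivot-column coefficients per restriction
quantity = np.array([8.0, 5.0, 6.0])    # right-hand sides of the restrictions
ratios = np.divide(quantity, pivot_col,
                   out=np.full_like(quantity, np.nan),
                   where=pivot_col > 0)  # NaN where the coefficient is <= 0
print(np.nanargmin(ratios))             # row 2, since 6/4 < 8/2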
mgwillia/vissl | [
"513fc1f4bb8f99161f1cd1d7c23e65ac8016e77f"
]
| [
"vissl/trainer/train_steps/standard_train_step.py"
]
| [
"# Copyright (c) Facebook, Inc. and its affiliates.\n\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\"\"\"\nThis is the train step that\"s most commonly used in most of the model trainings.\n\"\"\"\nimport logging\n\nimport contextlib\nfrom types import SimpleNamespace\n\nimport torch\nfrom classy_vision.generic.distributed_util import all_reduce_mean\nfrom classy_vision.tasks import ClassyTask\nfrom classy_vision.tasks.classification_task import AmpType\nfrom vissl.hooks import SSLClassyHookFunctions\nfrom vissl.trainer.train_steps import register_train_step\nfrom vissl.utils.activation_checkpointing import (\n manual_gradient_all_reduce,\n manual_sync_params,\n)\nfrom vissl.utils.misc import is_apex_available\nfrom vissl.utils.perf_stats import PerfTimer\nfrom vissl.utils.profiler import record_function\n\nif is_apex_available():\n import apex\n\n# LastBatchInfo will typically hold\n# the last samples, target, loss and output.\n# More attributes can be added as needed in dependent codeblocks\nLastBatchInfo = SimpleNamespace\n\n\ndef construct_sample_for_model(batch_data, task):\n \"\"\"\n Given the input batch from the dataloader, verify the input is\n as expected: the input data and target data is present in the\n batch.\n In case of multi-input trainings like PIRL, make sure the data\n is in right format i.e. the multiple input should be nested\n under a common key \"input\".\n \"\"\"\n sample_key_names = task.data_and_label_keys\n inp_key, target_key = sample_key_names[\"input\"], sample_key_names[\"target\"]\n all_keys = inp_key + target_key\n\n assert len(inp_key) + len(target_key) <= len(\n batch_data\n ), \"Number of input and target keys in batch and train config don't match.\"\n\n # every input should be a list. 
The list corresponds to various data sources\n # and hence could be used to handle several data modalities.\n for key in all_keys:\n assert isinstance(batch_data[key], list), f\"key: {key} input is not a list\"\n assert (\n len(batch_data[key]) == 1\n ), \"Please modify your train step to handle multi-modal input\"\n\n # single input case\n if len(sample_key_names[\"input\"]) == 1 and len(sample_key_names[\"target\"]) == 1:\n sample = {\n \"input\": batch_data[inp_key[0]][0],\n \"target\": batch_data[target_key[0]][0],\n \"data_valid\": batch_data[\"data_valid\"][0],\n }\n\n # multi-input case (example in PIRL, we pass image and patches both).\n # we nest all these under the sample[\"input\"]\n elif len(sample_key_names[\"input\"]) > 1:\n sample = {\"input\": {}, \"target\": {}, \"data_valid\": None}\n for key in inp_key:\n sample[\"input\"][key] = batch_data[key][0]\n\n if len(target_key) > 1:\n for key in target_key:\n sample[\"target\"][key] = batch_data[key][0]\n else:\n sample[\"target\"] = batch_data[target_key[0]][0]\n sample[\"data_valid\"] = batch_data[\"data_valid\"][0]\n\n # copy the other keys as-is, method dependent\n for k in batch_data.keys():\n if k not in all_keys:\n sample[k] = batch_data[k]\n\n return sample\n\n\n@register_train_step(\"distill_train_step\")\ndef distill_train_step(task):\n \"\"\"\n Single training iteration loop of the model.\n\n Performs: data read, forward, loss computation, backward, optimizer step, parameter updates.\n\n Various intermediate steps are also performed:\n - logging the training loss, training eta, LR, etc to loggers\n - logging to tensorboard,\n - performing any self-supervised method specific operations (like in MoCo approach, the\n momentum encoder is updated), computing the scores in swav\n - checkpointing model if user wants to checkpoint in the middle\n of an epoch\n \"\"\"\n assert isinstance(task, ClassyTask), \"task is not instance of ClassyTask\"\n\n # reset the last batch info at every step\n task.last_batch = LastBatchInfo()\n\n # We'll time train_step and some of its sections, and accumulate values\n # into perf_stats if it were defined in local_variables:\n perf_stats = task.perf_stats\n timer_train_step = PerfTimer(\"train_step_total\", perf_stats)\n timer_train_step.start()\n\n # Process next sample\n with PerfTimer(\"read_sample\", perf_stats):\n sample = next(task.data_iterator)\n\n sample = task.post_process_batch(sample)\n sample = construct_sample_for_model(sample, task)\n\n # Only need gradients during training\n grad_context = torch.enable_grad() if task.train else torch.no_grad()\n ddp_context = (\n task.model.no_sync()\n if task.enable_manual_gradient_reduction\n else contextlib.suppress()\n )\n torch_amp_context = (\n torch.cuda.amp.autocast()\n if task.amp_type == AmpType.PYTORCH\n else contextlib.suppress()\n )\n\n with grad_context, ddp_context, torch_amp_context:\n # Forward pass of the model\n with PerfTimer(\"forward\", perf_stats), record_function(\"forward\"):\n if task.enable_manual_gradient_reduction:\n # Manually sync params and buffers for DDP.\n manual_sync_params(task.model)\n student_output = task.model(sample[\"input\"])\n with torch.no_grad():\n teacher_output = task.teacher(sample[\"input\"])\n #feats = []\n #feat = task.teacher(torch.cat(sample[\"input\"][:2]))\n #feats.append(feat[0])\n #feat = task.teacher(torch.cat(sample[\"input\"][2:]))\n #feats.append(feat[0])\n #teacher_output = [torch.cat(feats)]\n\n # If the model outputs only one tensor, we take it out of the list.\n if 
len(student_output) == 1:\n student_output = student_output[0]\n teacher_output = teacher_output[0]\n\n task.last_batch.sample = sample\n task.last_batch.model_output = student_output\n target = sample[\"target\"]\n\n # Run hooks on forward pass\n task.run_hooks(SSLClassyHookFunctions.on_forward.name)\n\n #logging.info(student_output.shape)\n #logging.info(teacher_output.shape)\n\n # Compute loss\n with PerfTimer(\"loss_compute\", perf_stats), record_function(\"loss_compute\"):\n local_loss = task.loss(student_output, teacher_output, target)\n\n # Reduce the loss value across all nodes and gpus.\n with PerfTimer(\"loss_all_reduce\", perf_stats):\n loss = local_loss.detach().clone()\n task.last_batch.loss = all_reduce_mean(loss)\n\n task.losses.append(task.last_batch.loss.data.cpu().item() * target.size(0))\n\n # Update meters\n if len(task.meters) > 0 and (\n (task.train and task.config[\"METERS\"][\"enable_training_meter\"])\n or (not task.train)\n ):\n with PerfTimer(\"meters_update\", perf_stats):\n if isinstance(student_output, list):\n student_output_cpu = [x.cpu() for x in student_output]\n else:\n student_output_cpu = student_output.cpu()\n\n for meter in task.meters:\n meter.update(student_output_cpu, target.detach().cpu())\n\n task.last_batch.model_output = student_output\n task.last_batch.target = target\n\n # Update the iteration number, check loss is not NaN and measure batch time\n # now if it's a test phase since test phase doesn't have update step.\n task.run_hooks(SSLClassyHookFunctions.on_loss_and_meter.name)\n\n # Run backward now and update the optimizer\n if task.train:\n with PerfTimer(\"backward\", perf_stats), record_function(\"backward\"):\n\n task.optimizer.zero_grad()\n if task.amp_type == AmpType.APEX:\n with apex.amp.scale_loss(\n local_loss, task.optimizer.optimizer\n ) as scaled_loss:\n scaled_loss.backward()\n if task.enable_manual_gradient_reduction:\n manual_gradient_all_reduce(task.model)\n\n elif task.amp_type == AmpType.PYTORCH:\n task.amp_grad_scaler.scale(local_loss).backward()\n if task.enable_manual_gradient_reduction:\n manual_gradient_all_reduce(task.model)\n else:\n local_loss.backward()\n if task.enable_manual_gradient_reduction:\n manual_gradient_all_reduce(task.model)\n\n task.run_hooks(SSLClassyHookFunctions.on_backward.name)\n\n # Stepping the optimizer also updates learning rate, momentum etc\n # according to the schedulers (if any).\n with PerfTimer(\"optimizer_step\", perf_stats), record_function(\"optimizer_step\"):\n assert task.where < 1.0, (\n \"Optimizer being called with where=1.0. This should not happen \"\n \"as where=1.0 means training is already finished. Please debug your \"\n \"training setup. 
A common issue is the data sampler resuming \"\n \"where you are checkpointing model at every iterations but not using \"\n \"the stateful data sampler OR there's an issue in properly resuming the \"\n \"data sampler.\"\n )\n if task.amp_type == AmpType.PYTORCH:\n task.amp_grad_scaler.step(task.optimizer, where=task.where)\n task.amp_grad_scaler.update()\n else:\n task.optimizer.step(where=task.where)\n task.run_hooks(SSLClassyHookFunctions.on_update.name)\n task.num_updates += task.get_global_batchsize()\n\n timer_train_step.stop()\n timer_train_step.record()\n\n return task\n\n\n@register_train_step(\"standard_train_step\")\ndef standard_train_step(task):\n \"\"\"\n Single training iteration loop of the model.\n\n Performs: data read, forward, loss computation, backward, optimizer step, parameter updates.\n\n Various intermediate steps are also performed:\n - logging the training loss, training eta, LR, etc to loggers\n - logging to tensorboard,\n - performing any self-supervised method specific operations (like in MoCo approach, the\n momentum encoder is updated), computing the scores in swav\n - checkpointing model if user wants to checkpoint in the middle\n of an epoch\n \"\"\"\n assert isinstance(task, ClassyTask), \"task is not instance of ClassyTask\"\n\n # reset the last batch info at every step\n task.last_batch = LastBatchInfo()\n\n # We'll time train_step and some of its sections, and accumulate values\n # into perf_stats if it were defined in local_variables:\n perf_stats = task.perf_stats\n timer_train_step = PerfTimer(\"train_step_total\", perf_stats)\n timer_train_step.start()\n\n # Process next sample\n with PerfTimer(\"read_sample\", perf_stats):\n sample = next(task.data_iterator)\n\n sample = task.post_process_batch(sample)\n sample = construct_sample_for_model(sample, task)\n\n # Only need gradients during training\n grad_context = torch.enable_grad() if task.train else torch.no_grad()\n ddp_context = (\n task.model.no_sync()\n if task.enable_manual_gradient_reduction\n else contextlib.suppress()\n )\n torch_amp_context = (\n torch.cuda.amp.autocast()\n if task.amp_type == AmpType.PYTORCH\n else contextlib.suppress()\n )\n\n with grad_context, ddp_context, torch_amp_context:\n # Forward pass of the model\n with PerfTimer(\"forward\", perf_stats), record_function(\"forward\"):\n if task.enable_manual_gradient_reduction:\n # Manually sync params and buffers for DDP.\n manual_sync_params(task.model)\n model_output = task.model(sample[\"input\"])\n\n # If the model outputs only one tensor, we take it out of the list.\n if len(model_output) == 1:\n model_output = model_output[0]\n\n task.last_batch.sample = sample\n task.last_batch.model_output = model_output\n target = sample[\"target\"]\n\n # Run hooks on forward pass\n task.run_hooks(SSLClassyHookFunctions.on_forward.name)\n\n #logging.info(model_output.shape)\n\n # Compute loss\n with PerfTimer(\"loss_compute\", perf_stats), record_function(\"loss_compute\"):\n local_loss = task.loss(model_output, target)\n\n # Reduce the loss value across all nodes and gpus.\n with PerfTimer(\"loss_all_reduce\", perf_stats):\n loss = local_loss.detach().clone()\n task.last_batch.loss = all_reduce_mean(loss)\n\n task.losses.append(task.last_batch.loss.data.cpu().item() * target.size(0))\n\n # Update meters\n if len(task.meters) > 0 and (\n (task.train and task.config[\"METERS\"][\"enable_training_meter\"])\n or (not task.train)\n ):\n with PerfTimer(\"meters_update\", perf_stats):\n if isinstance(model_output, list):\n model_output_cpu 
= [x.cpu() for x in model_output]\n else:\n model_output_cpu = model_output.cpu()\n\n for meter in task.meters:\n meter.update(model_output_cpu, target.detach().cpu())\n\n task.last_batch.model_output = model_output\n task.last_batch.target = target\n\n # Update the iteration number, check loss is not NaN and measure batch time\n # now if it's a test phase since test phase doesn't have update step.\n task.run_hooks(SSLClassyHookFunctions.on_loss_and_meter.name)\n\n # Run backward now and update the optimizer\n if task.train:\n with PerfTimer(\"backward\", perf_stats), record_function(\"backward\"):\n\n task.optimizer.zero_grad()\n if task.amp_type == AmpType.APEX:\n with apex.amp.scale_loss(\n local_loss, task.optimizer.optimizer\n ) as scaled_loss:\n scaled_loss.backward()\n if task.enable_manual_gradient_reduction:\n manual_gradient_all_reduce(task.model)\n\n elif task.amp_type == AmpType.PYTORCH:\n task.amp_grad_scaler.scale(local_loss).backward()\n if task.enable_manual_gradient_reduction:\n manual_gradient_all_reduce(task.model)\n else:\n local_loss.backward()\n if task.enable_manual_gradient_reduction:\n manual_gradient_all_reduce(task.model)\n\n task.run_hooks(SSLClassyHookFunctions.on_backward.name)\n\n # Stepping the optimizer also updates learning rate, momentum etc\n # according to the schedulers (if any).\n with PerfTimer(\"optimizer_step\", perf_stats), record_function(\"optimizer_step\"):\n assert task.where < 1.0, (\n \"Optimizer being called with where=1.0. This should not happen \"\n \"as where=1.0 means training is already finished. Please debug your \"\n \"training setup. A common issue is the data sampler resuming \"\n \"where you are checkpointing model at every iterations but not using \"\n \"the stateful data sampler OR there's an issue in properly resuming the \"\n \"data sampler.\"\n )\n if task.amp_type == AmpType.PYTORCH:\n task.amp_grad_scaler.step(task.optimizer, where=task.where)\n task.amp_grad_scaler.update()\n else:\n task.optimizer.step(where=task.where)\n task.run_hooks(SSLClassyHookFunctions.on_update.name)\n task.num_updates += task.get_global_batchsize()\n\n timer_train_step.stop()\n timer_train_step.record()\n\n return task\n"
]
| [
[
"torch.enable_grad",
"torch.cuda.amp.autocast",
"torch.no_grad"
]
]
|
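The AMP branching shared by both train steps in this row, reduced to a self-contained plain-PyTorch sketch with a hypothetical model and random data: when AMP is active (the AmpType.PYTORCH path) the loss is scaled by a GradScaler before backward; otherwise backward runs on the raw loss, since the scaler calls degrade to pass-throughs when disabled.

import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
use_amp = device == "cuda"  # stands in for task.amp_type == AmpType.PYTORCH

model = torch.nn.Linear(4, 2).to(device)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
scaler = torch.cuda.amp.GradScaler(enabled=use_amp)

x = torch.randn(8, 4, device=device)
y = torch.randint(0, 2, (8,), device=device)
with torch.cuda.amp.autocast(enabled=use_amp):
    loss = torch.nn.functional.cross_entropy(model(x), y)

opt.zero_grad()
scaler.scale(loss).backward()  # identity scaling when AMP is disabled
scaler.step(opt)               # unscales grads; skips the step on inf/NaN
scaler.update()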
jvishnuvardhan/agents | [
"b1c50a955876c16eac03ad1274c455a2d0c9cc7d"
]
| [
"tf_agents/policies/py_tf_eager_policy.py"
]
| [
"# coding=utf-8\n# Copyright 2020 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Converts tf_policies when working in eager mode to py_policies.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nfrom typing import Optional, Text\nfrom absl import logging\n\nimport gin\nimport tensorflow as tf\n\nfrom tf_agents.policies import policy_saver\nfrom tf_agents.policies import py_policy\nfrom tf_agents.policies import tf_policy\nfrom tf_agents.specs import tensor_spec\nfrom tf_agents.trajectories import time_step as ts\nfrom tf_agents.typing import types\nfrom tf_agents.utils import common\nfrom tf_agents.utils import nest_utils\n\n\[email protected]\nclass PyTFEagerPolicyBase(py_policy.PyPolicy):\n \"\"\"Base class for py_policy instances of TF policies in Eager mode.\n\n Handles adding and removing batch dimensions from the actions and time_steps.\n Note if you have a tf_policy you should directly use the PyTFEagerPolicy class\n instead of this Base.\n \"\"\"\n\n def __init__(self,\n policy: tf_policy.TFPolicy,\n time_step_spec: ts.TimeStep,\n action_spec: types.NestedArraySpec,\n policy_state_spec: types.NestedArraySpec,\n info_spec: types.NestedArraySpec,\n use_tf_function: bool = False,\n batch_time_steps=True):\n \"\"\"Creates a new instance of the policy.\n\n Args:\n policy: `tf_policy.TFPolicy` instance to wrap and expose as a py_policy.\n time_step_spec: A `TimeStep` ArraySpec of the expected time_steps. Usually\n provided by the user to the subclass.\n action_spec: A nest of BoundedArraySpec representing the actions. Usually\n provided by the user to the subclass.\n policy_state_spec: A nest of ArraySpec representing the policy state.\n Provided by the subclass, not directly by the user.\n info_spec: A nest of ArraySpec representing the policy info. Provided by\n the subclass, not directly by the user.\n use_tf_function: Wraps the use of `policy.action` in a tf.function call\n which can help speed up execution.\n batch_time_steps: Wether time_steps should be batched before being passed\n to the wrapped policy. 
Leave as True unless you are dealing with a\n batched environment, in which case you want to skip the batching as\n that dim will already be present.\n \"\"\"\n self._policy = policy\n self._use_tf_function = use_tf_function\n if self._use_tf_function:\n self._policy_action_fn = common.function(policy.action)\n else:\n self._policy_action_fn = policy.action\n self._batch_time_steps = batch_time_steps\n super(PyTFEagerPolicyBase, self).__init__(time_step_spec, action_spec,\n policy_state_spec, info_spec)\n\n def variables(self):\n return tf.nest.map_structure(lambda t: t.numpy(), self._policy.variables())\n\n def _get_initial_state(self, batch_size):\n if batch_size is None:\n batch_size = 0\n return self._policy.get_initial_state(batch_size=batch_size)\n\n def _action(self, time_step, policy_state, seed: Optional[types.Seed] = None):\n if seed is not None and self._use_tf_function:\n logging.warning(\n 'Using `seed` may force a retrace for each call to `action`.')\n if self._batch_time_steps:\n time_step = nest_utils.batch_nested_array(time_step)\n # Avoid passing numpy arrays to avoid retracing of the tf.function.\n time_step = tf.nest.map_structure(tf.convert_to_tensor, time_step)\n if seed is not None:\n policy_step = self._policy_action_fn(time_step, policy_state, seed=seed)\n else:\n policy_step = self._policy_action_fn(time_step, policy_state)\n if not self._batch_time_steps:\n return policy_step\n return policy_step._replace(\n action=nest_utils.unbatch_nested_tensors_to_arrays(policy_step.action),\n # We intentionally do not convert the `state` so it is outputted as the\n # underlying policy generated it (i.e. in the form of a Tensor) which is\n # not necessarily compatible with a py-policy. However, we do so since\n # the `state` is fed back to the policy. So if it was converted, it'd be\n # required to convert back to the original form before calling the\n # method `action` of the policy again in the next step. If one wants to\n # store the `state` e.g. in replay buffer, then we suggest placing it\n # into the `info` field.\n info=nest_utils.unbatch_nested_tensors_to_arrays(policy_step.info))\n\n\[email protected]\nclass PyTFEagerPolicy(PyTFEagerPolicyBase):\n \"\"\"Exposes a numpy API for TF policies in Eager mode.\"\"\"\n\n def __init__(self,\n policy: tf_policy.TFPolicy,\n use_tf_function: bool = False,\n batch_time_steps=True):\n time_step_spec = tensor_spec.to_nest_array_spec(policy.time_step_spec)\n action_spec = tensor_spec.to_nest_array_spec(policy.action_spec)\n policy_state_spec = tensor_spec.to_nest_array_spec(policy.policy_state_spec)\n info_spec = tensor_spec.to_nest_array_spec(policy.info_spec)\n super(PyTFEagerPolicy,\n self).__init__(policy, time_step_spec, action_spec, policy_state_spec,\n info_spec, use_tf_function, batch_time_steps)\n\n\[email protected]\nclass SavedModelPyTFEagerPolicy(PyTFEagerPolicyBase):\n \"\"\"Exposes a numpy API for saved_model policies in Eager mode.\"\"\"\n\n def __init__(self,\n model_path: Text,\n time_step_spec: Optional[ts.TimeStep] = None,\n action_spec: Optional[types.NestedTensorSpec] = None,\n policy_state_spec: types.NestedTensorSpec = (),\n info_spec: types.NestedTensorSpec = (),\n load_specs_from_pbtxt: bool = False):\n \"\"\"Initializes a PyPolicy from a saved_model.\n\n *Note* (b/151318119): BoundedSpecs are converted to regular specs when saved\n into a proto as the `nested_structure_coder` from TF currently doesn't\n handle BoundedSpecs. 
Shape and dtypes will still match the original specs.\n\n Args:\n model_path: Path to a saved_model generated by the `policy_saver`.\n time_step_spec: Optional nested structure of ArraySpecs describing the\n policy's `time_step_spec`. This is not used by the\n SavedModelPyTFEagerPolicy, but may be accessed by other objects as it is\n part of the public policy API.\n action_spec: Optional nested structure of `ArraySpecs` describing the\n policy's `action_spec`. This is not used by the\n SavedModelPyTFEagerPolicy, but may be accessed by other objects as it is\n part of the public policy API.\n policy_state_spec: Optional nested structure of `ArraySpecs` describing\n the policy's `policy_state_spec`. This is not used by the\n SavedModelPyTFEagerPolicy, but may be accessed by other objects as it is\n part of the public policy API.\n info_spec: Optional nested structure of `ArraySpecs` describing the\n policy's `info_spec`. This is not used by the SavedModelPyTFEagerPolicy,\n but may be accessed by other objects as it is part of the public policy\n API.\n load_specs_from_pbtxt: If True the specs will be loaded from the proto\n file generated by the `policy_saver`.\n \"\"\"\n policy = tf.compat.v2.saved_model.load(model_path)\n self._checkpoint = tf.train.Checkpoint(policy=policy)\n if not (time_step_spec or load_specs_from_pbtxt):\n raise ValueError(\n 'To load a SavedModel policy you have to provide the specs, or '\n 'enable loading from proto.')\n policy_specs = None\n if not time_step_spec and load_specs_from_pbtxt:\n spec_path = os.path.join(model_path, policy_saver.POLICY_SPECS_PBTXT)\n policy_specs = policy_saver.specs_from_collect_data_spec(\n tensor_spec.from_pbtxt_file(spec_path))\n time_step_spec = policy_specs['time_step_spec']\n action_spec = policy_specs['action_spec']\n policy_state_spec = policy_specs['policy_state_spec']\n info_spec = policy_specs['info_spec']\n super(SavedModelPyTFEagerPolicy,\n self).__init__(policy, time_step_spec, action_spec, policy_state_spec,\n info_spec)\n # Override collect data_spec with whatever was loaded instead of relying\n # on trajectory_data_spec.\n if policy_specs:\n self._collect_data_spec = policy_specs['collect_data_spec']\n\n def get_train_step(self) -> types.Int:\n \"\"\"Returns the training global step of the saved model.\"\"\"\n return self._policy.get_train_step().numpy()\n\n def get_metadata(self):\n \"\"\"Returns the metadata of the saved model.\"\"\"\n return self._policy.get_metadata()\n\n def variables(self):\n return self._policy.model_variables\n\n def update_from_checkpoint(self, checkpoint_path: Text):\n \"\"\"Allows users to update saved_model variables directly from a checkpoint.\n\n `checkpoint_path` is a path that was passed to either `PolicySaver.save()`\n or `PolicySaver.save_checkpoint()`. The policy looks for a set of checkpoint\n files with the file prefix `<checkpoint_path>/variables/variables`\n\n Args:\n checkpoint_path: Path to the checkpoint to restore and use to update this\n policy.\n \"\"\"\n file_prefix = os.path.join(checkpoint_path,\n tf.saved_model.VARIABLES_DIRECTORY,\n tf.saved_model.VARIABLES_FILENAME)\n status = self._checkpoint.read(file_prefix)\n # Check that all the variables in the policy were updated, but allow the\n # checkpoint to have additional variables. 
This allows sharing checkpoints\n # across policies.\n status.assert_existing_objects_matched().expect_partial()\n\n def __getattr__(self, name: Text):\n \"\"\"Forward all other calls to the loaded policy.\"\"\"\n return getattr(self._policy, name)\n"
]
| [
[
"tensorflow.compat.v2.saved_model.load",
"tensorflow.train.Checkpoint",
"tensorflow.nest.map_structure"
]
]
|
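The row above carries the full `py_tf_eager_policy.py` source as data. As a reading aid, here is a minimal, hedged usage sketch: it assumes a policy was previously exported with tf_agents' `PolicySaver` into a hypothetical `saved_policy/` directory (with the specs pbtxt saved alongside), and `suite_gym` with 'CartPole-v0' serves purely as an illustrative environment.

```python
# Hedged sketch (not part of the row above): drive a SavedModelPyTFEagerPolicy
# with numpy time steps. 'saved_policy/' and 'CartPole-v0' are placeholders.
from tf_agents.environments import suite_gym
from tf_agents.policies import py_tf_eager_policy

env = suite_gym.load('CartPole-v0')
policy = py_tf_eager_policy.SavedModelPyTFEagerPolicy(
    'saved_policy/', load_specs_from_pbtxt=True)

time_step = env.reset()
policy_state = policy.get_initial_state(batch_size=None)
while not time_step.is_last():
    # action() batches the numpy time_step, calls the underlying TF policy,
    # then unbatches the resulting action/info back to numpy arrays.
    policy_step = policy.action(time_step, policy_state)
    policy_state = policy_step.state
    time_step = env.step(policy_step.action)
```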
USDepartmentofLabor/Binary-Context-Transformer | [
"b80c26b82ab67e0d0bd728a7940685d160ae742d"
]
| [
"binarycontexttransformer.py"
]
| [
"import numpy as np\r\nimport scipy as sp\r\nfrom sklearn.base import TransformerMixin\r\nfrom scipy.sparse import csc_matrix, csr_matrix\r\n\r\n\r\nclass BinaryContextTransformer(TransformerMixin):\r\n \"\"\"\r\n Expands base features into interaction terms when they appear with\r\n different context features. Base features are variables that may have different\r\n meanings in different contexts. Context features are indicator variables that\r\n denote which context a record belongs to. Both base features and context features\r\n must be binary.\r\n \"\"\"\r\n\r\n def __init__(self, features, contexts, progress=None):\r\n \"\"\"\r\n Args:\r\n features: names of base feature columns for input matrix\r\n contexts: names of context feature columns for input matrix\r\n progress: function of format progress_fn(iter, total) that takes\r\n an iterable and an integer with the total number of items and\r\n returns a generator to track progress at each step of the\r\n iterable (default=None)\r\n \"\"\"\r\n self.features = features\r\n self.contexts = contexts\r\n self.col_pairs = []\r\n self.progress = progress\r\n self.vocabulary = {}\r\n\r\n def fit(self, X, X_context):\r\n \"\"\"\r\n Args:\r\n X: input matrix, base feature columns\r\n X_context: input matrix, context feature columns\r\n \"\"\"\r\n assert X.shape[1] == len(self.features), \"X not same size as base.\"\r\n assert X_context.shape[1] == len(\r\n self.contexts\r\n ), \"X_context not same size as context.\"\r\n if not isinstance(X, csc_matrix):\r\n X = csc_matrix(X)\r\n if not isinstance(X_context, csc_matrix):\r\n X_context = csc_matrix(X_context)\r\n looper = range(X_context.shape[1])\r\n if self.progress is not None:\r\n looper = self.progress(looper, total=X_context.shape[1])\r\n # Find possible interactions from the sparse input matrix.\r\n blocks = []\r\n # If each record appears in only one context, the runtime complexity\r\n # of this loop is O(S), where S = the number of entries in the sparse\r\n # matrix. Each row will be selected only once and the call to max()\r\n # for a sparse matrix will only consider nonzero entries in the row.\r\n # For sparse matrices, N < S << N x B.\r\n for i in looper:\r\n # Get row indices of records that match context i\r\n row_list = X_context[:, i].indices\r\n if len(row_list) > 0:\r\n # Squash rows into binary mask for each feature\r\n # 1 if feature and context co-occur, 0 otherwise\r\n row_vals = X[row_list, :].max(axis=0)\r\n blocks.append(row_vals)\r\n # The variable `S` is a matrix where each row is a context and each\r\n # column is a feature, nonzero entries are possible interactions.\r\n S = sp.sparse.vstack(blocks)\r\n # Get column indices of features that occur in at least 2 contexts\r\n feature_idxs = csr_matrix(S.sum(axis=0) - 1).indices\r\n S = csc_matrix(S)\r\n # Make vocabulary\r\n col_pairs = []\r\n vocab = {}\r\n k = 0\r\n # The runtime complexity of this loop is O(V), where V is the number\r\n # of interaction terms in the resulting vocabulary. In the worst case,\r\n # when every feature appears in every context, V = B x C. 
When interactions\r\n # are sparse, V << B x C.\r\n looper = feature_idxs\r\n if self.progress is not None:\r\n looper = self.progress(looper, total=len(feature_idxs))\r\n for j in looper:\r\n context_idcs = S[:, j].indices\r\n for i in context_idcs:\r\n col_pairs.append((i, j))\r\n feature_name = self.features[j]\r\n context_name = self.contexts[i]\r\n name = context_name + \"_x_\" + feature_name\r\n vocab[name] = k\r\n k += 1\r\n self.col_pairs = col_pairs\r\n self.vocabulary = vocab\r\n # Check that vocabulary is correct size, sizes will not match\r\n # if features or contexts contain duplicate feature names.\r\n # This may occur when joining multiple vocabularies to form\r\n # the base feature names.\r\n msg_len = (\r\n \"Length of `vocab` does not match `col_pairs`. \",\r\n \"Check for duplicate feature names.\",\r\n )\r\n assert len(col_pairs) == len(vocab), msg_len\r\n return self\r\n\r\n def transform(self, X, X_context):\r\n \"\"\"\r\n Args:\r\n X: input matrix, base feature columns\r\n X_context: input matrix, context feature columns\r\n \"\"\"\r\n assert X.shape[1] == len(self.features), \"X not same size as base.\"\r\n assert X_context.shape[1] == len(\r\n self.contexts\r\n ), \"X_context not same size as context.\"\r\n if not isinstance(X, csr_matrix):\r\n X = csr_matrix(X)\r\n if not isinstance(X_context, csr_matrix):\r\n X_context = csr_matrix(X_context)\r\n n = X.shape[0]\r\n m = len(self.col_pairs)\r\n data = []\r\n cols = []\r\n rows = []\r\n val = 1\r\n\r\n # The runtime complexity of this loop is O(V). See `fit` method\r\n # for notes on V, the size of the fitted vocabulary.\r\n col_pair_map = {}\r\n for k, (i, j) in enumerate(self.col_pairs):\r\n col_pair_map[(i, j)] = k\r\n looper = range(n)\r\n if self.progress is not None:\r\n looper = self.progress(looper, total=n)\r\n # If each record appears in only one context, the runtime complexity\r\n # of this loop is O(S) where S is the number of entries in the sparse\r\n # matrix. See `fit` method for notes on S.\r\n for r in looper:\r\n contexts = X_context[r, :].indices\r\n features = X[r, :].indices\r\n for i in contexts:\r\n for j in features:\r\n pair = (i, j)\r\n if pair in col_pair_map:\r\n k = col_pair_map[pair]\r\n data.append(val)\r\n rows.append(r)\r\n cols.append(k)\r\n\r\n mat = csc_matrix((data, (rows, cols)), shape=(n, m), dtype=np.int8)\r\n return mat\r\n\r\n def fit_transform(self, X, X_context):\r\n \"\"\"\r\n Args:\r\n X: input matrix, base feature columns\r\n X_context: input matrix, context feature columns\r\n \"\"\"\r\n assert X.shape[1] == len(self.features), \"X not same size as base.\"\r\n assert X_context.shape[1] == len(\r\n self.contexts\r\n ), \"X_context not same size as context.\"\r\n self.fit(X, X_context)\r\n return self.transform(X, X_context)\r\n\r\n def get_feature_names(self):\r\n \"\"\"\r\n Returns a list of feature names corresponding to column indices.\r\n \"\"\"\r\n vocab = sorted(self.vocabulary.items(), key=lambda p: p[1])\r\n return [name for name, i in vocab]\r\n"
]
| [
[
"scipy.sparse.csc_matrix",
"scipy.sparse.vstack",
"scipy.sparse.csr_matrix"
]
]
|
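To make the transformer's semantics concrete, here is a small, hedged usage sketch; the feature/context names and toy matrices are invented, and `binarycontexttransformer.py` from the row above is assumed to be importable.

```python
# Hedged sketch (illustrative names): expand base features into
# context-specific interaction terms with BinaryContextTransformer.
import numpy as np
from binarycontexttransformer import BinaryContextTransformer

# 4 records, 2 binary base features (f0, f1), 2 binary contexts (A, B).
X = np.array([[1, 1],
              [0, 1],
              [1, 0],
              [1, 0]])
X_context = np.array([[1, 0],   # records 0-1 belong to context A
                      [1, 0],
                      [0, 1],   # records 2-3 belong to context B
                      [0, 1]])

bct = BinaryContextTransformer(features=["f0", "f1"], contexts=["A", "B"])
Xt = bct.fit_transform(X, X_context)

# f0 co-occurs with both contexts, so it expands to A_x_f0 and B_x_f0;
# f1 appears in only one context, so it yields no interaction columns.
print(bct.get_feature_names())  # ['A_x_f0', 'B_x_f0']
print(Xt.toarray())             # [[1 0] [0 0] [0 1] [0 1]]
```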
stoplime/OpenAudioAI | [
"4ed6fb78b976fd7c4d2b4661849562b28003a2ed"
]
| [
"dialoguePytorch/model.py"
]
| [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision import models\n\nclass ABHUE(nn.Module):\n ''' Attention-Based Heirarchical Utterance Embedding\n '''\n def __init__(self, recurrent_model=\"lstm\", dropout=0, stack_size=1, dev=torch.device(\"cpu\")):\n super(ABHUE, self).__init__()\n self.device = dev\n self.input_size = 200\n self.hidden_size = 200\n self.stack_size = stack_size\n self.isLSTM = (recurrent_model == \"lstm\")\n if self.isLSTM:\n self.context_rnn = nn.LSTM(input_size=self.input_size, hidden_size=self.hidden_size, batch_first=True)\n self.target_rnn = nn.LSTM(input_size=self.input_size, hidden_size=self.hidden_size, batch_first=True)\n\n self.prev_rnn = nn.LSTM(input_size=self.input_size, hidden_size=self.hidden_size, batch_first=True, dropout=dropout, num_layers=stack_size)\n self.post_rnn = nn.LSTM(input_size=self.input_size, hidden_size=self.hidden_size, batch_first=True, dropout=dropout, num_layers=stack_size)\n else:\n self.context_rnn = nn.GRU(input_size=self.input_size, hidden_size=self.hidden_size, batch_first=True)\n self.target_rnn = nn.GRU(input_size=self.input_size, hidden_size=self.hidden_size, batch_first=True)\n\n self.prev_rnn = nn.GRU(input_size=self.input_size, hidden_size=self.hidden_size, batch_first=True, dropout=dropout, num_layers=stack_size)\n self.post_rnn = nn.GRU(input_size=self.input_size, hidden_size=self.hidden_size, batch_first=True, dropout=dropout, num_layers=stack_size)\n \n self.fc = nn.Linear(self.hidden_size*2, self.input_size)\n\n self.context_rnn = self.context_rnn.to(self.device)\n self.target_rnn = self.target_rnn.to(self.device)\n\n self.prev_rnn = self.prev_rnn.to(self.device)\n self.post_rnn = self.post_rnn.to(self.device)\n\n self.fc = self.fc.to(self.device)\n\n def create_hidden(self, length, stack=False):\n if self.isLSTM:\n if not stack:\n hidden = (torch.randn(1, 1, length).to(self.device), torch.randn(1, 1, length).to(self.device))\n else:\n hidden = (torch.randn(self.stack_size, 1, length).to(self.device), torch.randn(self.stack_size, 1, length).to(self.device))\n else:\n if not stack:\n hidden = torch.randn(1, 1, length).to(self.device)\n else:\n hidden = torch.randn(self.stack_size, 1, length).to(self.device)\n return hidden\n\n def reset_gradients(self):\n self.context_rnn.zero_grad()\n self.target_rnn.zero_grad()\n self.prev_rnn.zero_grad()\n self.post_rnn.zero_grad()\n self.fc.zero_grad()\n\n # @profile\n def forward(self, sentences):\n '''\n # sentences: [sentence, word, direction*layers, batch, embedding]\n sentences: [[embedding] of len words] of len sentences\n '''\n sentence_embedding = []\n for i, sentence in enumerate(sentences):\n hidden = self.create_hidden(self.hidden_size)\n if i == ((len(sentences) - 1) / 2):\n for word in sentence:\n out, hidden = self.target_rnn(word, hidden)\n else:\n for word in sentence:\n out, hidden = self.context_rnn(word, hidden)\n del hidden\n sentence_embedding.append(out)\n\n hidden = self.create_hidden(self.hidden_size, stack=True)\n for i, s_embed in enumerate(sentence_embedding):\n prev_out, hidden = self.prev_rnn(s_embed, hidden)\n if i == ((len(sentence_embedding) - 1) / 2):\n break\n # hidden = hidden.detach()\n del hidden\n\n hidden = self.create_hidden(self.hidden_size, stack=True)\n for i, s_embed in reversed(list(enumerate(sentence_embedding))):\n post_out, hidden = self.post_rnn(s_embed, hidden)\n if i == ((len(sentence_embedding) - 1) / 2):\n break\n # hidden = hidden.detach()\n del hidden\n\n feature_vec = 
torch.squeeze(torch.cat((prev_out, post_out), 2))\n prediction = self.fc(feature_vec)\n del feature_vec\n\n return prediction\n\nclass GlobalModule(nn.Module):\n ''' The Global Module of the Attention-Based Hierarchical Utterance Embedding\n '''\n def __init__(self, recurrent_model=\"lstm\", dropout=0, stack_size=1, dev=torch.device(\"cpu\")):\n super(GlobalModule, self).__init__()\n self.device = dev\n self.local_prediction_size = 200\n self.hidden_size = 200\n self.stack_size = stack_size\n self.isLSTM = (recurrent_model == \"lstm\")\n if self.isLSTM:\n self.global_rnn = nn.LSTM(input_size=self.local_prediction_size, hidden_size=self.hidden_size, batch_first=True, dropout=dropout, num_layers=stack_size)\n else:\n self.global_rnn = nn.GRU(input_size=self.local_prediction_size, hidden_size=self.hidden_size, batch_first=True, dropout=dropout, num_layers=stack_size)\n\n self.global_rnn = self.global_rnn.to(self.device)\n\n self.hidden = None\n\n def create_hidden(self, length, stack=False):\n if self.isLSTM:\n if not stack:\n hidden = (torch.randn(1, 1, length).to(self.device), torch.randn(1, 1, length).to(self.device))\n else:\n hidden = (torch.randn(self.stack_size, 1, length).to(self.device), torch.randn(self.stack_size, 1, length).to(self.device))\n else:\n if not stack:\n hidden = torch.randn(1, 1, length).to(self.device)\n else:\n hidden = torch.randn(self.stack_size, 1, length).to(self.device)\n return hidden\n\n def reset_gradients(self):\n self.global_rnn.zero_grad()\n if type(self.hidden) == tuple:\n self.hidden = (self.hidden[0].detach(), self.hidden[1].detach())\n else:\n self.hidden = self.hidden.detach()\n\n def forward(self, local_prediction):\n '''\n local_prediction: tensor(200)\n '''\n local_prediction = local_prediction.unsqueeze(0).unsqueeze(0)\n self.hidden = self.create_hidden(self.hidden_size, stack=True)\n global_pred, self.hidden = self.global_rnn(local_prediction, self.hidden)\n\n return global_pred.squeeze()"
]
| [
[
"torch.nn.Linear",
"torch.device",
"torch.cat",
"torch.nn.LSTM",
"torch.nn.GRU",
"torch.randn"
]
]
|
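A short, hedged shape-check for the `ABHUE` model in the row above; the three-sentence window and two-word sentences are arbitrary, and random tensors stand in for the 200-dimensional word embeddings (shaped `[batch=1, seq=1, 200]`) that `forward` expects.

```python
# Hedged sketch: sanity-check ABHUE's input nesting and output shape.
# Assumes dialoguePytorch/model.py from the row above is importable.
import torch
from model import ABHUE

model = ABHUE(recurrent_model="lstm", stack_size=1)

# 3 sentences (the middle one is the target), 2 words each; every word
# is a [batch=1, seq_len=1, embedding=200] tensor.
sentences = [[torch.randn(1, 1, 200) for _ in range(2)] for _ in range(3)]

prediction = model(sentences)
print(prediction.shape)  # torch.Size([200]) -- one utterance embedding
```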
Doswind/pytorch | [
"ba14f9d9b00e608d4857c5101ed4465500fba79e"
]
| [
"torch/testing/_internal/common_methods_invocations.py"
]
| [
"from functools import wraps, partial\nfrom itertools import product, chain\nimport itertools\nimport collections\nimport copy\nfrom enum import Enum\nimport operator\nimport random\nimport unittest\nimport math\n\nimport torch\nimport numpy as np\nfrom torch._six import inf\nimport collections.abc\n\nfrom typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union\n\nfrom torch.testing import make_non_contiguous, make_tensor\nfrom torch.testing._internal.common_dtype import (\n _dispatch_dtypes, floating_types, floating_types_and, complex_types, floating_and_complex_types,\n floating_and_complex_types_and, all_types_and_complex_and, all_types_and, all_types_and_complex, integral_types_and,\n all_types, double_types, empty_types\n)\nfrom torch.testing._internal.common_device_type import \\\n (onlyCUDA, onlyNativeDeviceTypes, disablecuDNN, skipCUDAIfNoMagma, skipCUDAIfNoMagmaAndNoCusolver,\n skipCUDAIfNoCusolver, skipCPUIfNoLapack, skipCPUIfNoFFT, skipCUDAIfRocm, precisionOverride,\n toleranceOverride, tol, has_cusolver)\nfrom torch.testing._internal.common_cuda import CUDA11OrLater, SM53OrLater, SM60OrLater\nfrom torch.testing._internal.common_utils import \\\n (is_iterable_of_tensors,\n random_symmetric_matrix, random_symmetric_psd_matrix,\n make_fullrank_matrices_with_distinct_singular_values,\n random_symmetric_pd_matrix, make_symmetric_matrices,\n make_symmetric_pd_matrices, random_square_matrix_of_rank,\n random_fullrank_matrix_distinct_singular_value,\n TEST_WITH_ROCM, IS_WINDOWS, IS_MACOS, TEST_SCIPY,\n torch_to_numpy_dtype_dict, TEST_WITH_ASAN,\n GRADCHECK_NONDET_TOL, slowTest, noncontiguous_like)\nimport torch.testing._internal.opinfo_helper as opinfo_helper\n\nfrom setuptools import distutils\n\nhas_scipy_fft = False\nif TEST_SCIPY:\n import scipy.special\n try:\n import scipy.fft\n has_scipy_fft = True\n except ModuleNotFoundError:\n pass\n\n\n# Reasonable testing sizes for dimensions\nL = 20\nM = 10\nS = 5\n\n# Unique value to distinguish default from anything else\n_NOTHING = object()\n\n\nclass DecorateInfo(object):\n \"\"\"Describes which test, or type of tests, should be wrapped in the given\n decorators when testing an operator. Any test that matches all provided\n arguments will be decorated. 
The decorators will only be applied if the\n active_if argument is True.\"\"\"\n\n __slots__ = ['decorators', 'cls_name', 'test_name', 'device_type', 'dtypes', 'active_if']\n\n def __init__(self, decorators, cls_name=None, test_name=None, *,\n device_type=None, dtypes=None, active_if=True):\n self.decorators = list(decorators) if isinstance(decorators, collections.abc.Sequence) else [decorators]\n self.cls_name = cls_name\n self.test_name = test_name\n self.device_type = device_type\n self.dtypes = dtypes\n self.active_if = active_if\n\n def is_active(self, cls_name, test_name, device_type, dtype):\n return (\n self.active_if and\n (self.cls_name is None or self.cls_name == cls_name) and\n (self.test_name is None or self.test_name == test_name) and\n (self.device_type is None or self.device_type == device_type) and\n (self.dtypes is None or dtype in self.dtypes)\n )\n\n\nclass SampleInput(object):\n \"\"\"Represents sample inputs to a function.\"\"\"\n\n __slots__ = ['input', 'args', 'kwargs', 'output_process_fn_grad', 'broadcasts_input', 'name']\n\n def __init__(self, input, *, args=tuple(), kwargs=None, output_process_fn_grad=lambda x: x, broadcasts_input=False, name=\"\"):\n # input is the first input to the op and must be either a Tensor or TensorList (Sequence[Tensor]).\n # This follows the typical pattern where for Tensor inputs op(t, ...) = t.op(...).\n # op with TensorList inputs do not support method or inplace variants.\n assert isinstance(input, torch.Tensor) or is_iterable_of_tensors(input)\n self.input: Union[torch.Tensor, Sequence[torch.Tensor]] = input\n self.args = args\n self.kwargs = kwargs if kwargs is not None else {}\n self.output_process_fn_grad = output_process_fn_grad\n self.name = name\n\n # Specifies if `self.input` is broadcasted or not,\n # given that the operator supports broadcasting.\n # This field is used to verify the behavior for inplace variant.\n #\n # If a SampleInput is marked with `broadcasts_input=True`,\n # it is verified that we get a `RuntimerError` with this sample,\n # and inplace variant. Also inplace grad{grad} tests are skipped,\n # for such inputs (as they will error out otherwise).\n self.broadcasts_input = broadcasts_input\n\n def _repr_helper(self, formatter):\n # Helper function to return the details of the SampleInput as `str`\n # It consolidates all the fields of SampleInput and allows,\n # formatting the fields like `input`, `args`, etc with `formatter`\n # callable to customize the representation.\n # Look at `summary` method for example.\n arguments = [\n f'input={formatter(self.input)}',\n f'args={formatter(self.args)}',\n f'kwargs={formatter(self.kwargs)}',\n f'output_process_fn_grad={self.output_process_fn_grad}',\n f'broadcasts_input={self.broadcasts_input}',\n f'name={repr(self.name)}']\n\n return f'SampleInput({\", \".join(a for a in arguments if a is not None)})'\n\n def __repr__(self):\n return self._repr_helper(lambda x: x)\n\n def summary(self):\n # Returns the SampleInput details in a more\n # friendly format.\n # It formats `Tensor` and `TensorList`\n # in a more condensed representation.\n def formatter(arg):\n # Format any instance of `Tensor` (standalone, in list, or in dict)\n # by Tensor[TensorShape]\n # Eg. 
Tensor with shape (3, 4) is formatted as Tensor[3, 4]\n if isinstance(arg, torch.Tensor):\n shape = str(tuple(arg.shape)).replace('(', '').replace(')', '')\n return f\"Tensor[{shape}]\"\n elif isinstance(arg, dict):\n return {k: formatter(v) for k, v in arg.items()}\n elif is_iterable_of_tensors(arg):\n return \"TensorList[\" + \", \".join(map(formatter, arg)) + \"]\"\n elif isinstance(arg, (list, tuple)): # Handle list, tuple\n return \"(\" + \",\".join(map(formatter, arg)) + \")\"\n\n return repr(arg)\n\n return self._repr_helper(formatter)\n\n # Applies the transform f(t) -> t to each tensor and dtype in the SampleInput\n def transform(self, f):\n def tt(t):\n def _tt(t):\n return f(t)\n\n if isinstance(t, torch.Tensor):\n return _tt(t)\n elif isinstance(t, torch.dtype):\n return _tt(t)\n elif isinstance(t, list):\n return list(map(tt, t))\n elif isinstance(t, tuple):\n return tuple(map(tt, t))\n elif isinstance(t, dict):\n return {k: tt(v) for k, v in t.items()}\n else:\n return t\n\n sample_tt_input, tt_args, tt_kwargs = tt(self.input), tt(self.args), tt(self.kwargs)\n return (sample_tt_input, tt_args, tt_kwargs)\n\n # Returns the NumPy version of the sample input object in the form of a tuple: (input, args, kwargs)\n # Converts tensors to ndarrays by calling .detach().cpu().numpy() on them\n # Converts dtypes by remapping them using torch_to_numpy_dtype_dict\n def numpy(self):\n def to_numpy(t):\n if isinstance(t, torch.Tensor):\n return t.detach().cpu().numpy()\n elif isinstance(t, torch.dtype):\n return torch_to_numpy_dtype_dict[t]\n\n return self.transform(to_numpy)\n\n def noncontiguous(self):\n def to_noncontiguous(t):\n if isinstance(t, torch.Tensor):\n return noncontiguous_like(t)\n if isinstance(t, torch.dtype):\n return t\n\n return self.transform(to_noncontiguous)\n\n\nclass ErrorInput(object):\n \"\"\"\n A SampleInput that will cause the operation to throw an error plus information\n about the resulting error.\n \"\"\"\n\n __slots__ = ['sample_input', 'error_type', 'error_regex']\n\n def __init__(self, sample_input, *, error_type, error_regex):\n self.sample_input = sample_input\n self.error_type = error_type\n self.error_regex = error_regex\n\n\nclass AliasInfo(object):\n \"\"\"Class holds alias information. For example, torch.abs ->\n torch.absolute, torch.Tensor.absolute, torch.Tensor.absolute_\n \"\"\"\n\n def __init__(self, alias_name):\n self.name = alias_name\n self.op = _getattr_qual(torch, alias_name)\n self.method_variant = getattr(torch.Tensor, alias_name, None)\n self.inplace_variant = getattr(torch.Tensor, alias_name + \"_\", None)\n\n def __call__(self, *args, **kwargs):\n return self.op(*args, **kwargs)\n\n\n# Extension of getattr to support qualified names\n# e.g. 
_getattr_qual(torch, 'linalg.norm') -> torch.linalg.norm\ndef _getattr_qual(obj, name, default=_NOTHING):\n try:\n for path in name.split('.'):\n obj = getattr(obj, path)\n return obj\n except AttributeError:\n if default is not _NOTHING:\n return default\n else:\n raise\n\n\n# test if a tensor is close to an integer\ndef close_to_int(x, eps=0.1):\n if x.is_complex():\n y = torch.abs(torch.view_as_complex(torch.frac(torch.view_as_real(x))))\n else:\n y = torch.abs(torch.frac(x))\n return (y < eps) | (y > (1 - eps))\n\n\nNumericsFilter = collections.namedtuple('NumericsFilter', ['condition', 'safe_val'])\n\n\n# Note [OpInfos]\n# ~~~~~~~~~~~~~~\n#\n# The majority of this note was written shortly after the PyTorch 1.9 release.\n# If you notice it's out-of-date or think it could be improved then please\n# file an issue.\n#\n# See also: the OpInfo tracker (https://github.com/pytorch/pytorch/issues/54261)\n# See also: \"Writing Test Templates\" in common_device_type.py to learn how to\n# parametrize a test template using OpInfos.\n# See also: PyTorch's GitHub wiki on running and writing tests\n# https://github.com/pytorch/pytorch/wiki/Running-and-writing-tests\n# See also: ModuleInfos, OpInfo's sister class, defined in common_modules.py\n#\n# An OpInfo is a collection of metadata related to a PyTorch operator. This\n# metadata is used to generate tests that validate properties of the operator,\n# like if it implements the correct gradient formula.\n#\n# WHY OPINFOS?\n# ~~~~~~~~~~~~\n#\n# OpInfos are principally intended to do three things:\n#\n# 1) to allow systematic testing over all PyTorch's operators\n# 2) to simplify operator testing by autogenerating many tests\n# 3) to allow systems (like autograd, torchscript, fx, nnc...) to test\n# against every PyTorch operator\n#\n# All these goals are still a work in progress. Not every operator has an\n# OpInfo, and some operator tests that could be automatically generated\n# still have to be written manually.\n#\n# It's helpful to understand that OpInfos are both about test simplification and\n# modularity. PyTorch is a complicated framework with many interrelated systems,\n# too many for any one person to keep track of. An OpInfo can be thought of as the\n# interface between an operator implementer and those other systems. Instead of\n# requiring the implementer of torch.foo to understand how to test its forward\n# mode AD or NNC support, that's typically handled automatically just by\n# defining an OpInfo.\n#\n# It's often surprising to OpInfo writers that just implementing an OpInfo\n# typically can't verify an operator is actually implemented correctly:\n#\n# \"If an OpInfo doesn't validate my op works as expected, what's the point\n# of it?\"\n#\n# But the point is the above. OpInfos are intended to let you focus on testing\n# the operator logic you're familiar with instead of having to write tests for\n# how the operator interacts with each of PyTorch's many systems.\n#\n# And, OK, it turns out that SOMETIMES just writing an OpInfo DOES\n# validate your op works as expected, but that's only in special\n# cases. See below for details.\n#\n# WHAT'S AN OPINFO?\n# ~~~~~~~~~~~~~~~~~\n#\n# So what is an OpInfo? 
It's a Python class that describes an operator's properties,\n# like which dtypes it supports on the CPU and whether it has any aliases.\n# These properties can be divided into three categories:\n#\n# 1) Metadata describing the operator, like the operator's name and if it\n# \"supports\" the out kwarg.\n# 2) Test directives, like \"skips\" that tell the test suite to skip some\n# tests.\n# 3) A \"sample inputs\" function that generates valid inputs for the operator.\n#\n# OpInfo attributes are described in more detail below.\n#\n# THE SAMPLE INPUTS FUNCTION\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# The \"sample inputs\" function merits special elaboration. This function is\n# crucial to testing with OpInfos. A typical OpInfo test has to treat the operator\n# as a black box. There's no structure for the test to understand or exploit.\n# Without \"sample inputs\" it wouldn't even know how to call the OpInfo's\n# operator. The sample input function saves the day by providing different\n# \"SampleInputs\" that can be used to call the operator. A sample input\n# function should have the following signature:\n#\n# def sample_inputs_foo(op_info, device, dtype, requires_grad, **kwargs):\n#\n# And should return a list of SampleInputs (see the class description above).\n# Each SampleInput defines an \"input\", \"args\", \"kwargs\",\n# an \"output_process_fn_grad\" function, the \"broadcasts_input\" bool and\n# a \"name\".\n#\n# The \"input\" is the first argument to the operator, or the tensor that\n# the method or inplace variants of the operator should be called on, and\n# should be on the requested device, of the requested dtype, and its\n# requires_grad attribute should be set to the requires_grad argument.\n#\n# \"args\" should contain positional arguments, and \"kwargs\" keyword arguments.\n#\n# \"output_process_fn_grad\" has an interesting name. It's a function that maps\n# the operator's output (when given the input, args, and kwargs) to the\n# portion of the output to gradcheck. For example, consider an operator\n# like torch.linalg.slogdet\n# (https://pytorch.org/docs/master/generated/torch.linalg.slogdet.html).\n# This operator returns a tuple of two tensors, but the first tensor\n# cannot be backwarded through. Its \"output_process_fn_grad\" filters\n# this output tuple to just the second argument, which we can call backward\n# on. Functions that produce a single tensor can ignore this argument.\n#\n# \"broadcasts_input\" is a bool indicated if the SampleInput causes the operator\n# to broadcast the \"input\" argument. This is important for tests to understand\n# because inplace variants of operations throw a runtime error if they\n# would broadcast their input arguments, so tests that work with inplace\n# variants filter SampleInputs that broadcast their input.\n#\n# \"name\" is a string that's just used for debugging. It appears when printing\n# the SampleInput.\n#\n# THE (OPTIONAL) ERROR INPUTS FUNCTION\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# OpInfos may optionally specify \"error inputs\" through an error function. If\n# specified test_errors in test_ops.py will call the op with these inputs\n# and validate that the desired error is thrown.\n#\n# Error inputs automate a common testing pattern where multiple inputs are\n# passed to an operation and the errors they thrown are reviewed. 
Tests\n# written in this style should be ported to the new OpInfo pattern.\n#\n# Error inputs are specified using the ErrorInputs class, which contains\n# a SampleInput (see above) and data about the expected error.\n#\n# OPINFO FILE ORGANIZATION\n# ~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# All OpInfos are currently defined in this file. Most OpInfo tests are defined\n# in test_ops.py, but some system-specific tests are defined in those\n# systems' test files, and subclass-specific tests are defined in the test\n# file that corresponds to that subclass (see the below).\n# Expect a reorganization in the future.\n#\n# WHAT'S TESTED?\n# ~~~~~~~~~~~~~~\n#\n# Every OpInfo in the op_db sequence has the following properties validated in\n# test_ops.py:\n#\n# - that its supported dtypes are specified correctly\n# - that the operation produces the same results when called with noncontiguous inputs\n# - that it supports the out= argument properly (if it allows out=),\n# see https://github.com/pytorch/pytorch/wiki/Developer-FAQ#how-does-out-work-in-pytorch\n# - that it works with the conjugate view bit properly\n# - that its function, method, and inplace variants perform the same operation\n# (that is, that torch.add, torch.Tensor.add, and torch.Tensor.add_ all\n# do the same thing).\n# - that its inplace variant preserves the input's storage\n# - that its gradient formula is implemented correctly, and that it supports\n# gradgrad and complex grad and gradgrad and forward mode AD properly for\n# the op's function and inplace variants (method variants are skipped\n# to reduce test time).\n# - that the operation performs the same operation when traced or scripted\n# using the jit\n# - that the operation is autodifferentiated by the jit as expected\n# - that the operator's aliases, if any, perform the same operation and that\n# the jit understands the alias\n#\n# Additional OpInfo tests are in test_jit_fuser_te.py, test_fx_experimental.py,\n# and test_fx.py. These tests validate that operators work with NNC and FX\n# as expected.\n#\n# For performance, some of the above tests may only run on the first\n# SampleInput returned by an OpInfo's sample input function.\n#\n# In addition to these tests, some subclasses (discussed in the next section)\n# define additional tests.\n#\n# Critically, as mentioned above, what's not tested is that the operator\n# works as expected. When implementing an OpInfo an engineer must still\n# typically write one or more tests validating the operator's behavior.\n#\n# OPINFO (SUB)CLASSES\n# ~~~~~~~~~~~~~~~~~~~\n#\n# In addition to the OpInfo base class there are several specialized OpInfo\n# subclasses. For example, the UnaryUfuncInfo subclass is used for\n# unary elementwise operations. These operations have a common structure\n# that test_unary_ufuncs.py exploits with additional automated testing.\n# The automated testing in test_unary_ufuncs.py is so thorough, comparing\n# the operator to a NumPy reference function on a plethora of values, that\n# just implementing an OpInfo for a unary elementwise operation is often\n# sufficient testing.\n#\n# The ForeachFuncInfo is another OpInfo subclass that is hyper-specialized to a\n# very unique class of operations. 
These OpInfos aren't included in the\n# op_db sequence and have their own tests.\n#\n# Other OpInfo subclasses, like SpectralFuncInfo, are just for convenience\n# when writing OpInfos.\n#\n# TESTING A NEW OPERATOR\n# ~~~~~~~~~~~~~~~~~~~~~~\n#\n# If you're adding a new operator to any of the following namespaces:\n# - torch\n# - torch.fft\n# - torch.linalg\n# - torch.special\n# - torch.nn.functional\n# then you should typically add an OpInfo for it.\n#\n# As mentioned a couple times above, implementing an OpInfo is not\n# usually sufficient testing (unless the operator is a unary elementwise\n# operator). The OpInfo will only test the properties described in the\n# \"WHAT'S TESTED\" section. It DOES NOT verify that the operator is\n# implemented correctly.\n#\n# TIPS FOR WRITING AN OPINFO AND OPINFO TESTS\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# Writing an OpInfo can be a little daunting. Since the point of an OpInfo is to\n# be consumed by a variety of systems it can be hard to understand how to\n# deal with test failures or how to set the OpInfo metadata properly.\n#\n# Before adding an OpInfo it helps to look at other OpInfos. A sample inputs\n# function must be defined, and the operator's dtypes must be specified.\n# Once that's done you should run the operator's tests in test_ops.py\n# (these can be filtered using the \"-k\" argument in pytest). Tests that\n# fail should provide an error message that describes what to change about\n# your OpInfo. You don't need to worry about changing an OpInfo's default\n# values unless a test yells at you.\n#\n# Similarly, if you're writing a test that consumes OpInfos then it's critical\n# your test provides a clear error message describing what to do when it\n# fails. You should not assume the OpInfo implementer is familiar with your\n# system.\n#\n# If you see a confusing error message while developing an OpInfo then please\n# file an issue describing what happened.\n#\n# This trial-and-error approach to writing an OpInfo can\n# be frustrating, but it's probably necessary as long as OpInfos don't require\n# learning about all the systems that consume them. One thing that can help\n# is the get_supported_dtypes() function defined in opinfo_helper.py. This\n# function can be used to programmatically specify the dtypes an operator\n# supports, and is especially useful if writing an OpInfo on a machine\n# without a CUDA device. See its documentation for more details.\n#\n# THE FUTURE OF OPINFOS AND OPINFO TESTING\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# In the future we expect OpInfo coverage to improve and cover\n# the great majority of PyTorch's (public) operators.\n#\n\n# Classes and methods for the operator database\nclass OpInfo(object):\n \"\"\"Operator information and helper functions for acquiring it.\"\"\"\n\n def __init__(self,\n name, # the string name of the function\n *,\n ref=None, # An optional reference function that accepts ndarrays (AKA \"NumPy arrays\").\n # If given, the op will be compared with its reference on each of its sample inputs.\n # the following metadata describes the operator, its variants,\n # and its aliases, if any\n aliases=None, # iterable of aliases, e.g. 
(\"absolute\",) for torch.abs\n variant_test_name='', # additional string to include in the test name\n # this is useful when an op needs multiple OpInfos,\n # like divide does, often because it's really several\n # different ops behind the scenes\n op=None, # the function variant of the operation, populated as torch.<name> if None\n method_variant=_NOTHING, # explicitly specifies the method variant of the operator\n # if _NOTHING (default), the method variant will be autopopulated\n # if None, then the OpInfo specifies no method variant\n inplace_variant=_NOTHING, # explicitly specifies the inplace variant of the operator\n # if _NOTHING (default), the method variant will be autopopulated\n # if None, then the OpInfo specifies no method variant\n\n # the following metadata are test directives for skipping or\n # modifying tests\n skips=tuple(), # information about which tests to skip\n decorators=tuple(), # decorators to apply to generated tests\n\n # the following are pointers to functions to generate certain classes\n # of inputs\n sample_inputs_func=None, # function to generate sample inputs\n error_inputs_func=None, # function to generate inputs that will throw errors\n\n # the following metadata relates to dtype support and is tested for correctness in test_ops.py\n dtypes, # dtypes this function works with on the CPU,\n # inherited by other device types that don't specify their own dtypes\n\n # the following dtypesIf... options override the dtypes value\n # on their respective device types\n dtypesIfCPU=None, # dtypes this function is expected to work with on the CPU,\n # typically unnecessary since it's (now) redundant with the dtypes kwarg above\n dtypesIfCUDA=None, # dtypes this function is expected to work with on CUDA\n dtypesIfROCM=None, # dtypes this function is expected to work with on ROCM\n backward_dtypes=None, # backward dtypes this function is expected to work with\n backward_dtypesIfCPU=None, # backward dtypes this function is expected to work with on CPU\n backward_dtypesIfCUDA=None, # backward dtypes this function is expected to work with on CUDA\n backward_dtypesIfROCM=None, # backward dtypes this function is expected to work with on ROCM\n default_test_dtypes=None, # dtypes to test with by default. 
Tests are instantiated with\n # these dtypes for the op unless otherwise specified.\n # This is helpful in reducing the test matrix.\n # the following metadata describes the operators out= support\n supports_out=True, # whether the op supports the out kwarg\n # defaults to True, if the op does not allow the out kwarg or\n # supports it incorrectly then test_out in test_ops.py should fail\n safe_casts_outputs=False, # whether op allows safe casting when writing to out arguments\n\n # the following metadata relates to autograd support\n supports_autograd=True, # whether the operation supports backward mode AD\n # if true, gradient correctness is tested in test_ops.py\n # using the op's sample inputs\n supports_gradgrad=None, # whether the op supports second order gradients\n # if true, gradgrad correctness is tested in test_ops.py\n # defaults to support_autograd's value\n supports_inplace_autograd=None, # whether the operation supports inplace autograd\n # if true, tested in test_ops.py\n # defaults to supports_autograd's value\n supports_forward_ad=False, # Whether the operation support forward mode AD\n # If the value is True, we check that the gradients are correct\n # If the value is False, we test that forward grad is not implemented\n gradcheck_wrapper=lambda op, *args, **kwargs: op(*args, **kwargs), # wrapper function for gradcheck\n check_batched_grad=None, # whether to check batched grad when doing gradcheck\n # defaults to support_autograd's value\n check_batched_gradgrad=None, # whether to check batched grad grad when doing gradgradcheck\n # default's to support_gradgrad's value\n check_batched_forward_grad=None, # whether to check batched forward grad when doing gradcheck\n # defaults to the value of `supports_forward_ad and check_batched_grad`\n gradcheck_nondet_tol=0.0, # tolerance for nondeterminism while performing gradcheck\n gradcheck_fast_mode=None, # Whether to use the fast implmentation for gradcheck/gradgradcheck.\n # When set to None, defers to the default value provided by the wrapper\n # function around gradcheck (testing._internal.common_utils.gradcheck)\n\n # the following metadata relates to JIT support and is tested for correctness in test_ops.py\n aten_name=None, # name of the corresponding aten:: operator\n assert_autodiffed=False, # if a op's aten::node is expected to be symbolically autodiffed\n autodiff_nonfusible_nodes=None, # a list of strings with node names that are expected to be in a\n # DifferentiableGraph when autodiffed. 
Ex: ['aten::add', 'aten::mm'],\n # default is populated to be ['aten::(name of Python operator)']\n autodiff_fusible_nodes=None, # a list of strings with node names that are expected to be in FusionGroups\n # inside of DifferentiableGraphs when this operation is autodiffed.\n # Ex: ['aten::add', 'aten::mm'], defaults to an empty list\n # Note: currently no ops use fusible nodes\n\n # the following metadata relates to sparse support and is used in test_sparse.py\n supports_sparse=False, # whether the op supports sparse inputs\n\n supports_scripting=True, # only run tracing tests\n # the following metadata relates to sparse csr support and is used in test_sparse_csr.py\n supports_sparse_csr=False, # whether the op supports sparse csr inputs\n # the following metadata relates to complex support and is checked in test_ops.py\n test_conjugated_samples=True,\n test_neg_view=True,\n assert_jit_shape_analysis=False, # assert that jit shape analysis fully propagates shape\n ):\n\n dtypes_args = (dtypes, dtypesIfCPU, dtypesIfCUDA, dtypesIfROCM)\n # Validates the dtypes are generated from the dispatch-related functions\n for dtype_list in dtypes_args:\n assert isinstance(dtype_list, (_dispatch_dtypes, type(None)))\n\n self.name = name\n self.ref = ref\n self.aten_name = aten_name if aten_name is not None else name\n self.variant_test_name = variant_test_name\n\n # Attribute to verify dynamic_dtypes are used.\n self.dynamic_dtypes = any(map(lambda dtypes: isinstance(\n dtypes, opinfo_helper._dynamic_dispatch_dtypes), dtypes_args))\n\n if self.dynamic_dtypes:\n # Make sure `dtypesIfCUDA` is dynamic, if dynamic dispatch is used for CPU\n # This is because, below, we set dtypesIfCUDA to dtypes if they are None.\n assert isinstance(dtypesIfCUDA, opinfo_helper._dynamic_dispatch_dtypes), \\\n (f\"To use dynamic dtypes for operator {name}, \"\n \"acquire the dtypes dynamically for argument `dtypesIfCUDA`. \"\n \"This is to ensure that CUDA dtypes are acquired correctly as they \"\n \"differ from CPU dtypes occasionally\")\n\n self.dtypes = set(dtypes)\n\n # NOTE: backward dtypes must be acquired before forward dtypes\n # since they fall back to explicit (not implicit!) 
specifications of\n # forward dtypes\n self.backward_dtypes = set(backward_dtypes) if backward_dtypes is not None else self.dtypes\n self.backward_dtypesIfCPU = set(backward_dtypesIfCPU) if backward_dtypesIfCPU is not None else (\n backward_dtypes if backward_dtypes is not None\n else dtypesIfCPU if dtypesIfCPU is not None\n else dtypes)\n self.backward_dtypesIfCUDA = set(backward_dtypesIfCUDA) if backward_dtypesIfCUDA is not None else (\n backward_dtypes if backward_dtypes is not None\n else dtypesIfCUDA if dtypesIfCUDA is not None\n else dtypes)\n self.backward_dtypesIfROCM = set(backward_dtypesIfROCM) if backward_dtypesIfROCM is not None else (\n backward_dtypesIfCUDA if backward_dtypesIfCUDA is not None\n else backward_dtypes if backward_dtypes is not None\n else dtypesIfROCM if dtypesIfROCM is not None\n else dtypesIfCUDA if dtypesIfCUDA is not None\n else dtypes)\n\n self.dtypesIfCPU = set(dtypesIfCPU) if dtypesIfCPU is not None else self.dtypes\n self.dtypesIfCUDA = set(dtypesIfCUDA) if dtypesIfCUDA is not None else self.dtypes\n self.dtypesIfROCM = set(dtypesIfROCM) if dtypesIfROCM is not None else self.dtypesIfCUDA\n\n self._default_test_dtypes = set(default_test_dtypes) if default_test_dtypes is not None else None\n\n # NOTE: if the op is unspecified it is assumed to be under the torch namespace\n self.op = op if op else _getattr_qual(torch, self.name)\n method_variant = getattr(torch.Tensor, name, None) if method_variant is _NOTHING else method_variant\n # attributes like real, imag are not callable\n self.method_variant = method_variant if callable(method_variant) else None\n inplace_name = name + \"_\"\n self.inplace_variant = getattr(torch.Tensor, inplace_name, None) \\\n if inplace_variant is _NOTHING else inplace_variant\n self.operator_variant = getattr(operator, name, None)\n\n self.supports_out = supports_out\n self.safe_casts_outputs = safe_casts_outputs\n\n self.decorators = (*decorators, *skips)\n\n self.sample_inputs_func = sample_inputs_func\n self.error_inputs_func = error_inputs_func\n\n self.assert_autodiffed = assert_autodiffed\n self.autodiff_fusible_nodes = autodiff_fusible_nodes if autodiff_fusible_nodes else []\n if autodiff_nonfusible_nodes is None:\n self.autodiff_nonfusible_nodes = ['aten::' + self.name]\n else:\n self.autodiff_nonfusible_nodes = autodiff_nonfusible_nodes\n\n # Autograd support\n\n # Autograd flags that don't depend on backward AD\n self.supports_autograd = supports_autograd\n self.supports_forward_ad = supports_forward_ad\n self.gradcheck_fast_mode = gradcheck_fast_mode\n self.gradcheck_wrapper = gradcheck_wrapper\n self.gradcheck_nondet_tol = gradcheck_nondet_tol\n\n # Autograd flags that depend on backward AD only\n # - If setting has been explicitly set, raise error if inconsistent\n if supports_gradgrad is None:\n supports_gradgrad = supports_autograd\n else:\n assert not (supports_gradgrad and not supports_autograd), (\n \"supports_gradgrad refines the part of autograd is supported, so it should \"\n \"not be set if supports_autograd is False\")\n if check_batched_grad is None:\n check_batched_grad = supports_autograd or supports_forward_ad\n else:\n assert not (check_batched_grad and not (supports_autograd or supports_forward_ad)), (\n \"check_batched_grad refines the part of autograd that will be checked (by gradcheck), so \"\n \"it should not be set if supports_autograd is False\")\n if check_batched_gradgrad is None:\n check_batched_gradgrad = supports_gradgrad\n else:\n assert not (check_batched_gradgrad and not 
supports_gradgrad), (\n \"check_batched_gradgrad refines the part of autograd that will be checked (by \"\n \"gradgradcheck), so it should not be set if either supports_gradgrad or supports_autograd \"\n \"is False.\")\n if check_batched_forward_grad is None:\n check_batched_forward_grad = supports_forward_ad\n else:\n assert not (check_batched_forward_grad and not supports_forward_ad), (\n \"check_batched_forward_grad should only be used when supports_forward_ad \"\n \"is True. It is used to disable the test in the specific cases \"\n \"where the op supports both forward ad but fails to compute \"\n \"batched forward grad.\")\n\n self.supports_gradgrad = supports_gradgrad\n self.check_batched_grad = check_batched_grad\n self.check_batched_gradgrad = check_batched_gradgrad\n self.check_batched_forward_grad = check_batched_forward_grad\n\n # Autograd flags that depend on both forward AD and backward AD\n if supports_inplace_autograd is None:\n supports_inplace_autograd = supports_autograd or supports_forward_ad\n else:\n assert not (supports_inplace_autograd and not supports_autograd and not supports_forward_ad), (\n \"supports_inplace_autograd refines the part of autograd that is supported, so \"\n \"it should not be set if both supports_autograd and supports_forward_ad are False\")\n self.supports_inplace_autograd = supports_inplace_autograd\n\n self.supports_sparse = supports_sparse\n self.supports_sparse_csr = supports_sparse_csr\n\n self.aliases = ()\n if aliases is not None:\n self.aliases = tuple(AliasInfo(a) for a in aliases) # type: ignore[assignment]\n\n self.supports_scripting = supports_scripting\n self.assert_jit_shape_analysis = assert_jit_shape_analysis\n\n self.test_conjugated_samples = test_conjugated_samples\n self.test_neg_view = test_neg_view\n\n def __call__(self, *args, **kwargs):\n \"\"\"Calls the function variant of the operator.\"\"\"\n return self.op(*args, **kwargs)\n\n def get_op(self):\n \"\"\"Returns the function variant of the operator, torch.<op_name>.\"\"\"\n return self.op\n\n def get_method(self):\n \"\"\"Returns the method variant of the operator, torch.Tensor.<op_name>.\n Returns None if the operator has no method variant.\n \"\"\"\n return self.method_variant\n\n def get_inplace(self):\n \"\"\"Returns the inplace variant of the operator, torch.Tensor.<op_name>_.\n Returns None if the operator has no inplace variant.\n \"\"\"\n return self.inplace_variant\n\n def get_operator_variant(self):\n \"\"\"Returns operator variant of the operator, e.g. 
operator.neg\n Returns None if the operator has no operator variant.\n \"\"\"\n return self.operator_variant\n\n def conjugate_sample_inputs(self, device, dtype, requires_grad=False, **kwargs):\n \"\"\"Returns an iterable of SampleInputs but with the tensor input or first\n tensor in a sequence input conjugated.\n \"\"\"\n\n # TODO: Remove the try/except once all operators have sample_inputs_func with\n # **kwargs in their signature.\n try:\n samples = self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs)\n except TypeError:\n samples = self.sample_inputs_func(self, device, dtype, requires_grad)\n\n conj_samples = list(samples)\n\n def conjugate(tensor):\n _requires_grad = tensor.requires_grad\n with torch.no_grad():\n tensor = tensor.conj()\n return tensor.requires_grad_(_requires_grad)\n\n for i in range(len(samples)):\n sample = conj_samples[i]\n # Note: it is assumed that the input here is either a tensor or tensorlist\n if isinstance(sample.input, torch.Tensor):\n sample.input = conjugate(sample.input)\n else:\n with torch.no_grad():\n sample.input[0] = conjugate(sample.input[0])\n\n return tuple(conj_samples)\n\n def sample_inputs(self, device, dtype, requires_grad=False, **kwargs):\n \"\"\"Returns an iterable of SampleInputs.\n\n These samples should be sufficient to test the function works correctly\n with autograd, TorchScript, etc.\n \"\"\"\n\n # TODO: Remove the try/except once all operators have sample_inputs_func with\n # **kwargs in their signature.\n try:\n samples = self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs)\n except TypeError:\n samples = self.sample_inputs_func(self, device, dtype, requires_grad)\n\n if 'include_conjugated_inputs' in kwargs and kwargs.get('include_conjugated_inputs'):\n conj_samples = self.conjugate_sample_inputs(device, dtype, requires_grad, **kwargs)\n samples_list = list(samples)\n samples_list.extend(conj_samples)\n samples = tuple(samples_list)\n\n return samples\n\n def error_inputs(self, device, **kwargs):\n \"\"\"\n Returns an iterable of ErrorInputs.\n \"\"\"\n return self.error_inputs_func(self, device, **kwargs)\n\n def get_decorators(self, test_class, test_name, device, dtype):\n '''Returns the decorators targeting the given test.'''\n result = []\n for decorator in self.decorators:\n if isinstance(decorator, DecorateInfo):\n if decorator.is_active(test_class, test_name, device, dtype):\n result.extend(decorator.decorators)\n else:\n result.append(decorator)\n return result\n\n def supported_dtypes(self, device_type):\n if device_type == 'cpu':\n return self.dtypesIfCPU\n if device_type == 'cuda':\n return self.dtypesIfROCM if TEST_WITH_ROCM else self.dtypesIfCUDA\n else:\n return self.dtypes\n\n def supported_backward_dtypes(self, device_type):\n if not self.supports_autograd:\n return set()\n\n backward_dtypes = None\n if device_type == 'cpu':\n backward_dtypes = self.backward_dtypesIfCPU\n elif device_type == 'cuda':\n backward_dtypes = self.backward_dtypesIfROCM if TEST_WITH_ROCM else self.backward_dtypesIfCUDA\n else:\n backward_dtypes = self.backward_dtypes\n\n allowed_backward_dtypes = floating_and_complex_types_and(torch.bfloat16, torch.float16)\n return set(allowed_backward_dtypes).intersection(backward_dtypes)\n\n def supports_complex_autograd(self, device_type):\n if device_type == 'cpu':\n return any(dtype.is_complex for dtype in self.backward_dtypesIfCPU)\n if device_type == 'cuda':\n if TEST_WITH_ROCM:\n return any(dtype.is_complex for dtype in self.backward_dtypesIfROCM)\n else:\n 
return any(dtype.is_complex for dtype in self.backward_dtypesIfCUDA)\n else:\n return any(dtype.is_complex for dtype in self.backward_dtypes)\n\n def supports_dtype(self, dtype, device_type):\n return dtype in self.supported_dtypes(device_type)\n\n def default_test_dtypes(self, device_type):\n \"\"\"Returns the default dtypes used to test this operator on the device.\n\n Equal to the operator's default_test_dtypes filtered to remove dtypes\n not supported by the device.\n \"\"\"\n supported = self.supported_dtypes(device_type)\n return (supported if self._default_test_dtypes is None\n else supported.intersection(self._default_test_dtypes))\n\n @property\n def formatted_name(self):\n \"\"\"Returns a formatted full name for this OpInfo that can be used in test names.\"\"\"\n variant = '_' + self.variant_test_name.replace('.', '_') if self.variant_test_name else ''\n return '{}{}'.format(self.name.replace('.', '_'), variant)\n\n\ndef _generate_reduction_inputs(device, dtype, requires_grad):\n \"\"\"Generates input tensors for testing reduction operators\"\"\"\n yield make_tensor([], device, dtype, requires_grad=requires_grad)\n yield make_tensor([2], device, dtype, requires_grad=requires_grad)\n yield make_tensor([3, 5], device, dtype, requires_grad=requires_grad)\n yield make_tensor([3, 2, 1, 2], device, dtype, requires_grad=requires_grad)\n\n\ndef _generate_reduction_kwargs(ndim, supports_multiple_dims=True):\n \"\"\"Generates a subset of all valid dim and keepdim kwargs given ndim that\n is appropriate for testing reduction operators.\n \"\"\"\n\n # Test default dim and keepdim\n yield {}\n\n # Test reducing inner and outer most dimensions\n yield {'dim': 0, 'keepdim': True}\n yield {'dim': -1, 'keepdim': False}\n\n # Test reducing middle dimension\n if ndim > 2:\n yield {'dim': ndim // 2, 'keepdim': True}\n\n if supports_multiple_dims:\n # Test reducing all dimensions\n yield {'dim': tuple(range(ndim)), 'keepdim': False}\n\n # Test reducing both first and last dimensions\n if ndim > 1:\n yield {'dim': (0, -1), 'keepdim': True}\n\n # Test reducing every other dimension starting with the second\n if ndim > 3:\n yield {'dim': tuple(range(1, ndim, 2)), 'keepdim': False}\n\n\ndef sample_inputs_reduction(op_info, device, dtype, requires_grad, **kwargs):\n \"\"\"Sample inputs for reduction operators.\"\"\"\n\n # TODO(@heitorschueroff) Once all reduction operators are using\n # ReductionOpInfo use op_info.supports_multiple_dims directly.\n supports_multiple_dims: bool = kwargs.get('supports_multiple_dims', True)\n\n # TODO(@heitorschueroff) Once all reduction operators are using ReductionOpInfo\n # use op_info.genearte_args_kwargs directly.\n generate_args_kwargs = kwargs.get('generate_args_kwargs', lambda *args, **kwargs: (yield tuple(), {}))\n\n inputs: List[SampleInput] = []\n for t in _generate_reduction_inputs(device, dtype, requires_grad):\n for reduction_kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims):\n for args, kwargs in generate_args_kwargs(t, **reduction_kwargs):\n kwargs.update(reduction_kwargs)\n inputs.append(SampleInput(\n t.detach().clone().requires_grad_(requires_grad),\n args=args,\n kwargs=kwargs))\n\n return inputs\n\n\ndef _generate_masked_op_mask(input_shape, device, **kwargs):\n yield None\n yield make_tensor(input_shape, device, torch.bool, requires_grad=False)\n if len(input_shape) > 2:\n # broadcast last mask dimension:\n yield make_tensor(input_shape[:-1] + (1,), device, torch.bool, requires_grad=False)\n # broadcast middle mask dimension:\n yield 
make_tensor(input_shape[:1] + (1,) + input_shape[2:], device, torch.bool, requires_grad=False)\n # broadcast first mask dimension:\n yield make_tensor((1,) + input_shape[1:], device, torch.bool, requires_grad=False)\n # mask.ndim < input.ndim\n yield make_tensor(input_shape[1:], device, torch.bool, requires_grad=False)\n # mask.ndim == 1\n yield make_tensor(input_shape[-1:], device, torch.bool, requires_grad=False)\n # masks that require broadcasting of inputs (mask.ndim >\n # input.ndim) will not be supported, however, we may\n # reconsider this if there will be demand on this kind of\n # degenerate cases.\n\n\ndef sample_inputs_masked_reduction(op_info, device, dtype, requires_grad, **kwargs):\n \"\"\"Sample inputs for masked reduction operators.\n\n Masked reduction operator is a reduction operator with trailing\n mask optional argument. A mask is a bool tensor with the same\n shape as input or a shape that is broadcastable to input shape.\n \"\"\"\n inputs: List[SampleInput] = []\n kwargs['supports_multiple_dims'] = op_info.supports_multiple_dims\n for sample_input in sample_inputs_reduction(op_info, device, dtype, requires_grad, **kwargs):\n for mask in _generate_masked_op_mask(sample_input.input.shape, device, **kwargs):\n sample_input_args, sample_input_kwargs = sample_input.args, dict(mask=mask, **sample_input.kwargs)\n inputs.append(SampleInput(sample_input.input.detach().clone().requires_grad_(requires_grad),\n args=sample_input_args, kwargs=sample_input_kwargs))\n if(not requires_grad and dtype.is_floating_point and\n sample_input.input.ndim == 2 and mask is not None and\n mask.shape == sample_input.input.shape):\n for v in [torch.inf, -torch.inf, torch.nan]:\n t = sample_input.input.clone()\n t.diagonal()[:] = v\n inputs.append(SampleInput(t.detach().requires_grad_(requires_grad),\n args=sample_input_args,\n kwargs=sample_input_kwargs))\n\n return inputs\n\n\ndef sample_inputs_masked_norm(op_info, device, dtype, requires_grad, **kwargs):\n \"\"\"Sample inputs for masked norm.\n \"\"\"\n inputs: List[SampleInput] = []\n for ord in [2.0, 1, float('inf'), float('-inf'), 0]:\n for sample_input in sample_inputs_masked_reduction(op_info, device, dtype, requires_grad, **kwargs):\n sample_input_args, sample_input_kwargs = (ord,) + sample_input.args, sample_input.kwargs.copy()\n inputs.append(SampleInput(sample_input.input.detach().clone().requires_grad_(requires_grad),\n args=sample_input_args, kwargs=sample_input_kwargs))\n return inputs\n\n\ndef sample_inputs_masked_var(op_info, device, dtype, requires_grad, **kwargs):\n \"\"\"Sample inputs for masked var.\n \"\"\"\n inputs: List[SampleInput] = []\n for unbiased in [False, True]:\n for sample_input in sample_inputs_masked_reduction(op_info, device, dtype, requires_grad, **kwargs):\n if sample_input.args:\n dim = sample_input.args[0]\n sample_input_args = sample_input.args[:1] + (unbiased,) + sample_input.args[1:]\n sample_input_kwargs = sample_input.kwargs.copy()\n else:\n dim = sample_input.kwargs.get('dim')\n sample_input_args = sample_input.args\n sample_input_kwargs = dict(sample_input.kwargs, unbiased=unbiased)\n if requires_grad:\n inmask = torch._masked._input_mask(sample_input.input, *sample_input_args, **sample_input_kwargs)\n orig_count = torch._masked.sum(inmask.new_ones(sample_input.input.shape, dtype=torch.int64),\n dim, keepdim=True, mask=inmask)\n if orig_count.min() <= int(unbiased):\n # Skip samples that lead to singularities in var\n # computation resulting nan values both in var and\n # autograd output that 
test_grad_fn cannot handle\n # correctly.\n continue\n inputs.append(SampleInput(sample_input.input.detach().clone().requires_grad_(requires_grad),\n args=sample_input_args, kwargs=sample_input_kwargs))\n return inputs\n\n\n# NOTE [Reductions]:\n#\n# For testing purposes, we relax the definition of a reduction operator\n# as defined in the docstring below. We do this to capture operators with\n# a similar API so they can be tested automatically. However...\n#\n# Strictly speaking a reduction operator is an operator that can reduce an\n# array to a single scalar value and that can be computed from the partial\n# result of reducing subarrays. This usually means that the reduction operation\n# should be commutative and associative. This definition is important when it\n# comes to implementation as it determines how a reduction can be parallelized.\n#\n# For example, many summary statistics such as median, mode and quantile cannot\n# be computed from partial results because these are sorting and counting based\n# algorithms that need information that would be lost in the reduced value.\nclass ReductionOpInfo(OpInfo):\n \"\"\"Reduction operator information.\n\n An operator is a reduction operator if it reduces one or more dimensions of\n the input tensor to a single value. Reduction operators must implement the\n following signature:\n\n - `op(input, *args, *, dim=None, keepdim=False, **kwargs) -> Tensor`\n\n ReductionOpInfo tests that reduction operators implement a consistent API.\n Optional features such as reducing over multiple dimensions are captured in\n the optional keyword parameters of the ReductionOpInfo constructor.\n\n If a reduction operator does not yet implement the full required API of\n reduction operators, this should be documented by skipping the failing\n tests rather than adding optional parameters to ReductionOpInfo.\n\n NOTE\n The API for reduction operators has not yet been finalized and some\n requirements may change.\n\n See tests in test/test_reductions.py\n \"\"\"\n\n def __init__(\n self, name, *,\n\n # The identity value for the operator if it has one.\n identity: Optional[Any] = None,\n\n # The nan policy for the operator if it implements one.\n # - propagate: NaN values are propagated to the output\n # - omit: NaN values are discarded during the reduction\n nan_policy: Optional[str] = None,\n\n # Whether the operator supports reducing multiple dimensions.\n supports_multiple_dims: bool = True,\n\n # Whether the operator promotes integral to floating point dtypes.\n promotes_int_to_float: bool = False,\n\n # Whether the operator promotes all integral dtypes to int64.\n promotes_int_to_int64: bool = False,\n\n # If a specific dtype is given, then the operator always returns that\n # dtype irrespective of the input dtype. If None, the operator returns\n # the dtype according to the type promotion rules above.\n result_dtype: Optional[torch.dtype] = None,\n\n # ReductionOpInfo tests generate their own input, dim and keepdim\n # arguments and call this function to generate tuples of extra args and\n # kwargs to use when calling the op. 
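# Standalone check of the property described above: a strict reduction can be
# computed from partial results (sum is associative and commutative), whereas
# e.g. a median cannot be recovered from the medians of subarrays.
import torch

x = torch.randn(10)
assert torch.allclose(x.sum(), x[:5].sum() + x[5:].sum())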
This is required for operators that\n # have other required parameters besides the input tensor.\n generate_args_kwargs: Callable = lambda t, dim=None, keepdim=False: (yield tuple(), {}),\n\n # Options from the OpInfo base class\n **kwargs,\n ):\n assert nan_policy in (None, 'propagate', 'omit')\n\n # These are mutually exclusive options\n assert not (result_dtype and promotes_int_to_float)\n assert not (result_dtype and promotes_int_to_int64)\n assert not (promotes_int_to_float and promotes_int_to_int64)\n\n # Default sample_inputs_func for ReductionOpInfo which augments sample\n # inputs from sample_inputs_reduction with the args and kwargs from\n # generate_args_kwargs. This is only used if sample_inputs_func is None.\n def sample_inputs_func(*args, **kwargs):\n kwargs['supports_multiple_dims'] = supports_multiple_dims\n kwargs['generate_args_kwargs'] = generate_args_kwargs\n return sample_inputs_reduction(*args, **kwargs)\n\n # Override OpInfo defaults and call base class __init__\n kwargs.setdefault('inplace_variant', None)\n kwargs.setdefault('sample_inputs_func', sample_inputs_func)\n kwargs.setdefault('default_test_dtypes', (\n torch.uint8, torch.int64, torch.float16, torch.bfloat16, torch.float32, torch.complex64))\n super(ReductionOpInfo, self).__init__(name, **kwargs)\n\n self.identity = identity\n self.nan_policy = nan_policy\n self.supports_multiple_dims = supports_multiple_dims\n self.promotes_int_to_float = promotes_int_to_float\n self.promotes_int_to_int64 = promotes_int_to_int64\n self.result_dtype = result_dtype\n self.generate_args_kwargs = generate_args_kwargs\n\n\ndef sample_inputs_unary(op_info, device, dtype, requires_grad, **kwargs):\n low, high = op_info.domain\n low = low if low is None else low + op_info._domain_eps\n high = high if high is None else high - op_info._domain_eps\n\n if op_info.supports_sparse_csr:\n # Tensors with dim=2 for sparse CSR testing\n return (SampleInput(make_tensor((L, L), device=device, dtype=dtype,\n low=low, high=high,\n requires_grad=requires_grad)),)\n else:\n return (SampleInput(make_tensor((L,), device=device, dtype=dtype,\n low=low, high=high,\n requires_grad=requires_grad)),\n SampleInput(make_tensor((), device=device, dtype=dtype,\n low=low, high=high,\n requires_grad=requires_grad)))\n\n# Metadata class for unary \"universal functions (ufuncs)\" that accept a single\n# tensor and have common properties like:\nclass UnaryUfuncInfo(OpInfo):\n \"\"\"Operator information for 'universal unary functions (unary ufuncs).'\n These are functions of a single tensor with common properties like:\n - they are elementwise functions\n - the input shape is the output shape\n - they typically have method and inplace variants\n - they typically support the out kwarg\n - they typically have NumPy or SciPy references\n See NumPy's universal function documentation\n (https://numpy.org/doc/1.18/reference/ufuncs.html) for more details\n about the concept of ufuncs.\n \"\"\"\n\n def __init__(self,\n name, # the string name of the function\n *,\n ref, # a reference function\n dtypes=floating_types(),\n dtypesIfCUDA=None,\n dtypesIfROCM=None,\n default_test_dtypes=(\n torch.uint8, torch.long, torch.half, torch.bfloat16,\n torch.float32, torch.cfloat), # dtypes which tests check by default\n domain=(None, None), # the [low, high) domain of the function\n handles_large_floats=True, # whether the op correctly handles large float values (like 1e20)\n handles_extremals=True, # whether the op correctly handles extremal values (like inf)\n 
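# Illustrative, standalone sketch of the generator-lambda idiom used for
# generate_args_kwargs above: `yield` inside a lambda makes it a generator
# function, so each call produces an iterator of (args, kwargs) pairs.
gen = lambda t, dim=None, keepdim=False: (yield tuple(), {})
print(list(gen(None)))   # [((), {})]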
handles_complex_extremals=True, # whether the op correctly handles complex extremals (like inf -infj)\n supports_complex_to_float=False, # op supports casting from complex input to real output safely e.g. angle\n sample_inputs_func=sample_inputs_unary,\n sample_kwargs=lambda device, dtype, input: ({}, {}),\n supports_sparse=False,\n reference_numerics_filter=None, # Filter for singular input values for test_reference_numerics_normal\n **kwargs):\n super(UnaryUfuncInfo, self).__init__(name,\n dtypes=dtypes,\n dtypesIfCUDA=dtypesIfCUDA,\n dtypesIfROCM=dtypesIfROCM,\n default_test_dtypes=default_test_dtypes,\n sample_inputs_func=sample_inputs_func,\n supports_sparse=supports_sparse,\n **kwargs)\n self.ref = ref\n self.domain = domain\n self.handles_large_floats = handles_large_floats\n self.handles_extremals = handles_extremals\n self.handles_complex_extremals = handles_complex_extremals\n self.supports_complex_to_float = supports_complex_to_float\n self.reference_numerics_filter = reference_numerics_filter\n\n # test_unary_ufuncs.py generates its own inputs to test the consistency\n # of the operator on sliced tensors, non-contig tensors, etc.\n # `sample_kwargs` is a utility function to provide kwargs\n # along with those inputs if required (e.g. clamp).\n # It should return two dictionaries, the first holding kwargs for the\n # torch operator and the second for the reference NumPy operator.\n self.sample_kwargs = sample_kwargs\n\n # Epsilon to ensure grad and gradgrad checks don't test values\n # outside a function's domain.\n self._domain_eps = 1e-5\n\ndef sample_inputs_tensor_split(op_info, device, dtype, requires_grad, **kwargs):\n make_input = partial(make_tensor, device=device, dtype=dtype,\n low=None, high=None, requires_grad=requires_grad)\n\n args_cases = (\n # Cases with tensor indices.\n (torch.tensor([1, 2, 3]),),\n (torch.tensor(1),),\n (torch.tensor([1, 2, 3]), 1),\n (torch.tensor([1, 4, 2, 5, 3, 6])[::2], 1),\n # Cases with list of indices.\n ((2, 4),),\n ((2, 4), 1),\n ((2, 4), -1),\n # Cases with integer section.\n (3,),\n (3, 1),\n (3, -1),\n )\n\n def generator():\n for args in args_cases:\n yield SampleInput(make_input((S, S, S)), args=args)\n\n return list(generator())\n\n\ndef sample_inputs_linalg_det(op_info, device, dtype, requires_grad):\n kw = dict(device=device, dtype=dtype)\n inputs = [\n make_tensor((S, S), **kw),\n make_tensor((1, 1), **kw), # 1x1\n random_symmetric_matrix(S, **kw), # symmetric\n random_symmetric_psd_matrix(S, **kw), # symmetric_psd\n random_symmetric_pd_matrix(S, **kw), # symmetric_pd\n\n random_square_matrix_of_rank(S, S - 2, **kw), # dim2_null\n random_square_matrix_of_rank(S, 1, **kw), # rank1\n random_square_matrix_of_rank(S, 2, **kw), # rank2\n\n random_fullrank_matrix_distinct_singular_value(S, **kw), # distinct_singular_value\n make_tensor((3, 3, S, S), **kw), # batched\n make_tensor((3, 3, 1, 1), **kw), # batched_1x1\n random_symmetric_matrix(S, 3, **kw), # batched_symmetric\n random_symmetric_psd_matrix(S, 3, **kw), # batched_symmetric_psd\n random_symmetric_pd_matrix(S, 3, **kw), # batched_symmetric_pd\n random_fullrank_matrix_distinct_singular_value(S, 3, 3, **kw), # batched_distinct_singular_values\n make_tensor((0, 0), **kw),\n make_tensor((0, S, S), **kw),\n ]\n for t in inputs:\n t.requires_grad = requires_grad\n return [SampleInput(t) for t in inputs]\n\ndef sample_inputs_linalg_det_singular(op_info, device, dtype, requires_grad):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n def 
make_singular_matrix_batch_base(size, rank):\n assert size[-1] == size[-2]\n assert rank > 0 and rank <= size[-1]\n\n with torch.no_grad():\n n = size[-1]\n a = make_arg(size[:-2] + (n, rank)) / 10\n b = make_arg(size[:-2] + (rank, n)) / 10\n\n x = a @ b\n lu, pivs = x.lu()\n p, l, u = torch.lu_unpack(lu, pivs)\n u_diag_abs = u.diagonal(0, -2, -1).abs()\n u_diag_abs_largest = u_diag_abs.max(dim=-1, keepdim=True).values\n u_diag_abs_smallest_idxs = torch.topk(u_diag_abs, k=(n - rank), largest=False).indices\n u.diagonal(0, -2, -1).div_(u_diag_abs_largest)\n u.diagonal(0, -2, -1)[..., u_diag_abs_smallest_idxs] = torch.finfo(dtype).eps\n\n matrix = p @ l @ u\n\n assert (matrix.det().abs() < torch.finfo(dtype).eps * torch.linalg.matrix_norm(matrix)).all().item()\n\n matrix.requires_grad_(requires_grad)\n return matrix\n\n def sample_generator():\n for batch, size in product(((), (2,), (2, 2)), range(6)):\n shape = batch + (size, size)\n for rank in range(1, size):\n yield make_singular_matrix_batch_base(shape, rank)\n\n return [SampleInput(t) for t in sample_generator()]\n\n\ndef sample_inputs_linalg_matrix_power(op_info, device, dtype, requires_grad):\n # (<matrix_size>, (<batch_sizes, ...>))\n test_sizes = [\n (1, ()),\n (2, (0,)),\n (2, (2,)),\n ]\n\n inputs = []\n for matrix_size, batch_sizes in test_sizes:\n size = batch_sizes + (matrix_size, matrix_size)\n for n in (0, 3, 5):\n t = make_tensor(size, device, dtype, requires_grad=requires_grad)\n inputs.append(SampleInput(t, args=(n,)))\n for n in [-4, -2, -1]:\n t = random_fullrank_matrix_distinct_singular_value(matrix_size, *batch_sizes, device=device, dtype=dtype)\n t.requires_grad = requires_grad\n inputs.append(SampleInput(t, args=(n,)))\n\n return inputs\n\ndef sample_inputs_hsplit(op_info, device, dtype, requires_grad):\n return (SampleInput(make_tensor((6,), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(2,),),\n SampleInput(make_tensor((S, S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=([1, 2, 3],),),)\n\ndef sample_inputs_vsplit(op_info, device, dtype, requires_grad):\n return (SampleInput(make_tensor((6, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(2,),),\n SampleInput(make_tensor((S, S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=([1, 2, 3],),),)\n\ndef sample_inputs_dsplit(op_info, device, dtype, requires_grad):\n return (SampleInput(make_tensor((S, S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=([1, 2, 3],),),\n SampleInput(make_tensor((S, S, 6), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(2,),),)\n\ndef sample_inputs_linalg_multi_dot(op_info, device, dtype, requires_grad):\n # Each test case consists of the sizes in the chain of multiplications\n # e.g. 
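# Standalone sketch of the negative-exponent cases generated above for
# matrix_power (arbitrary well-conditioned input): matrix_power(a, -n)
# requires an invertible matrix and matches matrix_power(inv(a), n).
import torch

a = torch.eye(3) + 0.1 * torch.randn(3, 3)   # almost surely invertible
lhs = torch.linalg.matrix_power(a, -2)
rhs = torch.linalg.matrix_power(torch.linalg.inv(a), 2)
assert torch.allclose(lhs, rhs, atol=1e-5)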
[2, 3, 4, 5] generates matrices (2, 3) @ (3, 4) @ (4, 5)\n test_cases = [\n [1, 2, 1],\n [2, 0, 2],\n [0, 2, 2],\n [2, 2, 2, 2],\n [2, 3, 4, 5],\n [5, 4, 0, 2],\n [2, 4, 3, 5, 3, 2]\n ]\n\n result = []\n for sizes in test_cases:\n tensors = []\n for size in zip(sizes[:-1], sizes[1:]):\n t = make_tensor(size, device, dtype, requires_grad=requires_grad)\n tensors.append(t)\n result.append(SampleInput(tensors))\n\n return result\n\ndef sample_inputs_linalg_matrix_norm(op_info, device, dtype, requires_grad, **kwargs):\n sizes = ((2, 2), (2, 3, 2))\n ords = ('fro', 'nuc', inf, -inf, 1, -1, 2, -2)\n dims = ((-2, -1), (-1, 0))\n\n inputs: List[SampleInput] = []\n for size, ord, dim, keepdim in product(sizes, ords, dims, [True, False]):\n t = make_tensor(size, device, dtype, requires_grad=requires_grad)\n inputs.append(SampleInput(t, args=(ord, dim, keepdim)))\n\n return inputs\n\ndef sample_inputs_linalg_norm(op_info, device, dtype, requires_grad):\n test_sizes = [\n (S,),\n (0,),\n (S, S),\n (0, 0),\n (S, 0),\n (0, S),\n (S, S, S),\n (0, S, S),\n (S, 0, S),\n (0, 0, 0),\n ]\n\n vector_ords = (None, 0, 0.5, 1, 2, 3.5, inf, -0.5, -1, -2, -3.5, -inf)\n matrix_ords = (None, 'fro', 'nuc', 1, 2, inf, -1, -2, -inf)\n\n inputs = []\n\n for test_size in test_sizes:\n is_vector_norm = len(test_size) == 1\n is_matrix_norm = len(test_size) == 2\n\n for keepdim in [False, True]:\n inputs.append(SampleInput(\n make_tensor(\n test_size, device, dtype, low=None, high=None,\n requires_grad=requires_grad),\n kwargs=dict(\n keepdim=keepdim)))\n\n if not (is_vector_norm or is_matrix_norm):\n continue\n\n ords = vector_ords if is_vector_norm else matrix_ords\n\n for ord in ords:\n\n inputs.append(SampleInput(\n make_tensor(\n test_size, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(ord,),\n kwargs=dict(\n keepdim=keepdim)))\n\n if ord in ['nuc', 'fro']:\n inputs.append(SampleInput(\n make_tensor(\n test_size, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n kwargs=dict(\n ord=ord,\n keepdim=keepdim,\n dim=(0, 1))))\n return inputs\n\ndef sample_inputs_as_strided(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # input shape, output shape, output stride, output storage offset\n test_cases = [\n ((1,), (1,), (1,), 0),\n ((3, 3), (2, 2), (1, 2), 0),\n ((3, 3), (2, 2), (1, 2), 1),\n ((16,), (2, 2, 2, 2), (1, 1, 1, 1), 0),\n ((16,), (2, 1, 1, 2), (1, 7, 7, 1), 0),\n ]\n\n samples = []\n\n for input_shape, output_shape, stride, storage_offset in test_cases:\n input_t = make_arg(input_shape)\n kwargs = dict(storage_offset=storage_offset)\n samples.append(SampleInput(input_t, args=(output_shape, stride), kwargs=kwargs))\n\n return samples\n\ndef sample_inputs_combinations(op_info, device, dtype, requires_grad, **kwargs):\n inputs = (\n (0,),\n (0, 1),\n (0, 1, 2, 3),\n )\n\n rvals = [1, 2, 4]\n\n products = product(inputs, rvals, [False, True])\n\n samples = []\n\n for input_data, r, with_replacement in products:\n input_t = torch.tensor(input_data, device=device, dtype=dtype, requires_grad=requires_grad)\n kwargs = dict(r=r, with_replacement=with_replacement)\n\n samples.append(SampleInput(input_t, kwargs=kwargs))\n\n return tuple(samples)\n\ndef sample_inputs_cartesian_prod(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(torch.tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # constructs 1-D tensors with varying number of 
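# Standalone sketch of the overlapping-view case listed above for as_strided:
# with strides (1, 1, 1, 1), different output indices alias the same storage.
import torch

base = torch.arange(16.)
view = base.as_strided((2, 2, 2, 2), (1, 1, 1, 1))
print(view.shape)    # torch.Size([2, 2, 2, 2]); element [i, j, k, l] reads
                     # storage offset i + j + k + l, so entries overlap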
elements\n a = make_arg((0,))\n b = make_arg((0, 1))\n c = make_arg((0, 1, 2, 3))\n\n samples = []\n\n # sample with only 1 tensor\n samples.append(SampleInput(\n a\n ))\n\n # sample with 2 tensors\n samples.append(SampleInput(\n a,\n args=(b,)\n ))\n\n # sample with 3 tensors\n samples.append(SampleInput(\n a,\n args=(b, c)\n ))\n\n return tuple(samples)\n\ndef sample_inputs_cosine_similarity(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Ordered as input_shape, dict of dim and eps\n cases: Tuple[tuple, dict] = ( # type: ignore[assignment]\n ((S, S), {'dim': 1}),\n ((S, 2), {'dim': -1}),\n ((S,), {'dim': 0, 'eps': 0.5}),\n ((), {'dim': 0}),\n ((S, S, M), {'dim': 2}),\n ((S, S), {})\n )\n\n def generator():\n for input_shape, kwargs in cases:\n yield SampleInput(make_arg(input_shape), args=(make_arg(input_shape),), kwargs=kwargs)\n # Test for Broadcasting\n yield SampleInput(make_arg((1, 2, 3)), args=(make_arg((2, 1, 3)),), kwargs={'dim': -1})\n yield SampleInput(make_arg((1, 2, 3)), args=(make_arg((2, 1, 3)),), kwargs={'dim': -2})\n yield SampleInput(make_arg((2, 3)), args=(make_arg((2, 1, 3)),), kwargs={'dim': -1})\n\n return list(generator())\n\ndef sample_inputs_batch_norm(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n make_arg_without_requires_grad = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)\n\n # Ordered as: input shape, kwargs for training, momentum, eps\n cases: Tuple[Tuple[int], dict] = ( # type: ignore[assignment]\n ((S, S, S), {'training': True, 'momentum': 0.5, 'eps': 0.6}),\n ((3, 2, 4), {'training': False, 'momentum': -1.2}),\n ((3, 1), {'training': True, 'momentum': 0.0}),\n ((0,), {'training': True}),\n ((0,), {'training': False}),\n ((3, 2, 3, 4), {'training': True, 'momentum': -1.0, 'eps': 0.5}),\n ((3, 2, 3, 4), {'training': False, 'momentum': -1.0, 'eps': 0.5}),\n ((2, 1), {}),\n )\n\n def generator():\n for input_shape, kwargs in cases:\n # args: running mean, running var, weight and bias should necessarily be of shape: (channels,)\n channels = input_shape[1] if len(input_shape) > 1 else 0\n weight = make_arg(channels) if channels > 0 else None\n bias = make_arg(channels) if channels > 0 else None\n running_mean = make_arg_without_requires_grad(channels, low=0)\n running_var = make_arg_without_requires_grad(channels, low=0)\n\n yield SampleInput(\n make_arg(input_shape),\n args=(\n running_mean,\n running_var,\n weight,\n bias\n ),\n kwargs=kwargs\n )\n\n # Checking for permutations of weights and biases as `None`\n weights = [channels, None, None]\n biases = [None, channels, None]\n is_training = [True, False, False]\n\n for weight, bias, training in zip(weights, biases, is_training):\n yield SampleInput(\n make_arg(input_shape),\n args=(\n running_mean,\n running_var,\n make_arg(channels),\n make_arg(channels)\n ),\n kwargs={'training': training}\n )\n\n # Test case for no optional kwargs\n # running_mean and running_var are required in evaluation mode (training: False) but not in training mode\n yield SampleInput(make_arg((1, 2, 3)), args=(None, None), kwargs={'training': True})\n\n return list(generator())\n\ndef sample_inputs_nn_activation_relu(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n cases = (\n (()),\n ((S, )),\n ((S, S)),\n ((S, M, S))\n 
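# Standalone sketch of a functional batch_norm call shaped like the cases
# above (arbitrary values): running stats, weight and bias are all
# (channels,) vectors where channels == input.shape[1].
import torch
import torch.nn.functional as F

x = torch.randn(4, 3, 5)
running_mean, running_var = torch.zeros(3), torch.ones(3)
out = F.batch_norm(x, running_mean, running_var, torch.ones(3), torch.zeros(3),
                   training=True, momentum=0.5, eps=0.6)
print(out.shape)     # torch.Size([4, 3, 5]); running stats updated in place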
)\n\n def generator():\n for shape in cases:\n yield SampleInput(make_arg(shape))\n\n return list(generator())\n\ndef sample_inputs_nn_functional_prelu(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n cases = (\n (()),\n ((S, )),\n ((S, S)),\n ((S, M, S))\n )\n\n def generator():\n for shape in cases:\n for weight in [-1., 0., 0.8, 1.]:\n weight_tensor = torch.tensor(weight, device=device, dtype=dtype, requires_grad=requires_grad)\n yield SampleInput(make_arg(shape), kwargs=dict(weight=weight_tensor))\n\n if len(shape) >= 2:\n channel_size = shape[1]\n yield SampleInput(make_arg(shape), kwargs=dict(weight=make_arg((channel_size,))))\n\n return list(generator())\n\ndef sample_inputs_norm(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n cases = (\n ((S, S), (2,), '2'),\n ((S, S), (0,), '0'),\n ((S, S), (0.5,), '0_5'),\n ((S, S), (1,), '1'),\n ((S, S), (3,), '3'),\n ((S, S), (-1,), 'neg_1'),\n ((S, S), (-2,), 'neg_2'),\n ((S, S), (-0.5,), 'neg_0_5'),\n ((S, S), (-1.5,), 'neg_1_5'),\n )\n\n cases_nonzero_input = (\n ((S, S, S), (1.5,), '1_5_default'),\n ((S, S, S), (1.5, 1), '1_5_dim'),\n ((S, S, S), (1.5, -1), '1_5_neg_dim'),\n ((S, S, S), (1.5, 1, True), 'keepdim_1_5_dim'),\n ((S, S, S), (1.5, -1, True), 'keepdim_1_5_neg_dim'),\n )\n\n cases_negdim_base = (\n ((S, S), (-2, 1,), 'neg_2_2_dim'),\n ((S, S), (-1, 1,), 'neg_1_2_dim'),\n ((S, S), (0, 1,), '0_2_dim'),\n ((S, S), (1, 1,), '1_2_dim'),\n ((S, S), (2, 1,), '2_2_dim'),\n ((S, S), (3, 1,), '3_2_dim'),\n ((S, S, S), (2, 1), '2_dim'),\n ((S, S, S), (3, 1), '3_dim'),\n ((S, S, S), (2, 1, True), 'keepdim_2_dim'),\n ((S, S, S), (3, 1, True), 'keepdim_3_dim'),\n ((), (2, 0), '2_dim_scalar'),\n ((), (3, 0), '3_dim_scalar'),\n ((), (2, 0, True), 'keepdim_2_dim_scalar'),\n ((), (3, 0, True), 'keepdim_3_dim_scalar'),\n )\n\n cases_negdim = []\n for case in cases_negdim_base:\n cases_negdim.append(case)\n shape, args, name = case\n new_args = copy.deepcopy(list(args))\n new_args[1] *= -1\n cases_negdim.append((shape, tuple(new_args), name.replace(\"_dim\", \"_neg_dim\")))\n\n def generator():\n for shape, args, name in itertools.chain(cases, cases_negdim):\n yield SampleInput(make_arg(shape), args=args, name=name)\n\n for shape, args, name in cases_nonzero_input:\n yield SampleInput(make_arg(shape, exclude_zero=True), args=args, name=name)\n\n return list(generator())\n\n\ndef sample_inputs_norm_fro(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n cases = (\n ((S, S), (), 'default'),\n ((S, S), ('fro',), 'fro_default'),\n ((S, S), ('fro', [0, 1],), 'fro'),\n )\n\n def generator():\n for shape, args, name in cases:\n yield SampleInput(make_arg(shape), args=args, name=name)\n\n return list(generator())\n\n\ndef sample_inputs_norm_nuc(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n cases = (\n ((S, S), ('nuc',), 'nuc'),\n ((S, S, S), ('nuc', [1, 2]), 'nuc_batched'),\n )\n\n def generator():\n for shape, args, name in cases:\n yield SampleInput(make_arg(shape), args=args, name=name)\n\n return list(generator())\n\n\ndef sample_inputs_norm_inf(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, 
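# Standalone sketch of the two prelu weight layouts generated above:
# a single shared slope, or one slope per channel (input.shape[1]).
import torch
import torch.nn.functional as F

x = torch.randn(2, 4, 3)
print(F.prelu(x, torch.tensor([0.25])).shape)      # shared slope
print(F.prelu(x, torch.full((4,), 0.25)).shape)    # per-channel slopes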
requires_grad=requires_grad)\n\n cases = (\n ((S, S), (-inf,), '-inf'),\n ((S, S), (inf,), 'inf'),\n ((S, S), (inf, 1,), 'inf_2_dim'),\n ((S, S), (inf, -1,), 'inf_2_neg_dim'),\n )\n\n def generator():\n for shape, args, name in cases:\n yield SampleInput(make_arg(shape), args=args, name=name)\n\n return list(generator())\n\n\ndef sample_inputs_linalg_vector_norm(op_info, device, dtype, requires_grad, **kwargs):\n size_1D = (S,)\n size_2D = (2, 2)\n\n test_cases = [\n # input size, ord, dim args\n (size_1D, 2, None),\n (size_1D, 2, (0,)),\n (size_1D, 0, None),\n (size_1D, 0, (0,)),\n (size_1D, 0.9, None),\n (size_1D, 0.9, (0,)),\n (size_1D, 1, None),\n (size_1D, 1, (0,)),\n (size_1D, -2.1, None),\n (size_1D, -2.1, (0,)),\n (size_1D, inf, None),\n (size_1D, inf, (0,)),\n (size_1D, -inf, None),\n (size_1D, -inf, (0,)),\n\n (size_2D, 2, None),\n (size_2D, 2, (0,)),\n (size_2D, 2, (-1, 0)),\n (size_2D, 0, None),\n (size_2D, 0, (0,)),\n (size_2D, 0, (-1, 0)),\n (size_2D, 0.9, None),\n (size_2D, 0.9, (0,)),\n (size_2D, 0.9, (-1, 0)),\n (size_2D, 1, None),\n (size_2D, 1, (0,)),\n (size_2D, 1, (-1, 0)),\n (size_2D, -2.1, None),\n (size_2D, -2.1, (0,)),\n (size_2D, -2.1, (-1, 0)),\n (size_2D, inf, None),\n (size_2D, inf, (0,)),\n (size_2D, inf, (-1, 0)),\n (size_2D, -inf, None),\n (size_2D, -inf, (0,)),\n (size_2D, -inf, (-1, 0)),\n ]\n inputs = []\n\n for test_size, ord, dim in test_cases:\n for keepdim in [False, True]:\n inputs.append(SampleInput(\n make_tensor(\n test_size, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(ord,),\n kwargs=dict(\n keepdim=keepdim,\n dim=dim)))\n\n return inputs\n\n\n# Metadata class for binary \"universal functions (ufuncs)\" that accept two\n# tensor and have common properties\nclass BinaryUfuncInfo(OpInfo):\n \"\"\"Operator information for 'universal binary functions (binary ufuncs).'\n These are functions of two tensors with common properties like:\n - they are elementwise functions\n - the output shape is determined by the input shape\n - they typically have method and inplace variants\n - they typically support the out kwarg\n - they typically have NumPy or SciPy references\n See NumPy's universal function documentation\n (https://numpy.org/doc/stable/reference/ufuncs.html) for more details\n about the concept of ufuncs.\n \"\"\"\n def __init__(self, name, *,\n lhs_make_tensor_kwargs=None,\n rhs_make_tensor_kwargs=None,\n promotes_int_to_float=False, # Set to true if the op promotes integer inputs to float\n always_returns_bool=False, # Set to true if the op always returns bool tensors\n **kwargs):\n super().__init__(name, **kwargs)\n\n # [lr]hs_make_tensor_kwargs are part of the OpInfo to be able to dynamically generate valid samples later on.\n if lhs_make_tensor_kwargs is None:\n lhs_make_tensor_kwargs = {}\n self.lhs_make_tensor_kwargs = lhs_make_tensor_kwargs\n\n if rhs_make_tensor_kwargs is None:\n rhs_make_tensor_kwargs = {}\n self.rhs_make_tensor_kwargs = rhs_make_tensor_kwargs\n\n self.promotes_int_to_float = promotes_int_to_float\n self.always_returns_bool = always_returns_bool\n\ndef _resolve_binary_pwise_kwargs(\n op_info, *, op_kwargs=None, lhs_make_tensor_kwargs=None, rhs_make_tensor_kwargs=None\n):\n \"\"\"Resolves default values for :func:`sample_inputs_binary_pwise`.\n\n By default :attr:`op_kwargs`, :attr:`lhs_make_tensor_kwargs`, and :attr:`rhs_make_tensor_kwargs` are just empty\n dictionaries. 
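# Standalone sketch of linalg.vector_norm with an ord/dim/keepdim combination
# from the grid above: ord=inf is the maximum absolute value.
import torch

x = torch.randn(2, 2)
print(torch.linalg.vector_norm(x, ord=float('inf'), dim=(-1, 0), keepdim=True))
print(x.abs().amax())    # same value, without the kept dims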
In case :attr:`op_info` is a :class:`BinaryUfuncInfo`, :attr:`BinaryUfuncInfo.lhs_make_tensor_kwargs`\n and :attr:`BinaryUfuncInfo.rhs_make_tensor_kwargs` will be used as defaults.\n \"\"\"\n if op_kwargs is None:\n op_kwargs = {}\n if lhs_make_tensor_kwargs is None:\n lhs_make_tensor_kwargs = op_info.lhs_make_tensor_kwargs if isinstance(op_info, BinaryUfuncInfo) else {}\n if rhs_make_tensor_kwargs is None:\n rhs_make_tensor_kwargs = op_info.rhs_make_tensor_kwargs if isinstance(op_info, BinaryUfuncInfo) else {}\n\n return op_kwargs, lhs_make_tensor_kwargs, rhs_make_tensor_kwargs\n\n\ndef sample_inputs_binary_pwise(\n op_info,\n device,\n dtype,\n requires_grad,\n *,\n python_scalars=False,\n op_kwargs=None,\n lhs_make_tensor_kwargs=None,\n rhs_make_tensor_kwargs=None,\n **kwargs,\n):\n op_kwargs, lhs_make_tensor_kwargs, rhs_make_tensor_kwargs = _resolve_binary_pwise_kwargs(\n op_info,\n op_kwargs=op_kwargs,\n lhs_make_tensor_kwargs=lhs_make_tensor_kwargs,\n rhs_make_tensor_kwargs=rhs_make_tensor_kwargs,\n )\n\n scalar = make_tensor((), device=device, dtype=dtype, **rhs_make_tensor_kwargs)\n if python_scalars:\n scalar = scalar.item() # type: ignore[assignment]\n\n shapes = [\n ((), scalar),\n ((S,), scalar),\n ((S, 1), (S,)),\n ((M, S), scalar),\n ((S, M, S), (M, S)),\n ((S, M, S), (S, M, S)),\n ((M, 1, S), (M, S)),\n ((M, 1, S), (1, M, S)),\n ]\n\n sample_inputs = []\n for shape_lhs, shape_rhs_or_scalar in shapes:\n lhs = make_tensor(\n shape_lhs,\n device=device,\n dtype=dtype,\n requires_grad=requires_grad,\n **lhs_make_tensor_kwargs,\n )\n if isinstance(shape_rhs_or_scalar, tuple):\n # shape\n rhs = make_tensor(\n shape_rhs_or_scalar,\n device=device,\n dtype=dtype,\n requires_grad=requires_grad,\n **rhs_make_tensor_kwargs,\n )\n broadcasts_input = torch.broadcast_shapes(shape_lhs, shape_rhs_or_scalar) != shape_lhs\n else:\n # scalar\n rhs = shape_rhs_or_scalar # type: ignore[assignment]\n broadcasts_input = False\n\n sample_inputs.append(SampleInput(lhs, args=(rhs,), kwargs=op_kwargs, broadcasts_input=broadcasts_input))\n return sample_inputs\n\n\ndef sample_inputs_add_sub(\n op_info,\n device,\n dtype,\n requires_grad,\n python_scalars=False,\n alpha=1,\n op_kwargs=None,\n lhs_make_tensor_kwargs=None,\n rhs_make_tensor_kwargs=None,\n **kwargs,\n):\n op_kwargs, lhs_make_tensor_kwargs, rhs_make_tensor_kwargs = _resolve_binary_pwise_kwargs(\n op_info,\n op_kwargs=op_kwargs,\n lhs_make_tensor_kwargs=lhs_make_tensor_kwargs,\n rhs_make_tensor_kwargs=rhs_make_tensor_kwargs,\n )\n\n sample_inputs = sample_inputs_binary_pwise(\n op_info,\n device,\n dtype,\n requires_grad,\n python_scalars=python_scalars,\n op_kwargs=op_kwargs,\n lhs_make_tensor_kwargs=lhs_make_tensor_kwargs,\n rhs_make_tensor_kwargs=rhs_make_tensor_kwargs,\n **kwargs,\n )\n\n lhs = make_tensor((S, S), device=device, dtype=dtype, requires_grad=requires_grad, **lhs_make_tensor_kwargs)\n rhs = make_tensor((S, S), device=device, dtype=dtype, requires_grad=requires_grad, **rhs_make_tensor_kwargs)\n sample_inputs.append(SampleInput(lhs, args=(rhs,), kwargs=dict(op_kwargs, alpha=alpha), broadcasts_input=False))\n\n return sample_inputs\n\ndef sample_inputs_isclose(\n op_info,\n device,\n dtype,\n requires_grad,\n python_scalars=False,\n op_kwargs=None,\n lhs_make_tensor_kwargs=None,\n rhs_make_tensor_kwargs=None,\n **kwargs,\n):\n op_kwargs, lhs_make_tensor_kwargs, rhs_make_tensor_kwargs = _resolve_binary_pwise_kwargs(\n op_info,\n op_kwargs=op_kwargs,\n lhs_make_tensor_kwargs=lhs_make_tensor_kwargs,\n 
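# Standalone check of the broadcasts_input computation above: a sample
# "broadcasts its input" exactly when broadcasting lhs against rhs would
# change the lhs shape.
import torch

shape_lhs, shape_rhs = (2, 1, 4), (1, 3, 4)
broadcasts_input = torch.broadcast_shapes(shape_lhs, shape_rhs) != shape_lhs
print(broadcasts_input)   # True: (2, 1, 4) broadcasts up to (2, 3, 4)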
rhs_make_tensor_kwargs=rhs_make_tensor_kwargs,\n )\n\n sample_inputs = sample_inputs_binary_pwise(\n op_info,\n device,\n dtype,\n requires_grad,\n python_scalars=python_scalars,\n op_kwargs=op_kwargs,\n lhs_make_tensor_kwargs=lhs_make_tensor_kwargs,\n rhs_make_tensor_kwargs=rhs_make_tensor_kwargs,\n **kwargs,\n )\n\n rtols = [0., 1e-7]\n atols = [0., 1e-7]\n equal_nans = [False, True]\n\n products = product(rtols, atols, equal_nans)\n\n for rtol, atol, equal_nan in products:\n lhs = make_tensor((S, S), device=device, dtype=dtype, requires_grad=requires_grad, **lhs_make_tensor_kwargs)\n rhs = make_tensor((S, S), device=device, dtype=dtype, requires_grad=requires_grad, **rhs_make_tensor_kwargs)\n\n sample_inputs.append(SampleInput(lhs, args=(rhs,),\n kwargs=dict(op_kwargs, rtol=rtol, atol=atol, equal_nan=equal_nan)))\n\n return sample_inputs\n\ndef sample_inputs_t(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n return (SampleInput(make_arg((1, 2))),\n SampleInput(make_arg((2,))),\n SampleInput(make_arg(())))\n\n\ndef sample_inputs_mm(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype)\n\n first_shape, second_shape = (S, M), (M, S)\n sample_inputs = []\n\n sample_inputs.append(\n SampleInput(make_arg(first_shape, requires_grad=requires_grad),\n args=(make_arg(second_shape, requires_grad=requires_grad),)))\n\n if dtype.is_complex:\n sample_inputs.append(\n SampleInput(make_arg(first_shape, requires_grad=requires_grad),\n args=(make_arg(second_shape).conj().requires_grad_(requires_grad),)))\n\n sample_inputs.append(\n SampleInput(\n make_arg(first_shape).transpose(0, 1).requires_grad_(requires_grad),\n args=(make_arg(second_shape).transpose(0, 1).conj().requires_grad_(requires_grad),)))\n\n return sample_inputs\n\ndef sample_inputs_addmm(op_info, device, dtype, requires_grad, **kwargs):\n alpha_val = kwargs.get('alpha', 2 + 3j if dtype.is_complex else 0.6)\n beta_val = kwargs.get('beta', 1 + 2j if dtype.is_complex else 0.2)\n tests_list = [\n ((2, 3), (2, 2), (2, 3), False)\n ]\n tests_with_lhs_broadcasting = [\n ((1,), (2, 2), (2, 3), True),\n ((), (2, 2), (2, 3), True)\n ]\n test_cases = tests_list + tests_with_lhs_broadcasting # type: ignore[operator]\n\n sample_inputs = []\n\n for shape_a, shape_b, shape_c, broadcasts_input in test_cases:\n sample_inputs.append(\n SampleInput(\n make_tensor(shape_a, device, dtype, requires_grad=requires_grad),\n args=(\n make_tensor(shape_b, device, dtype,\n requires_grad=requires_grad),\n make_tensor(shape_c, device, dtype,\n requires_grad=requires_grad)),\n kwargs={'alpha': alpha_val, 'beta': beta_val},\n broadcasts_input=broadcasts_input))\n\n if dtype.is_complex:\n shape = (3, 3)\n sample_inputs.append(\n SampleInput(make_tensor(shape, device, dtype, requires_grad=requires_grad),\n args=(\n make_tensor(shape, device, dtype,\n requires_grad=requires_grad).t().conj(),\n make_tensor(shape, device, dtype,\n requires_grad=requires_grad)),\n kwargs={'alpha': alpha_val, 'beta': beta_val},))\n sample_inputs.append(\n SampleInput(make_tensor(shape, device, dtype, requires_grad=requires_grad),\n args=(\n make_tensor(shape, device, dtype,\n requires_grad=requires_grad),\n make_tensor(shape, device, dtype,\n requires_grad=requires_grad).t().conj()),\n kwargs={'alpha': alpha_val, 'beta': beta_val},))\n return sample_inputs\n\ndef sample_inputs_mv(self, device, dtype, requires_grad, **kwargs):\n return (\n 
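# Standalone sketch of the rtol/atol/equal_nan grid above for isclose:
# closeness is |a - b| <= atol + rtol * |b|, and equal_nan controls whether
# NaN compares equal to NaN.
import torch

a = torch.tensor([1.0, float('nan')])
b = torch.tensor([1.0 + 1e-8, float('nan')])
print(torch.isclose(a, b, rtol=0., atol=1e-7, equal_nan=False))  # [True, False]
print(torch.isclose(a, b, rtol=0., atol=1e-7, equal_nan=True))   # [True, True]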
SampleInput(\n make_tensor((S, M, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(\n make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n )\n ),\n )\n\ndef sample_inputs_bmm(self, device, dtype, requires_grad, **kwargs):\n return (\n SampleInput(\n make_tensor((M, S, M, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(\n make_tensor((M, M, S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n )\n ),\n )\n\ndef sample_inputs_dot_vdot(self, device, dtype, requires_grad, **kwargs):\n sample_inputs = []\n sample_inputs.append(SampleInput(\n make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(\n make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n )\n ))\n if dtype.is_complex:\n # dot/vdot for (conj(input), conj(arg_tensor)) and (conj(input), arg_tensor)\n # is tested in test_conj_view (which tests operations with only conjugated input tensor\n # -- not conjugated arg tensors)\n sample_inputs.append(SampleInput(\n make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(\n torch.conj(make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad)),\n )\n ))\n return sample_inputs\n\ndef sample_inputs_addmv(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n test_cases = (((S,), (S, M), (M,), 1, 1, False),\n ((S,), (S, M), (M,), 0.2, 0.6, False),\n )\n\n test_cases_with_broadcast = (((1,), (S, M), (M,), 1, 1, True),\n ((1,), (S, M), (M,), 0.2, 0.6, True),\n ((), (S, M), (M,), 1, 1, True),\n ((), (S, M), (M,), 0.2, 0.6, True),\n )\n\n cases = test_cases + test_cases_with_broadcast\n\n def generator():\n # addmv performs: beta * M + alpha * (mat @ vec)\n for M, mat, vec, beta, alpha, broadcasts_input in cases:\n yield SampleInput(make_arg(M), args=(make_arg(mat), make_arg(vec)),\n kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=broadcasts_input)\n\n return list(generator())\n\ndef sample_inputs_addbmm(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # input_shape, batch1_shape, batch2_shape, beta_val, alpha_val, is_broadcasting\n test_cases = [((S, M), (S, S, S), (S, S, M), 1, 1, False),\n ((1,), (S, S, S), (S, S, M), 1, 1, True),\n ((S, M), (S, S, S), (S, S, M), 0.6, 0.2, False),\n ((1,), (S, S, S), (S, S, M), 0.6, 0.2, True),\n ((), (S, S, S), (S, S, M), 1, 1, True),\n ((), (S, S, S), (S, S, M), 0.6, 0.2, True),\n ]\n\n def generator():\n for input_shape, batch1_shape, batch2_shape, beta, alpha, is_broadcasting in test_cases:\n if dtype.is_complex:\n beta_complex, alpha_complex = beta * (1 + 2j), alpha * (2 + 3j)\n yield SampleInput(make_arg(input_shape), args=(make_arg(batch1_shape), make_arg(batch2_shape)),\n kwargs=dict(beta=beta_complex, alpha=alpha_complex), broadcasts_input=is_broadcasting)\n yield SampleInput(make_arg(input_shape), args=(make_arg(batch1_shape), make_arg(batch2_shape)),\n kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=is_broadcasting)\n\n return list(generator())\n\ndef sample_inputs_addcmul_addcdiv(op_info, device, dtype, requires_grad, **kwargs):\n test_cases = [(((S, S), (S, S), (S, S)), False),\n (((S, S), (S, 1), (1, S)), False),\n (((1,), (S, S, 1), (1, S)), True),\n (((), (), ()), False),\n (((S, S), (), ()), True),\n (((), (S, S, 
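# Standalone check of the addmv identity noted in the generator above:
# addmv(M, mat, vec, beta=b, alpha=a) == b * M + a * (mat @ vec).
import torch

M, mat, vec = torch.randn(5), torch.randn(5, 3), torch.randn(3)
out = torch.addmv(M, mat, vec, beta=0.2, alpha=0.6)
assert torch.allclose(out, 0.2 * M + 0.6 * (mat @ vec), atol=1e-6)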
1), (1, S)), True)\n ]\n\n sample_inputs = []\n for input_args, broadcasts_input in test_cases:\n args = tuple(make_tensor(arg, device, dtype, requires_grad=requires_grad) if isinstance(arg, tuple) else arg\n for arg in input_args)\n sample_inputs.append(SampleInput(\n args[0],\n args=args[1:],\n broadcasts_input=broadcasts_input))\n\n args = tuple(make_tensor(arg, device, dtype, requires_grad=requires_grad) if isinstance(arg, tuple) else arg\n for arg in input_args)\n sample_inputs.append(SampleInput(\n args[0],\n args=args[1:],\n kwargs=dict(value=3.14), broadcasts_input=broadcasts_input))\n\n return tuple(sample_inputs)\n\ndef sample_inputs_baddbmm(op_info, device, dtype, requires_grad, **kwargs):\n test_cases = [((S, S, M), (S, S, S), (S, S, M), 1, 1, False),\n ((1,), (S, S, S), (S, S, M), 1, 1, True),\n ((S, S, M), (S, S, S), (S, S, M), 0.6, 0.2, False),\n ((1,), (S, S, S), (S, S, M), 0.6, 0.2, True),\n ((), (S, S, S), (S, S, M), 1, 1, True),\n ((), (S, S, S), (S, S, M), 0.6, 0.2, True),\n ]\n sample_inputs = []\n for (input_shape, batch1_shape, batch2_shape, alpha, beta, broadcasts_input) in test_cases:\n args = (make_tensor(input_shape, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n make_tensor(batch1_shape, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n make_tensor(batch2_shape, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad))\n\n sample_inputs.append(SampleInput(args[0], args=(args[1], args[2]),\n kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=broadcasts_input))\n if dtype.is_complex:\n sample_inputs.append(SampleInput(\n args[0].detach().clone().requires_grad_(requires_grad),\n args=(args[1].detach().clone().requires_grad_(requires_grad),\n args[2].detach().clone().requires_grad_(requires_grad)),\n kwargs=dict(beta=beta * (1 + 2j), alpha=alpha * (2 + 3j)),\n broadcasts_input=broadcasts_input))\n\n if dtype.is_complex:\n shapes = [(S, S, S), (S, M, S), (S, S, M)]\n args = (make_tensor(shapes[0], device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n make_tensor(shapes[1], device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n make_tensor(shapes[2], device, dtype,\n low=None, high=None,\n requires_grad=requires_grad))\n sample_inputs.append(\n SampleInput(\n args[0].transpose(-1, 1).detach().requires_grad_(requires_grad),\n args=(args[1].transpose(-1, 1).conj().detach().requires_grad_(requires_grad),\n args[2].transpose(-1, 1).conj().detach().requires_grad_(requires_grad)),\n kwargs=dict(beta=beta * (1 + 2j), alpha=alpha * (2 + 3j)),))\n\n return tuple(sample_inputs)\n\ndef sample_inputs_addr(op_info, device, dtype, requires_grad, **kwargs):\n input1 = SampleInput(\n make_tensor((S, M), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(\n make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad)))\n\n input2 = SampleInput(\n make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(\n make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad)),\n broadcasts_input=True)\n\n if dtype.is_complex:\n alpha, beta = 0.1 + 0.3j, 0.4 + 0.6j\n elif dtype.is_floating_point:\n alpha, beta = 0.2, 0.6\n else:\n alpha, beta = 2, 3\n\n input3 = SampleInput(\n make_tensor((S, M), device, dtype, low=None, 
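# Standalone sketch of the addcmul semantics sampled above (addcdiv is
# analogous, with division): addcmul(input, t1, t2, value=v) computes
# input + v * t1 * t2 elementwise, broadcasting all three tensors.
import torch

inp, t1, t2 = torch.randn(2, 3), torch.randn(2, 1), torch.randn(1, 3)
out = torch.addcmul(inp, t1, t2, value=3.14)
assert torch.allclose(out, inp + 3.14 * t1 * t2)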
high=None, requires_grad=requires_grad),\n args=(\n make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad)),\n kwargs=dict(beta=beta, alpha=alpha))\n\n input4 = SampleInput(\n make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(\n make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad)),\n kwargs=dict(beta=beta, alpha=alpha),\n broadcasts_input=True)\n\n return (input1, input2, input3, input4)\n\ndef sample_inputs_xlogy(self, device, dtype, requires_grad, **kwargs):\n return (\n SampleInput(\n make_tensor((S, S), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(\n make_tensor((S, S), device, dtype, low=0, high=None, requires_grad=requires_grad),\n )\n ),\n )\n\n\ndef sample_inputs_xlog1py(self, device, dtype, requires_grad):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n def generator():\n # same shape\n yield SampleInput(make_arg((S, S)), args=(make_arg((S, S), low=-1),))\n # rhs broadcast\n yield SampleInput(make_arg((S, S)), args=(make_arg((S,), low=-1),))\n # all zero `x`\n with torch.no_grad():\n x = make_arg((S, S))\n x.fill_(0)\n yield SampleInput(x, args=(make_arg((S, S), low=-1),))\n\n # randomly zero-masked `x`\n x = make_arg((S, S))\n y = make_arg((S, S), low=-1)\n with torch.no_grad():\n x[torch.rand(x.shape) > 0.5] = 0\n yield SampleInput(x, args=(y,))\n\n # Scalar x\n # `input` has to be a tensor\n # yield SampleInput(0, args=(make_arg((S, S), low=-1),))\n # yield SampleInput(2.1, args=(make_arg((S, S), low=-1),))\n\n # Scalar y\n yield SampleInput(make_arg((S, S)), args=(-0.5,))\n yield SampleInput(make_arg((S, S)), args=(1.2,))\n\n return list(generator())\n\ndef sample_inputs_zero_(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n cases = ((), (S, S, S), (S,))\n\n def generator():\n for shape in cases:\n yield(SampleInput(make_arg(shape)))\n\n return list(generator())\n\n\ndef sample_inputs_logsumexp(self, device, dtype, requires_grad):\n inputs = (\n ((), (0,), True),\n ((S, S), (1,), True),\n ((S, S), (1,), False)\n )\n samples = []\n\n for shape, dim, keepdim in inputs:\n t = make_tensor(shape, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad)\n samples.append(SampleInput(t, args=(dim, keepdim)))\n\n return tuple(samples)\n\ndef sample_inputs_like_fns(self, device, dtype, requires_grad, **kwargs):\n inputs = [\n ((), {}),\n ((S, S), {}),\n ((0, S, 0), {}),\n ((S,), {'dtype': dtype, 'device': device}),\n # Hard-code some dtypes/devices. 
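# Standalone sketch of the special case probed above for xlogy/xlog1py:
# the result is 0 wherever x == 0, even where log(y) alone would be
# -inf or nan.
import torch

x = torch.tensor([0., 0., 2.])
y = torch.tensor([0., -1., 4.])
print(torch.xlogy(x, y))    # tensor([0.0000, 0.0000, 2.7726])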
We want to test cases where the\n # (dtype, device) is different from the input's (dtype, device)\n ((S,), {'dtype': torch.double}),\n ((S,), {'device': 'cpu'}),\n ((S,), {'dtype': torch.double, 'device': 'cpu'}),\n ]\n if torch.cuda.is_available():\n inputs.append(((S,), {'device': 'cuda'}))\n\n samples = []\n for shape, kwargs in inputs:\n t = make_tensor(shape, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad)\n samples.append(SampleInput(t, kwargs=kwargs))\n\n return tuple(samples)\n\ndef get_independent_tensor(tensor):\n return tensor.detach().clone().requires_grad_(tensor.requires_grad)\n\ndef sample_inputs_randint_like(self, device, dtype, requires_grad, **kwargs):\n samples = []\n low = 2\n high = 10\n\n for sample in sample_inputs_like_fns(self, device, dtype, requires_grad, **kwargs):\n # With high\n samples.append(SampleInput(\n sample.input,\n args=(high,) + sample.args,\n kwargs=sample.kwargs))\n # With low and high\n samples.append(SampleInput(\n get_independent_tensor(sample.input),\n args=(low, high,) + sample.args,\n kwargs=sample.kwargs))\n return tuple(samples)\n\ndef sample_inputs_new_fns(self, device, dtype, requires_grad, **kwargs):\n inputs = [\n ((), (), {}),\n ((S, S), (2, 0), {}),\n ((0, S, 0), (3, 2, 2), {}),\n ((S,), (2, 3), {'dtype': dtype, 'device': device}),\n # Hard-code some dtypes/devices. We want to test cases where the\n # (dtype, device) is different from the input's (dtype, device)\n ((S,), (10,), {'dtype': torch.double}),\n ((S,), (1, 1, 12), {'device': 'cpu'}),\n ((S,), (2, 2, 2), {'dtype': torch.double, 'device': 'cpu'}),\n ]\n if torch.cuda.is_available():\n inputs.append(((S,), (7, 2), {'device': 'cuda'}))\n\n samples = []\n for input_shape, output_shape, kwargs in inputs:\n t = make_tensor(input_shape, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad)\n samples.append(SampleInput(t, args=(output_shape,), kwargs=kwargs))\n\n return tuple(samples)\n\ndef sample_inputs_new_full(self, device, dtype, requires_grad, **kwargs):\n def get_val(dtype):\n return make_tensor([], 'cpu', dtype).item()\n\n samples = []\n for sample in sample_inputs_new_fns(self, device, dtype, requires_grad, **kwargs):\n # The scalar we are passing to new_full must be the same dtype\n # as the one of the resulting tensor\n use_dtype = sample.kwargs['dtype'] if 'dtype' in sample.kwargs else dtype\n samples.append(SampleInput(\n sample.input, args=sample.args + (get_val(use_dtype),), kwargs=sample.kwargs))\n return tuple(samples)\n\ndef sample_inputs_full_like(self, device, dtype, requires_grad, **kwargs):\n def get_val(dtype):\n return make_tensor([], 'cpu', dtype).item()\n\n inputs = [\n ((), get_val(dtype), {}),\n ((S, S), get_val(dtype), {}),\n ((0, S, 0), get_val(dtype), {}),\n ((S,), get_val(dtype), {'dtype': dtype, 'device': device}),\n # Hard-code some dtypes/devices. 
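# Standalone sketch of the *_like overrides exercised above: the factory
# keeps the input's shape but can override dtype (and device).
import torch

t = torch.randn(3, dtype=torch.float32)
u = torch.zeros_like(t, dtype=torch.double)
print(u.shape, u.dtype)     # torch.Size([3]) torch.float64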
We want to test cases where the\n # (dtype, device) is different from the input's (dtype, device)\n ((S,), get_val(torch.double), {'dtype': torch.double}),\n ((S,), get_val(dtype), {'device': 'cpu'}),\n ((S,), get_val(torch.double), {'dtype': torch.double, 'device': 'cpu'}),\n ]\n if torch.cuda.is_available():\n inputs.append(((S,), get_val(dtype), {'device': 'cuda'}))\n\n samples = []\n for shape, fill_value, kwargs in inputs:\n t = make_tensor(shape, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad)\n samples.append(SampleInput(t, args=(fill_value,), kwargs=kwargs))\n\n return tuple(samples)\n\ndef sample_inputs_logcumsumexp(self, device, dtype, requires_grad):\n inputs = (\n ((S, S, S), 0),\n ((S, S, S), 1),\n ((), 0),\n )\n samples = []\n\n for large_number in (True, False):\n for shape, dim in inputs:\n t = make_tensor(shape, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad)\n\n if large_number and t.dim() > 0:\n with torch.no_grad():\n t[0] = 10000\n samples.append(SampleInput(t, args=(dim,)))\n\n return tuple(samples)\n\ndef sample_inputs_trace(self, device, dtype, requires_grad, **kwargs):\n return (SampleInput((make_tensor((S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad))),)\n\n\ndef sample_inputs_renorm(self, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n cases = (((S, S, S), (2, 1, 0.5)),\n ((S, S, S), (2, -1, 0.5)),\n ((S, S, S), (1, 2, 3)),\n ((S, S, S), (float('inf'), 2, 0.5)),\n )\n\n def generator():\n for shape, args in cases:\n yield SampleInput(make_arg(shape), args=args)\n\n return list(generator())\n\n\ndef sample_inputs_transpose_swapdims(self, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n cases = (((1, 2, 3), (-1, -2)),\n ((1, 2, 3), (-1, 2)),\n ((1, 2, 3), (1, -2)),\n ((1, 2, 3), (1, 2)),\n ((), (0, 0)),\n ((1, ), (0, 0)),\n ((M, M), (0, 1)),\n ((S, S, S), (2, 0)), )\n\n def generator():\n for shape, args in cases:\n yield SampleInput(make_arg(shape), args=args)\n\n return list(generator())\n\ndef sample_inputs_adjoint(self, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n shapes = ((1, 2, 3), (), (M, M), (S, S, S), (S, M, S), (M, S, M, S))\n return list(SampleInput(make_arg(shape)) for shape in shapes)\n\ndef sample_inputs_T(self, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n shapes = ((), (M, M))\n return list(SampleInput(make_arg(shape)) for shape in shapes)\n\n\ndef sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad=False, **kwargs):\n \"\"\"\n This function generates always invertible input for linear algebra ops using\n random_fullrank_matrix_distinct_singular_value.\n The input is generated as the itertools.product of 'batches' and 'ns'.\n In total this function generates 8 SampleInputs\n 'batches' cases include:\n () - single input,\n (0,) - zero batched dimension,\n (2,) - batch of two matrices,\n (1, 1) - 1x1 batch of matrices\n 'ns' gives 0x0 and 5x5 matrices.\n Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes.\n \"\"\"\n from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value\n\n batches = [(), (0, ), (2, ), (1, 
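# Standalone sketch of the renorm cases above: every sub-tensor along `dim`
# whose p-norm exceeds maxnorm is rescaled to have norm exactly maxnorm.
import torch

t = torch.randn(4, 3) * 10
out = torch.renorm(t, p=2, dim=0, maxnorm=0.5)
print(torch.linalg.vector_norm(out, dim=1))   # every row norm is <= 0.5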
1)]\n ns = [5, 0]\n out = []\n for batch, n in product(batches, ns):\n a = random_fullrank_matrix_distinct_singular_value(n, *batch, dtype=dtype, device=device)\n a.requires_grad = requires_grad\n out.append(SampleInput(a))\n return out\n\ndef sample_inputs_linalg_pinv_singular(op_info, device, dtype, requires_grad=False, **kwargs):\n \"\"\"\n This function produces factors `a` and `b` to generate inputs of the form `a @ b.t()` to\n test the backward method of `linalg_pinv`. That way we always preserve the rank of the\n input no matter the perturbations applied to it by the gradcheck.\n Note that `pinv` is Frechet-differentiable in a rank-preserving neighborhood.\n \"\"\"\n batches = [(), (0, ), (2, ), (1, 1)]\n # the size of at least 30 is required to cause failures for the previous implicit implementation\n # of the pinv's backward method, albeit it is slow.\n size = [0, 3, 50]\n\n def generate_samples():\n for batch, m, n in product(batches, size, size):\n for k in range(min(3, min(m, n))):\n # Note that by making the columns of `a` and `b` orthonormal we make sure that\n # the product matrix `a @ b.t()` has condition number 1 when restricted to its image\n a = torch.rand(*batch, m, k, device=device, dtype=dtype).qr().Q.requires_grad_(requires_grad)\n b = torch.rand(*batch, n, k, device=device, dtype=dtype).qr().Q.requires_grad_(requires_grad)\n yield SampleInput(a, args=(b,))\n\n return list(generate_samples())\n\ndef sample_inputs_linalg_cond(op_info, device, dtype, requires_grad=False, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n # autograd is not supported for inputs with zero number of elements\n shapes = ((S, S),\n (2, S, S),\n (2, 1, S, S), )\n\n def generator():\n for shape in shapes:\n yield SampleInput(make_arg(shape))\n\n return list(generator())\n\ndef np_sinc_with_fp16_as_fp32(x):\n # Wraps numpy's sinc function so that fp16 values are promoted to fp32\n # before sinc is invoked. 
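# Standalone sketch of the rank-preserving construction above: QR yields
# factors with orthonormal columns, so a @ b.T has rank exactly k and
# condition number 1 when restricted to its image.
import torch

m, n, k = 5, 4, 2
a = torch.linalg.qr(torch.rand(m, k)).Q
b = torch.linalg.qr(torch.rand(n, k)).Q
print(torch.linalg.matrix_rank(a @ b.T))   # tensor(2)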
Context: numpy's sinc returns NaN when evaluated\n # at 0 for fp16.\n if x.dtype == np.float16:\n return np.sinc(x.astype(np.float32))\n else:\n return np.sinc(x)\n\ndef sample_inputs_broadcast_to(op_info, device, dtype, requires_grad, **kwargs):\n test_cases = (\n ((S, 1, 1), (S, S, S)),\n ((S, 1, S), (S, S, S)),\n ((S, 1), (S, S, S)),\n ((1,), (S, S, S)),\n ((1, S), (1, 1, S)),\n ((), ()),\n ((), (1, 3, 2)),\n )\n\n return tuple(\n SampleInput(\n make_tensor(size, device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(shape,)) for size, shape in test_cases)\n\ndef sample_inputs_broadcast_tensors(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n test_cases: Tuple[tuple] = (((3,), (1, 2, 1), (1, 1), (5, 1, 1),),)\n\n samples: List[SampleInput] = []\n for shape, *other_shapes in test_cases:\n samples.append(SampleInput(make_arg(shape), args=tuple(make_arg(s) for s in other_shapes)))\n\n return samples\n\ndef sample_inputs_block_diag(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n test_cases: Tuple[tuple] = (((1, S), (2, S), (3, S),),)\n\n samples: List[SampleInput] = []\n for shape, *other_shapes in test_cases:\n samples.append(SampleInput(make_arg(shape), args=tuple(make_arg(s) for s in other_shapes)))\n\n return samples\n\ndef sample_inputs_bitwise_shift(op_info, device, dtype, requires_grad, **kwargs):\n test_cases = (\n (S, S, S),\n (S,),\n (),\n )\n\n sample_inputs = []\n for size in test_cases:\n tensor1 = make_tensor(size, device, dtype, low=-32, high=32, requires_grad=requires_grad)\n tensor2 = make_tensor(size, device, dtype, low=0, high=5, requires_grad=requires_grad)\n sample_inputs.append(SampleInput(tensor1, args=(tensor2,)))\n sample_inputs.append(SampleInput(tensor1, args=(2,)))\n\n return tuple(sample_inputs)\n\n\ndef sample_inputs_cdist(op_info, device, dtype, requires_grad, **kwargs):\n small_S = 2\n test_cases = (\n ((S, S, 2), (S, S + 1, 2)),\n ((S, S), (S, S)),\n ((S, S, S), (S, S, S)),\n ((3, 5), (3, 5)),\n ((2, 3, 5), (2, 3, 5)),\n ((1, 2, 3), (1, 2, 3)),\n ((1, 1), (S, 1)),\n ((0, 5), (4, 5)),\n ((4, 5), (0, 5)),\n ((0, 4, 5), (3, 5)),\n ((4, 5), (0, 3, 5)),\n ((0, 4, 5), (1, 3, 5)),\n ((1, 4, 5), (0, 3, 5)),\n # Using S here would make this one test take 9s\n ((small_S, small_S, small_S + 1, 2), (small_S, small_S, small_S + 2, 2)),\n ((small_S, 1, 1, small_S), (1, small_S, small_S)),\n ((1, 1, small_S), (small_S, 1, small_S, small_S)),\n )\n\n samples = []\n for cm in ['use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']:\n # FIXME add an override for JIT and revert 0. 
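# Standalone sketch of one size/shape pair from the broadcast_to cases above:
# size-1 dims are expanded as views, with stride 0 in the expanded dims.
import torch

t = torch.randn(5, 1, 1)
out = torch.broadcast_to(t, (5, 5, 5))
print(out.shape, out.stride())   # torch.Size([5, 5, 5]) (1, 0, 0)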
back to 0\n # since it's accepted by eager\n for p in [0., 1., 2., 3., 0.5, 1.5, 2.5, float(\"inf\")]:\n for t1_size, t2_size in test_cases:\n # The args should never be non-contiguous as this is not supported in the backward\n samples.append(SampleInput(\n make_tensor(t1_size, device, dtype, requires_grad=requires_grad),\n args=(make_tensor(t2_size, device, dtype, requires_grad=requires_grad), p, cm)))\n\n return samples\n\n\ndef sample_inputs_fill_(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype,\n low=None, high=None, requires_grad=requires_grad)\n\n cases = (((S, S, S), (1,)),\n ((), (1,)),\n # For requires_grad=False below,\n # check https://github.com/pytorch/pytorch/issues/59137\n ((S, S, S), (make_arg((), requires_grad=False),)))\n\n def generator():\n for shape, args in cases:\n yield SampleInput(make_arg(shape), args=args)\n\n return list(generator())\n\n\ndef sample_inputs_comparison_ops(self, device, dtype, requires_grad, **kwargs):\n test_cases = (\n ((S, S, S), (S, S, S), False),\n ((S, S, S), (), False),\n ((S, S, S), (1,), False),\n ((S,), (1,), False),\n ((), (), False),\n )\n test_cases_lhs_broadcasting = (\n ((S, 1, S), (S, S, S), True),\n ((1,), (S, S, S), True),\n ((1, S), (1, 1, S), True),\n ((), (0,), True),\n ((), (S, S, S), True),\n )\n cases = test_cases + test_cases_lhs_broadcasting\n sample_inputs = list(SampleInput(make_tensor(first_shape, device, dtype,\n requires_grad=requires_grad),\n args=(make_tensor(second_shape, device, dtype,\n requires_grad=requires_grad),),\n broadcasts_input=broadcasts_input)\n for first_shape, second_shape, broadcasts_input in cases)\n equal_tensors_non_bool = (\n ([[[-8, 6], [9, 0]], [[0, 5], [5, 7]]]),\n ([[[6, 5]], [[1, -5]]]),\n ([[2], [-1]]),\n ([0, -6]),\n ([3],),\n )\n equal_tensors_bool = (\n ([[[1, 0], [0, 0]], [[0, 1], [1, 0]]]),\n ([[[1, 1]], [[1, 0]]]),\n ([[1], [0]]),\n ([0, 1]),\n ([1],),\n )\n more_cases = equal_tensors_bool if dtype is torch.bool else equal_tensors_non_bool\n more_inputs = list(SampleInput(torch.tensor(elements, device=device, dtype=dtype,\n requires_grad=requires_grad),\n args=(torch.tensor(elements, device=device, dtype=dtype,\n requires_grad=requires_grad),))\n for elements in more_cases)\n sample_inputs = [*sample_inputs, *more_inputs]\n return tuple(sample_inputs)\n\n\ndef sample_inputs_stack(op_info, device, dtype, requires_grad, **kwargs):\n tensors = [\n make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n ]\n\n return (SampleInput(tensors, args=(0,)),)\n\ndef sample_inputs_cat_concat(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n cases: Tuple[tuple, tuple, dict] = ( # type: ignore[assignment]\n ((S, S), (S, S), {'dim': -1}),\n ((S, S), (S, S), {'dim': 1}),\n ((M, S), (S, S), {'dim': 0}), # different shapes\n ((1, 2, 3), (1, 2, 3), {'dim': -2}),\n ((0,), (0,), {'dim': 0}), # empty tensor\n ((0, S), (S, S), {'dim': 0}),\n ((1,), (1,), {}) # dim not passed, fallback to default\n )\n\n def generator():\n for input_shape1, input_shape2, kwargs in cases:\n yield SampleInput([make_arg(input_shape1), make_arg(input_shape2)], kwargs=kwargs)\n\n return list(generator())\n\ndef sample_inputs_hstack_dstack_vstack(op_info, device, dtype, requires_grad, **kwargs):\n tensors = [\n make_tensor((S, S), 
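# Standalone sketch of the cat/concat cases above: shapes must agree on every
# dim except the concatenation dim, and empty tensors are allowed.
import torch

a, b = torch.randn(4, 3), torch.randn(2, 3)
print(torch.cat([a, b], dim=0).shape)                   # torch.Size([6, 3])
print(torch.cat([torch.empty(0, 3), b], dim=0).shape)   # torch.Size([2, 3])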
device, dtype, requires_grad=requires_grad),\n make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n ]\n\n return (SampleInput(tensors),)\n\ndef sample_inputs_hypot(op_info, device, dtype, requires_grad):\n input = make_tensor((S, S), device, dtype, requires_grad=requires_grad)\n args = make_tensor((S, S), device, dtype, requires_grad=requires_grad)\n\n return (\n SampleInput(input, args=(args,)),\n )\n\ndef sample_inputs_gather(op_info, device, dtype, requires_grad, **kwargs):\n return (\n SampleInput(\n make_tensor((M, S), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(0, gather_variable((S, S), 1, M, True, device=device))),\n SampleInput(\n make_tensor((M, S), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(1, gather_variable((M, S // 2), 0, S, True, device=device))),\n SampleInput(\n make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(0, torch.tensor([0], dtype=torch.int64, device=device))),\n # Empty index tensor case, see: https://github.com/pytorch/pytorch/pull/65006\n SampleInput(\n make_tensor((S,), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(0, torch.tensor([], dtype=torch.uint8, device=device))),\n SampleInput(\n make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(0, torch.tensor(0, dtype=torch.int64, device=device))),\n )\n\n\ndef sample_inputs_take_along_dim(op_info, device, dtype, requires_grad, **kwargs):\n return (SampleInput(make_tensor((S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(gather_variable((S, S), 1, S, True, device=device), 0)),\n\n # `indices` broadcast\n SampleInput(make_tensor((S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(gather_variable((1, S // 2), 0, S, True, device=device), 1)),\n\n # `self` broadcast\n SampleInput(make_tensor((1, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(gather_variable((S, S // 2), 0, S, True, device=device), 1)),\n\n # without `dim` arg\n SampleInput(make_tensor((S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(gather_variable((S, S // 2), 0, S, True, device=device),)),\n )\n\n\ndef sample_inputs_aminmax(op_info, device, dtype, requires_grad, **kwargs):\n test_cases: Tuple[tuple, dict] = ( # type: ignore[assignment]\n ((S, S, S), {}),\n ((S, S, S), {'dim': 1}),\n ((S, S, S), {'dim': 1, 'keepdim': True}),\n ((), {'dim': 0}),\n ((), {}),\n ((), {'dim': 0, 'keepdim': True}),\n )\n\n samples: List[SampleInput] = []\n for shape, kwargs in test_cases:\n samples.append(SampleInput(\n make_tensor(shape, device, dtype, requires_grad=requires_grad),\n kwargs=kwargs))\n\n return samples\n
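\n# Editor's note: the sketch below is an illustrative, hypothetical helper (the\n# _example_* name is ours, not upstream) showing how sample-input generators\n# like sample_inputs_aminmax are typically consumed: each SampleInput bundles an\n# input tensor with kwargs, and torch.aminmax returns a (min, max) pair.\ndef _example_consume_aminmax_samples():\n for sample in sample_inputs_aminmax(None, 'cpu', torch.float32, requires_grad=False):\n result = torch.aminmax(sample.input, **sample.kwargs)\n # the reduced minima can never exceed the corresponding maxima\n assert torch.all(result.min <= result.max)\n\ndef sample_inputs_diff(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n test_cases = (\n ((1,), 0, None, None),\n ((S,), 0, None, None),\n ((S, 1), 0, None, None),\n ((S, 1), 1, None, None),\n ((S, S), 0, None, None),\n ((S, S), 1, None, None),\n ((S, S), 0, (1, S), (2, S)),\n ((S, S), 0, None, (2, S)),\n ((S, S, S), 1, None, None),\n ((S, S, S), 2, None, None),\n ((S, S, S), 1, (S, 1, S), (S, 1, S)),\n ((S, S,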
S), 2, (S, S, 1), (S, S, 1)),\n ((S, S, S), 2, (S, S, S), (S, S, S)),)\n\n sample_inputs = []\n for size, dim, size_prepend, size_append in test_cases:\n prepend_size = 0 if (size_prepend is None) else size_prepend[dim]\n append_size = 0 if (size_append is None) else size_append[dim]\n dim_size = size[dim] + prepend_size + append_size\n for n in range(dim_size):\n input_tensor = make_arg(size)\n prepend = make_arg(size_prepend) if size_prepend else None\n append = make_arg(size_append) if size_append else None\n sample_inputs.append(SampleInput(input_tensor, args=(n, dim, prepend, append,)))\n\n # add some samples with n > dim_size\n sample_inputs.append(SampleInput(make_arg((S, S, S)), args=(S + 1, 1,)))\n sample_inputs.append(SampleInput(make_arg((S, S, S)), args=(S * 3 + 2, 2, make_arg((S, S, S)), make_arg((S, S, S)),)))\n\n return sample_inputs\n\ndef sample_inputs_histogram(op_info, device, dtype, requires_grad):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))\n\n sample_inputs = []\n for size, bin_ct, weighted, density in product(sizes, range(1, 5), [False, True], [False, True]):\n input_tensor = make_arg(size)\n weight_tensor = make_arg(size) if weighted else None\n\n sample_inputs.append(SampleInput(input_tensor, args=(bin_ct,),\n kwargs=dict(weight=weight_tensor, density=density)))\n\n bins_tensor = make_arg((bin_ct + 1,))\n sample_inputs.append(SampleInput(input_tensor, args=(bins_tensor,),\n kwargs=dict(weight=weight_tensor, density=density)))\n\n return sample_inputs\n\ndef sample_inputs_histogramdd(op_info, device, dtype, requires_grad):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n sizes = ((S, S), (S, S, S), (S, 1, S), (S, 0, S))\n bin_ct_patterns = ((1, 1, 1, 1, 1), (2, 3, 2, 3, 2), (3, 2, 3, 2, 3))\n\n sample_inputs = []\n for size, bin_ct_pattern, weighted, density in product(sizes, bin_ct_patterns, [False, True], [False, True]):\n input_tensor = make_arg(size)\n bin_ct = bin_ct_pattern[:size[-1]]\n weight_tensor = make_arg(size[:-1]) if weighted else None\n\n sample_inputs.append(SampleInput(input_tensor, args=(bin_ct,),\n kwargs=dict(weight=weight_tensor, density=density)))\n\n bins_tensor = [make_arg(ct + 1) for ct in bin_ct]\n sample_inputs.append(SampleInput(input_tensor, args=(bins_tensor,),\n kwargs=dict(weight=weight_tensor, density=density)))\n\n return sample_inputs\n\ndef sample_inputs_histc(op_info, device, dtype, requires_grad):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))\n\n sample_inputs = []\n for size, min, max in product(sizes, [0, -10], [0, 10]):\n # construct sample input omitting bins arg\n sample_inputs.append(SampleInput(make_arg(size),\n kwargs=dict(min=min, max=max)))\n\n # construct sample inputs with a few different bins values\n for bins in [1, 3, 10]:\n sample_inputs.append(SampleInput(make_arg(size),\n kwargs=dict(bins=bins, min=min, max=max)))\n\n return sample_inputs\n\ndef sample_inputs_bincount(op_info, device, dtype, requires_grad):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n sample_inputs = []\n\n for size, weighted in product((S, M), [False, True]):\n input_tensor = torch.randint(0, size, (size,), dtype=dtype, device=device)\n weight_tensor = make_arg((size,)) if weighted else None\n\n max_val = 
int(input_tensor.max().item())\n\n for minlength in [0, max_val // 2, max_val, 2 * max_val]:\n sample_inputs.append(SampleInput(input_tensor,\n kwargs=dict(weights=weight_tensor, minlength=minlength)))\n\n return sample_inputs\n\ndef sample_inputs_bucketize(op_info, device, dtype, requires_grad):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))\n\n sample_inputs = []\n\n for size, out_int32, right in product(sizes, [False, True], [False, True]):\n input_tensor = make_arg(size)\n boundaries = make_arg((S,)).msort()\n\n sample_inputs.append(SampleInput(input_tensor, args=(boundaries, ),\n kwargs=dict(out_int32=out_int32, right=right)))\n\n return sample_inputs\n\ndef sample_inputs_searchsorted(op_info, device, dtype, requires_grad):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n sizes = ((0,), (M,), (0, 0), (M, M), (0, 0, 0), (M, M, M))\n inputs = []\n for size, noncontiguous, out_int32, right in product(sizes, [False, True], [False, True], [False, True]):\n unsorted_tensor = make_arg(size, noncontiguous=noncontiguous)\n input_tensor = make_arg(size, noncontiguous=noncontiguous)\n if np.prod(size) == 0:  # np.prod, since np.product is deprecated\n boundary_tensor = unsorted_tensor\n sorter = make_tensor(size, dtype=torch.int64, device=device, noncontiguous=noncontiguous)\n else:\n boundary_tensor, sorter = torch.sort(unsorted_tensor)\n side = \"right\" if right else \"left\"\n\n inputs.append(SampleInput(boundary_tensor, args=(input_tensor,), kwargs=dict(out_int32=out_int32, right=right)))\n inputs.append(SampleInput(boundary_tensor, args=(input_tensor,), kwargs=dict(out_int32=out_int32, side=side)))\n\n inputs.append(\n SampleInput(unsorted_tensor, args=(input_tensor,), kwargs=dict(out_int32=out_int32, right=right, sorter=sorter)))\n inputs.append(\n SampleInput(unsorted_tensor, args=(input_tensor,), kwargs=dict(out_int32=out_int32, side=side, sorter=sorter)))\n return inputs\n\ndef sample_inputs_gradient(op_info, device, dtype, requires_grad):\n sample_inputs = []\n test_cases_float = (\n ((S,), None, None, 1),\n ((S,), 2., None, 1),\n ((S, S), None, None, 2),\n ((S, S), [2.0, 2.1], None, 1),\n ((S, S), [2.0, 2.1], (0, 1), 1),\n ((4, 4, 4), [2., 1.], (0, 1), 2),\n )\n for size, spacing, dim, edge_order in test_cases_float:\n t = make_tensor(size, device, dtype, low=None, high=None, requires_grad=requires_grad)\n sample_inputs.append(SampleInput(t, kwargs=dict(dim=dim, spacing=spacing, edge_order=edge_order)))\n\n test_cases_tensor = (\n ((3, 3, 3), ((1.1, 2.0, 3.5), (4.0, 2, 6.0)), (0, -1), 1),\n ((3, 3, 3), ((1.0, 3.0, 2.0), (8.0, 6.0, 1.0)), (0, 1), 2),\n )\n for size, coordinates, dim, edge_order in test_cases_tensor:\n t = make_tensor(size, device, dtype, low=None, high=None, requires_grad=requires_grad)\n coordinates_tensor_list = []\n for coords in coordinates:\n # `coords` will always contain floating point values and Python 3.10 does not support this\n # implicit conversion to an integer using `__int__`\n # TODO: this can be simplified after https://github.com/pytorch/pytorch/issues/69316 is fixed\n a = torch.tensor(coords, device=device)\n coordinates_tensor_list.append(a.to(dtype))\n sample_inputs.append(SampleInput(t, kwargs=dict(dim=dim, spacing=coordinates_tensor_list, edge_order=edge_order)))\n\n return tuple(sample_inputs)\n
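\n# Editor's note: a minimal, hypothetical sketch (not part of the upstream suite)\n# of the semantics exercised by sample_inputs_gradient: for a linear sequence,\n# torch.gradient's central/one-sided differences are exact, and the scalar\n# `spacing` rescales the estimate.\ndef _example_gradient_semantics():\n y = 3.0 * torch.arange(6, dtype=torch.float32)\n (g,) = torch.gradient(y) # unit spacing by default\n assert torch.allclose(g, torch.full_like(y, 3.0))\n (g2,) = torch.gradient(y, spacing=2.0) # samples assumed 2 units apart\n assert torch.allclose(g2, torch.full_like(y, 1.5))\n\ndef sample_inputs_index_select(op_info, device, dtype, requires_grad):\n return (\n SampleInput(\n make_tensor((S, S, S), device, dtype,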
low=None, high=None, requires_grad=requires_grad),\n args=(0, index_variable(2, S, device=device))),\n SampleInput(\n make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(0, torch.tensor([0], dtype=torch.int64, device=device))),\n SampleInput(\n make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(0, torch.tensor(0, dtype=torch.int64, device=device))),\n )\n\ndef sample_inputs_getitem(op_info, device, dtype, requires_grad, **kwargs):\n test_args = [\n ([1, 2],),\n (slice(0, 3),),\n ([slice(0, 3), 1],),\n ([[0, 2, 3], [1, 3, 3], [0, 0, 2]],),\n ([[0, 0, 3], [1, 1, 3], [0, 0, 2]],),\n ([slice(None), slice(None), [0, 3]],),\n ([slice(None), [0, 3], slice(None)],),\n ([[0, 3], slice(None), slice(None)],),\n ([[0, 3], [1, 2], slice(None)],),\n ([[0, 3], ],),\n ([[0, 3], slice(None)],),\n ([[0, 3], Ellipsis],),\n ([[0, 2, 3], [1, 3, 3], torch.LongTensor([0, 0, 2])],),\n (index_variable(2, S, device=device),),\n (mask_not_all_zeros((S,)),),\n ]\n\n return tuple(SampleInput(\n make_tensor((S, S, S), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=args)\n for args in test_args)\n\ndef sample_inputs_index_put(op_info, device, dtype, requires_grad, **kwargs):\n inputs = []\n for accumulate in [False, True]:\n # Test with indices arg\n inputs.append(SampleInput(\n make_tensor((S, S,), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(\n (index_variable(2, S, device=device), ),\n make_tensor((2, S), device, dtype, low=None, high=None)),\n kwargs=dict(accumulate=accumulate)))\n\n # Test with mask arg\n mask = torch.zeros(S, dtype=torch.bool) if accumulate else mask_not_all_zeros((S,))\n inputs.append(SampleInput(\n make_tensor((S, S), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(\n (mask, ),\n make_tensor((S,), device, dtype, low=None, high=None),),\n kwargs=dict(accumulate=accumulate)))\n\n return inputs\n\n# TODO: add samples that exercise the nondeterminism of the operation\n# https://github.com/pytorch/pytorch/issues/53352\ndef sample_inputs_index_add(op_info, device, dtype, requires_grad, **kwargs):\n # These tests are pretty much the same as those from index_copy.\n # Perhaps merge?\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n t = make_arg((S, S))\n s = make_arg((S, S))\n idx = make_arg((S,), dtype=torch.int64, low=0, high=S, requires_grad=False)\n\n samples = [SampleInput(t.detach().clone().requires_grad_(requires_grad),\n args=(1,\n idx.detach().clone(),\n s.detach().clone().requires_grad_(requires_grad)))]\n\n for alpha in (-1, 0, 2):\n samples.append(SampleInput(t.detach().clone().requires_grad_(requires_grad),\n args=(1,\n idx.detach().clone(),\n s.detach().clone().requires_grad_(requires_grad)),\n kwargs=dict(alpha=alpha)))\n\n # Add scalar cases\n scalar_sizes = [(), (1,)]\n # Materialize these as lists (not generators): both extends below iterate\n # them, and a generator would already be exhausted by the first one\n ts = [make_arg(size) for size in scalar_sizes]\n idxs = [make_arg(size, dtype=torch.int64, low=0, high=1, requires_grad=False) for size in scalar_sizes]\n ss = [make_arg(size) for size in scalar_sizes]\n\n samples.extend(SampleInput(t.detach().clone().requires_grad_(requires_grad),\n args=(0, idx.detach().clone(), s.detach().clone())) for t, idx, s in product(ts, idxs, ss))\n samples.extend(SampleInput(t.detach().clone().requires_grad_(requires_grad),\n args=(0, idx.detach().clone(), s.detach().clone()),\n kwargs=dict(alpha=a)) for t, idx, s, a in product(ts, idxs, ss, [-1, 0, 2]))\n return samples\n
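\n# Editor's note: an illustrative, hypothetical mini-check (names are ours) of\n# what the samples above exercise: index_add accumulates rows of `source` into\n# the target at the positions in `index` along `dim`, scaled by `alpha`.\ndef _example_index_add_semantics():\n t = torch.zeros(3, 2)\n source = torch.ones(2, 2)\n index = torch.tensor([0, 2])\n out = t.index_add(0, index, source, alpha=2.0)\n assert torch.equal(out, torch.tensor([[2., 2.], [0., 0.], [2., 2.]]))\n\ndef sample_inputs_sort(op_info,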
device, dtype, requires_grad, **kwargs):\n def small_3d_unique():\n res = torch.randperm(S * S * S, dtype=torch.int64, device=device).view(S, S, S)\n res = res.to(dtype).requires_grad_(requires_grad)\n return res\n\n def large_1d_unique():\n res = torch.randperm(L * L * L, dtype=torch.int64, device=device)\n res = res.to(dtype).requires_grad_(requires_grad)\n return res\n\n samples = []\n # Test case for large tensor.\n samples.append(SampleInput(large_1d_unique()))\n\n # Test cases for small 3d tensors.\n # Imitates legacy tests from test/test_torch.py\n dims = range(-3, 3)\n flag = [True, False]\n for dim, descending, stable in product(dims, flag, flag):\n # default schema without stable sort\n samples.append(SampleInput(small_3d_unique(),\n args=(dim, descending)))\n # schema with stable sort, no CUDA support yet\n if torch.device(device).type == 'cpu':\n samples.append(\n SampleInput(small_3d_unique(),\n kwargs=dict(dim=dim, descending=descending, stable=stable))\n )\n\n # Test cases for scalar tensor\n samples.append(SampleInput(torch.tensor(1, dtype=dtype, device=device, requires_grad=requires_grad)))\n samples.append(SampleInput(torch.tensor(1, dtype=dtype, device=device, requires_grad=requires_grad),\n args=(0,)))\n samples.append(SampleInput(torch.tensor(1, dtype=dtype, device=device, requires_grad=requires_grad),\n args=(0, True)))\n\n # Test cases for stable sort\n samples.append(SampleInput(small_3d_unique(),\n kwargs=dict(stable=True)))\n samples.append(SampleInput(small_3d_unique(),\n kwargs=dict(dim=0, stable=True)))\n samples.append(SampleInput(small_3d_unique(),\n kwargs=dict(dim=0, descending=True, stable=True)))\n return samples\n\ndef sample_inputs_threshold(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n sizes = ((), (S,), (S, S), (S, S, S))\n samples = []\n for x_size in sizes:\n # threshold and values args must be numbers\n samples.append(SampleInput(make_arg(x_size), args=(make_arg(()).item(), make_arg(()).item())))\n return samples\n\ndef sample_inputs_argsort(*args, **kwargs):\n return [sample_input for sample_input in sample_inputs_sort(*args, **kwargs) if \"stable\" not in sample_input.kwargs]\n
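\n# Editor's note: a small, hypothetical illustration (not upstream code) of the\n# relation the filter above relies on: torch.argsort returns the indices that\n# torch.sort would use, so gathering with them reproduces the sorted values.\ndef _example_argsort_matches_sort():\n x = torch.tensor([3.0, 1.0, 2.0])\n values, indices = torch.sort(x)\n assert torch.equal(torch.argsort(x), indices)\n assert torch.equal(x[torch.argsort(x)], values)\n\ndef sample_inputs_unique(op_info, device, dtype, requires_grad, **kwargs):\n sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))\n\n sample_inputs = []\n for shape, sorted, return_inverse, return_counts, dim in \\\n product(sizes, [False, True], [False, True], [False, True], [None, -2, -1, 0, 1, 2]):\n # torch.unique cannot be called if the input tensor has a zero dimension which isn't the selected dim\n if 0 in shape and shape.index(0) != dim:\n continue\n\n # skip invalid dim args\n if dim is not None and (dim < -len(shape) or dim >= len(shape)):\n continue\n\n kwargs = dict(sorted=sorted, return_inverse=return_inverse, return_counts=return_counts, dim=dim)\n\n # construct a test case with only one distinct value\n input_t = torch.zeros(shape, dtype=dtype, device=device, requires_grad=requires_grad)\n sample_inputs.append(SampleInput(input_t, kwargs=kwargs.copy()))\n\n # construct a test case with mixed 0s and 1s\n input_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False)\\\n .to(dtype).requires_grad_(requires_grad)\n sample_inputs.append(SampleInput(input_t, kwargs=kwargs.copy()))\n\n # construct a test case with many different values\n input_t = make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad)\n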
sample_inputs.append(SampleInput(input_t, kwargs=kwargs.copy()))\n\n return sample_inputs\n\ndef sample_inputs_unique_consecutive(*args, **kwargs):\n def generator():\n for sample_input in sample_inputs_unique(*args, **kwargs):\n if not sample_input.kwargs[\"sorted\"]:\n sample_input.kwargs.pop(\"sorted\")\n yield sample_input\n\n return list(generator())\n\ndef sample_inputs_index_fill(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n index_tensor = partial(torch.tensor, device=device, dtype=torch.long)\n\n samples = []\n fill_val = torch.tensor(-1 + 1j if dtype.is_complex else -1)\n idx = index_variable(1, S, device=device)\n ndim = 3\n for d in range(ndim):\n samples.append(SampleInput(make_arg((S,) * ndim), args=(d, idx, fill_val)))\n samples.append(SampleInput(make_arg((S,) * ndim), args=(d, -idx - 1, fill_val)))\n\n\n def unique_idx(numel, max_idx):\n # Generate unique random indices vector of `numel`\n # elements in range [0, max_idx).\n indices = random.sample(range(max_idx), numel)\n return index_tensor(indices)\n\n samples.append(SampleInput(make_arg((S, S)), args=(0, unique_idx(2, S), 2)))\n samples.append(SampleInput(make_arg((S, S)), args=(0, unique_idx(2, S), make_arg(()))))\n samples.append(SampleInput(make_arg((S, S)), args=(0, index_tensor(0), 2)))\n samples.append(SampleInput(make_arg(()), args=(0, index_tensor([0]), 2)))\n samples.append(SampleInput(make_arg(()), args=(0, index_tensor(0), 2)))\n\n # Duplicate indices\n samples.append(SampleInput(make_arg((S, S)), args=(0, index_tensor([0, 0]), 2)))\n samples.append(SampleInput(make_arg((S, S)), args=(0, index_tensor([0, 0, 2]), make_arg(()))))\n\n return samples\n\ndef sample_inputs_max_min_binary(op_info, device, dtype, requires_grad, **kwargs):\n inputs = []\n args_for_binary_op = (\n ((S, S, S), (S, S, S),),\n ((S, S, S), (S,),),\n ((S,), (S, S, S),),\n ((S, 1, S), (S, S),),\n ((S, S), (S, S),),\n ((), (),),\n ((S, S, S), (),),\n ((), (S, S, S),),\n )\n inputs = list((SampleInput(make_tensor(input_tensor, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(make_tensor(other_tensor, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),),))\n for input_tensor, other_tensor in args_for_binary_op)\n return inputs\n\n\ndef sample_inputs_adaptive_avg_pool1d(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Ordered as (input shape, output size)\n cases = (\n ((0, 8, 8), (5,)),\n ((3, 8, 8), 5),\n ((3, 8, 8), 1)\n )\n\n def generator():\n for input_shape, output_size in cases:\n yield SampleInput(make_arg(input_shape), args=(output_size,))\n\n return list(generator())\n\ndef sample_inputs_adaptive_avg_pool2d(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Ordered as (input shape, output size)\n cases = (\n ((1, 8, 8, 8), (5, 7)),\n ((2, 8, 8, 8), (None, 7)),\n ((1, 8, 4, 3), (5, None)),\n ((1, 8, 4, 3), (None, None)),\n ((1, 8, 4, 3), (5)),\n )\n\n def generator():\n for input_shape, output_size in cases:\n yield SampleInput(make_arg(input_shape), args=(output_size,))\n\n return list(generator())\n\n\ndef sample_inputs_adaptive_avg_pool3d(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Ordered as (input 
shape, output size)\n cases = (\n ((0, 8, 8, 8, 8), (5, 7, 4)),\n ((1, 8, 4, 3, 7), (None, None, None)),\n ((1, 8, 4, 3, 7), (1, 1, 1)),\n ((3, 3, 8, 8, 6), (5, 7, None)),\n ((1, 3, 8, 8, 6), (5, None, 2)),\n ((3, 3, 8, 8, 6), (None, 3, 2)),\n )\n\n def generator():\n for input_shape, output_size in cases:\n yield SampleInput(make_arg(input_shape), args=(output_size,))\n\n return list(generator())\n\ndef sample_inputs_adaptive_max_pool1d(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Ordered as (input shape, output size)\n cases = (\n # ((0, 8, 8), (5,)),\n # 0 batch size doesn't work, cannot reshape tensor of 0 elements into shape [0, 8, -1]\n ((3, 4, 4), 3),\n ((3, 4, 4), 1)\n )\n\n def generator():\n for shapes, return_idx in product(cases, (True, False)):\n yield SampleInput(make_arg(shapes[0]), args=(shapes[1], return_idx))\n\n return list(generator())\n\ndef sample_inputs_adaptive_max_pool2d(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Ordered as (input shape, output size)\n cases = (\n # ((0, 8, 8, 8), (5, 7)),\n # 0 batch size doesn't work, cannot reshape tensor of 0 elements into shape [0, 8, -1]\n ((1, 4, 4, 4), (2, 3)),\n ((2, 4, 4, 4), (None, 3)),\n ((2, 4, 4, 4), (1, 1)),\n ((1, 4, 4, 3), (3, None)),\n ((1, 4, 4, 3), (None, None)),\n ((1, 4, 4, 3), (3)),\n )\n\n def generator():\n for shapes, return_idx in product(cases, (True, False)):\n yield SampleInput(make_arg(shapes[0]), args=(shapes[1], return_idx))\n\n return list(generator())\n\n\ndef sample_inputs_adaptive_max_pool3d(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Ordered as (input shape, output size)\n cases = (\n # ((0, 8, 8, 8, 8), (5, 7, 4)),\n # 0 batch size doesn't work, cannot reshape tensor of 0 elements into shape [0, 8, -1]\n ((1, 4, 4, 3, 5), (None, None, None)),\n ((1, 4, 4, 3, 5), (1, 1, 1)),\n ((3, 3, 4, 4, 6), (2, 3, None)),\n ((1, 3, 4, 4, 6), (3, None, 2)),\n ((3, 3, 4, 4, 6), (None, 3, 2)),\n )\n\n def generator():\n for shapes, return_idx in product(cases, (True, False)):\n yield SampleInput(make_arg(shapes[0]), args=(shapes[1], return_idx))\n\n return list(generator())\n\nclass _TestParamsMaxPoolBase(object):\n\n def __init__(self):\n self.kwargs = {\n 'kernel_size': [3],\n 'stride': [2, None],\n 'ceil_mode': [True, False],\n 'padding': [0, 1],\n 'dilation': [1],\n 'return_indices': [True, False]\n }\n\n self.shapes = [\n [1, 2, None], # batch\n [2], # channels\n [3, 6] # signal\n ]\n\n def _gen_shape(self):\n for shape in product(*self.shapes):\n # shape[0] being None indicates a missing batch dimension\n if shape[0] is None:\n shape = shape[1:]\n\n yield shape, torch.contiguous_format\n # only 2d (N, C, H, W) rank 4 tensors support channels_last memory format\n if len(self.shapes) == 4 and len(shape) == 4:\n yield shape, torch.channels_last\n\n def _gen_kwargs(self):\n keys = self.kwargs.keys()\n for values in product(*self.kwargs.values()):\n yield dict(zip(keys, values))\n\n def gen_input_params(self):\n yield from product(self._gen_shape(), self._gen_kwargs())\n
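\n# Editor's note: an illustrative, hypothetical sketch (names are ours) of how\n# the _TestParamsMaxPool* classes below are consumed by sample_inputs_max_pool:\n# gen_input_params() pairs a (shape, memory_format) tuple with one kwargs\n# combination for the corresponding max_pool variant.\ndef _example_iterate_max_pool_params():\n params = _TestParamsMaxPool2d() # defined below\n for (shape, memory_format), kwargs in params.gen_input_params():\n assert len(shape) in (3, 4) # optionally-batched (N, C, H, W) inputs\n assert set(kwargs) == {'kernel_size', 'stride', 'ceil_mode', 'padding', 'dilation', 'return_indices'}\n break # a single combination suffices for illustration\n\nclass _TestParamsMaxPool1d(_TestParamsMaxPoolBase):\n\n def __init__(self):\n super().__init__()\n self.kwargs['kernel_size'] += [(3,)]\n self.kwargs['stride'] += [(2,)]\n self.kwargs['padding'] += [(1,)]\n self.kwargs['dilation'] +=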
[(1,)]\n\nclass _TestParamsMaxPool2d(_TestParamsMaxPoolBase):\n\n def __init__(self):\n super().__init__()\n self.kwargs['kernel_size'] += [(3, 2)]\n self.kwargs['stride'] += [(2, 1)]\n self.kwargs['padding'] += [(1, 1)]\n self.kwargs['dilation'] += [(1, 2)]\n\n self.shapes.append([6])\n\nclass _TestParamsMaxPool3d(_TestParamsMaxPoolBase):\n\n def __init__(self):\n super().__init__()\n self.kwargs['kernel_size'] += [(3, 2, 3)]\n self.kwargs['stride'] += [(2, 1, 2)]\n self.kwargs['dilation'] += [(1, 2, 1)]\n\n self.shapes.append([6])\n self.shapes.append([5])\n\ndef sample_inputs_max_pool(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)\n\n params_generator_type_dict = {\n 'nn.functional.max_pool1d': _TestParamsMaxPool1d,\n 'nn.functional.max_pool2d': _TestParamsMaxPool2d,\n 'nn.functional.max_pool3d': _TestParamsMaxPool3d,\n }\n\n def generator():\n params_generator = params_generator_type_dict[op_info.name]()\n for (shape, memory_format), kwargs in params_generator.gen_input_params():\n arg = make_arg(shape).to(memory_format=memory_format).requires_grad_(requires_grad)\n yield SampleInput(arg, kwargs=kwargs)\n\n return list(generator())\n\ndef sample_inputs_normalize(self, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, low=-1, high=1, device=device, dtype=dtype, requires_grad=requires_grad)\n\n cases: Tuple[Tuple[int], dict] = ( # type: ignore[assignment]\n ((2, 1, 4, 5), {'p': 1., 'dim': 2}),\n ((2, 3, 4, 5), {'p': 2., 'dim': 1}),\n ((1, 2, 4, 5), {'p': 0.5, 'dim': 0}),\n ((1, 3, 4, 5), {'p': -1., 'dim': 1}),\n ((1, 3, 4, 5), {'p': 0., 'dim': -1}),\n ((), {'p': 1.2, 'dim': 0}),\n ((2, 3, 4, 5), {}),\n ((2, 3, 4, 5), {'eps': 1e-4}))\n\n def generator():\n for input_shape, kwargs in cases:\n yield SampleInput(make_arg(input_shape), kwargs=kwargs)\n\n return list(generator())\n\ndef sample_inputs_conv_transpose1d(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Ordered as shapes for input, weight, bias\n # and a dict of values of (stride, padding, output_padding, groups, dilation)\n cases: Tuple[Tuple[int], Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment]\n ((1, 3, 4), (3, 3, 3), (3,),\n {'stride': (2,), 'padding': 2, 'output_padding': (1,), 'groups': 1}),\n ((2, 2, 4), (2, 2, 4), (4,),\n {'stride': (3,), 'padding': (1,), 'output_padding': (2,), 'groups': 2, 'dilation': (4,)}),\n ((1, 1, 4), (1, 1, 4), (1,),\n {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2,)}),\n ((1, 1, 4), (1, 2, 3), None,\n {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}),\n ((1, 4, 5), (4, 8, 3), None,\n {})\n )\n\n def generator():\n for input_shape, weight, bias, kwargs in cases:\n yield SampleInput(make_arg(input_shape), args=(\n make_arg(weight),\n make_arg(bias) if bias is not None else bias\n ), kwargs=kwargs)\n\n return list(generator())\n\ndef sample_inputs_conv_transpose2d(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Ordered as shapes for input, weight, bias\n # and a dict of values of (stride, padding, output_padding, groups, dilation)\n cases: Tuple[Tuple[int], Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment]\n ((1, 3, 4, 4), (3, 3, 3, 3), (3,),\n {'stride': (2, 2), 'padding': 2, 'output_padding': (1, 1), 'groups': 1}),\n ((2, 2, 4, 4), 
(2, 2, 4, 5), (4,),\n {'stride': (3, 2), 'padding': (1, 2), 'output_padding': (2, 3), 'groups': 2, 'dilation': (4, 4)}),\n ((1, 1, 4, 5), (1, 1, 4, 3), (1,),\n {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2, 3)}),\n ((1, 1, 4, 3), (1, 2, 3, 4), None,\n {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}),\n ((1, 4, 5, 5), (4, 8, 3, 3), None,\n {})\n )\n\n def generator():\n for input_shape, weight, bias, kwargs in cases:\n yield SampleInput(make_arg(input_shape), args=(\n make_arg(weight),\n make_arg(bias) if bias is not None else bias\n ), kwargs=kwargs)\n\n return list(generator())\n\ndef sample_inputs_conv_transpose3d(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Ordered as shapes for input, weight, bias\n # and a dict of values of (stride, padding, output_padding, groups, dilation)\n cases: Tuple[Tuple[int], Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment]\n ((1, 3, 4, 4, 4), (3, 3, 3, 3, 3), (3,),\n {'stride': (2, 2, 2), 'padding': 2, 'output_padding': (1, 1, 1), 'groups': 1}),\n ((2, 2, 4, 4, 4), (2, 2, 4, 5, 6), (4,),\n {'stride': (3, 2, 1), 'padding': (1, 2, 3), 'output_padding': (2, 3, 1), 'groups': 2, 'dilation': (4, 4, 4)}),\n ((1, 1, 4, 5, 2), (1, 1, 4, 3, 1), (1,),\n {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2, 3, 2)}),\n ((1, 1, 4, 3, 4), (1, 2, 3, 4, 5), None,\n {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}),\n ((1, 4, 5, 5, 5), (4, 8, 3, 3, 3), None,\n {})\n )\n\n def generator():\n for input_shape, weight, bias, kwargs in cases:\n yield SampleInput(make_arg(input_shape), args=(\n make_arg(weight),\n make_arg(bias) if bias is not None else bias\n ), kwargs=kwargs)\n\n return list(generator())\n\n\ndef sample_inputs_conv1d(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Ordered as shapes for input, weight, bias,\n # and a dict of values of (stride, padding, dilation, groups)\n cases: Tuple = (\n ((1, 3, 4), (3, 3, 3), (3,), {'stride': (2,), 'padding': 2, 'groups': 1}),\n ((2, 4, 8), (2, 2, 3), (2,), {'stride': 3, 'padding': 1, 'groups': 2, 'dilation': 2}),\n ((1, 4, 5), (1, 4, 3), None, {'stride': (2,), 'padding': 'valid'}),\n ((2, 2, 4), (2, 1, 4), (2,), {'stride': (1,), 'padding': 'same', 'groups': 2, 'dilation': (2,)}),\n # With defaults\n ((1, 4, 5), (3, 4, 3), None, {}),\n )\n\n # TODO: (@krshrimali), add error_inputs_func once https://github.com/pytorch/pytorch/pull/67354 is merged\n # Should replace test_conv_modules_raise_error_on_incorrect_input_size and test_conv_shapecheck\n # in test/test_nn.py\n\n def generator():\n for input_shape, weight, bias, kwargs in cases:\n yield SampleInput(make_arg(input_shape), args=(\n make_arg(weight),\n make_arg(bias) if bias is not None else bias\n ), kwargs=kwargs)\n\n return list(generator())\n\n\ndef sample_inputs_conv2d(op_info, device, dtype, requires_grad, jit_fail_sample=False, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Ordered as shapes for input, weight, bias\n # and a dict of values of (stride, padding, groups, dilation)\n cases: Tuple = (\n ((1, 3, 4, 4), (3, 3, 3, 3), (3,),\n {'stride': (2, 2), 'padding': 2, 'groups': 1}),\n ((2, 4, 8, 8), (2, 2, 3, 3), (2,),\n {'stride': (3, 2), 'padding': (2, 1), 'groups': 2, 'dilation': (4, 4)}),\n ((1, 4, 5, 5), (1, 4, 2, 
3), (1,),\n {'stride': 2, 'padding': 1, 'groups': 1, 'dilation': (2, 3)}),\n ((1, 2, 4, 3), (4, 2, 3, 4), None,\n {'stride': 2, 'padding': 1, 'groups': 1}),\n ((1, 4, 5, 5), (1, 4, 2, 3), (1,),\n {'stride': 2, 'padding': \"valid\"}),\n ((1, 4, 5, 5), (1, 4, 2, 3), (1,),\n {'stride': 1, 'padding': \"same\", 'dilation': 3}),\n # Below are the group related samples from common_nn.py\n ((2, 4, 6, 6), (4, 1, 3, 3), (4,), {'groups': 4}),\n ((2, 4, 6, 6), (8, 1, 3, 3), (8,), {'groups': 4}),\n ((2, 4, 6, 6), (8, 1, 3, 3), None, {'groups': 4}),\n ((2, 4, 6, 6), (4, 1, 3, 3), (4,), {'groups': 4, 'stride': (3, 2)}),\n ((2, 4, 6, 6), (4, 1, 3, 3), (4,), {'groups': 4, 'padding': (1, 1)}),\n ((2, 4, 5, 5), (4, 1, 2, 2), (4,), {'groups': 4, 'dilation': (2, 2)}),\n ((2, 4, 6, 5), (6, 2, 3, 2), (6,), {'groups': 2}),\n # With defaults\n ((1, 4, 5, 5), (3, 4, 3, 3), None, {}),\n )\n\n def generator():\n for input_shape, weight, bias, kwargs in cases:\n yield SampleInput(make_arg(input_shape), args=(\n make_arg(weight),\n make_arg(bias) if bias is not None else bias\n ), kwargs=kwargs)\n\n return list(generator())\n\n\ndef sample_inputs_group_norm(opinfo, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Ordered as input shape, num groups, and eps\n cases: Tuple[Tuple[int], int, float] = ( # type: ignore[assignment]\n ((1, 6, 3), 2, 0.5),\n ((2, 6, 3), 2, -0.5),\n ((1, 2), 1, None),\n ((0, 2), 1, None),\n )\n\n def generator():\n for input_shape, num_groups, eps in cases:\n # Shape of weight and bias should be the same as num_channels\n weight = make_arg(input_shape[1])\n bias = make_arg(input_shape[1])\n kwargs = {'weight': weight, 'bias': bias} if eps is None else {'weight': weight, 'bias': bias, 'eps': eps}\n yield SampleInput(\n make_arg(input_shape),\n args=(num_groups,),\n kwargs=kwargs\n )\n # Without any optional args\n yield SampleInput(make_arg((1, 2)), args=(1,))\n\n return list(generator())\n
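\n\n# Editor's note: a minimal, hypothetical check (not upstream code) of the\n# normalization sampled above: with num_groups=1 and no affine parameters,\n# group_norm standardizes each sample over all of its non-batch elements, so\n# the per-sample output mean is approximately zero.\ndef _example_group_norm_semantics():\n x = torch.randn(2, 6, 3)\n out = torch.nn.functional.group_norm(x, 1)\n assert torch.allclose(out.mean(dim=(1, 2)), torch.zeros(2), atol=1e-5)\n\n\ndef sample_inputs_instance_norm(opinfo, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n make_arg_without_requires_grad = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)\n\n # Ordered as: input shape, kwargs for momentum, eps\n cases: Tuple[Tuple[int], dict] = ( # type: ignore[assignment]\n ((S, S, S), {'momentum': 0.5, 'eps': 0.6}),\n ((S, S, S), {'momentum': 0.5, 'eps': 0.6, 'use_input_stats': True}),\n ((3, 2, 4), {'momentum': -1.2}),\n ((3, 2, 4), {'momentum': 0.0}),\n ((3, 2, 3, 4), {'momentum': -1.0, 'eps': 0.5}),\n )\n\n def generator():\n for input_shape, kwargs in cases:\n # args: running mean, running var, weight and bias should necessarily be of shape: (channels,)\n channels = input_shape[1]\n weight = make_arg(channels)\n bias = make_arg(channels)\n running_mean = make_arg_without_requires_grad(channels, low=0)\n running_var = make_arg_without_requires_grad(channels, low=0)\n new_kwargs = {\n 'running_mean': running_mean,\n 'running_var': running_var,\n 'weight': weight,\n 'bias': bias,\n **kwargs\n }\n\n yield SampleInput(\n make_arg(input_shape),\n args=(),\n kwargs=new_kwargs\n )\n\n # Checking for permutations of weights and biases as `None`\n # instance_norm assumes that if there's a bias, there's a weight\n weights = [channels, None]\n biases =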
[None, None]\n\n for weight_channels, bias_channels in zip(weights, biases):\n running_mean = make_arg_without_requires_grad(channels, low=0)\n running_var = make_arg_without_requires_grad(channels, low=0)\n yield SampleInput(\n make_arg(input_shape),\n args=(),\n kwargs={\n 'running_mean': running_mean,\n 'running_var': running_var,\n 'weight': make_arg(weight_channels) if weight_channels is not None else None,\n 'bias': make_arg(bias_channels) if bias_channels is not None else None\n }\n )\n\n # Test case for no optional kwargs\n yield SampleInput(make_arg((1, 2, 3)), kwargs={})\n\n return list(generator())\n\n\ndef sample_inputs_layer_norm(opinfo, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Ordered as input shape, normalized_shape and a kwarg dict for eps\n cases: Tuple[Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment]\n ((1, 2, 3), (1, 2, 3), {'eps': 0.5}),\n ((2, 2, 3), (2, 3), {'eps': -0.5}),\n ((1,), (1,), {}),\n ((1, 2), (2,), {}),\n ((0, 1), (1,), {}),\n )\n\n def generator():\n for input_shape, normalized_shape, kwargs in cases:\n # Shape of weight and bias should be the same as normalized_shape\n weight = make_arg(normalized_shape)\n bias = make_arg(normalized_shape)\n yield SampleInput(\n make_arg(input_shape),\n args=(normalized_shape, weight, bias),\n kwargs=kwargs\n )\n # Without any optional args\n yield SampleInput(make_arg((1, 2)), args=((2,),))\n\n # TODO: @krshrimali, once to_numpy method in SampleInput class is modified to take None inputs,\n # enable these inputs; see https://github.com/pytorch/pytorch/pull/63276#discussion_r691950400\n\n # With weight and a `None` bias\n # yield SampleInput(make_arg((1, 2)), args=((2,), make_arg((2,)), None))\n\n # With `None` weight and bias (tests failing for this, see the link above)\n # yield SampleInput(make_arg((1, 2)), args=((2,), None, make_arg((2,))))\n\n return list(generator())\n\n\ndef sample_inputs_local_response_norm(opinfo, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Ordered as input shape, size and a kwarg dict for alpha, beta, and k\n cases: Tuple[Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment]\n ((1, 6, 3), 2, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}),\n ((1, 6, 3), 2, {'beta': 0.5, 'k': 1.25}),\n ((1, 6, 3), 2, {'alpha': 3e-05, 'k': 1.25}),\n ((1, 6, 3), 2, {'alpha': 3e-05, 'beta': 0.5}),\n ((1, 6, 3), 2, {'alpha': 3e-05}),\n ((1, 6, 3), 2, {'beta': 0.5}),\n ((1, 6, 3), 2, {'k': 1.25}),\n ((1, 6, 3), 2, {}),\n ((2, 6, 3), 2, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}),\n ((1, 1, 2), 1, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}),\n ((0, 1, 2), 1, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}),\n )\n\n def generator():\n for input_shape, size, kwargs in cases:\n yield SampleInput(make_arg(input_shape), args=(size,), kwargs=kwargs)\n\n return list(generator())\n\n\ndef sample_inputs_hardswish(self, device, dtype, requires_grad):\n N = 5\n # make sure we are testing -3 -> 3 range. 
default is -10 -> 10, so this may be unnecessary\n tensors = [SampleInput(make_tensor((N * 2, N * 2), device=device, dtype=dtype,\n requires_grad=requires_grad, low=-5, high=5)) for _ in range(1, N)]\n return tensors\n\ndef sample_inputs_linear(self, device, dtype, requires_grad):\n features_options = [[3, 4], [8, 8]]\n batch_options: List[List[int]] = [\n [], # no batch\n [0],\n [8],\n [2, 3],\n ]\n create_tensor = partial(make_tensor, device=device, dtype=dtype,\n requires_grad=requires_grad, low=-2, high=2)\n\n sample_inputs = []\n for has_bias, (in_feat, out_feat), batch_shape in \\\n itertools.product([True, False], features_options, batch_options):\n input_tensor = create_tensor(batch_shape + [in_feat])\n weight = create_tensor([out_feat, in_feat])\n if not has_bias:\n sample_inputs.append(SampleInput(input_tensor, args=(weight,)))\n continue\n\n bias = create_tensor([out_feat])\n sample_inputs.append(SampleInput(input_tensor, args=(weight, bias)))\n return sample_inputs\n\ndef sample_inputs_bilinear(self, device, dtype, requires_grad):\n features_options = [[3, 4, 5], [8, 8, 8]]\n batch_options: List[List[int]] = [\n [], # no batch\n [0],\n [8],\n [2, 3],\n ]\n create_tensor = partial(make_tensor, device=device, dtype=dtype,\n requires_grad=requires_grad, low=-2, high=2)\n\n sample_inputs = []\n for has_bias, (in_feat1, in_feat2, out_feat), batch_shape in \\\n itertools.product([True, False], features_options, batch_options):\n input_tensor1 = create_tensor(batch_shape + [in_feat1])\n input_tensor2 = create_tensor(batch_shape + [in_feat2])\n weight = create_tensor([out_feat, in_feat1, in_feat2])\n if not has_bias:\n sample_inputs.append(SampleInput(input_tensor1, args=(input_tensor2, weight,)))\n continue\n bias = create_tensor([out_feat])\n sample_inputs.append(SampleInput(input_tensor1, args=(input_tensor2, weight, bias)))\n\n return sample_inputs\n\ndef sample_inputs_glu(self, device, dtype, requires_grad):\n features_options = [[2], [2, 4], [8, 8], [3, 6, 8], [1, 4, 6, 7]]\n batch_options: List[List[int]] = [\n [], # no batch\n [0],\n [8],\n [2, 3],\n ]\n create_tensor = partial(make_tensor, device=device, dtype=dtype,\n requires_grad=requires_grad, low=-2, high=2)\n\n sample_inputs = []\n for features, batch_shape in itertools.product(features_options, batch_options):\n ndim = len(features) + len(batch_shape)\n for dim in range(ndim):\n input_tensor = create_tensor(batch_shape + features)\n dim_size = input_tensor.size(dim)\n if dim_size > 0 and dim_size % 2 == 0:\n sample_inputs.append(SampleInput(input_tensor, args=(dim,)))\n\n return sample_inputs\n
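\n# Editor's note: a tiny, hypothetical illustration (not upstream code) of why\n# the cases above keep only even-sized dims: F.glu halves `dim`, computing\n# first_half * sigmoid(second_half).\ndef _example_glu_semantics():\n x = torch.tensor([[1.0, 2.0, 0.0, 0.0]])\n out = torch.nn.functional.glu(x, dim=-1)\n # the second half is zero, so sigmoid(.) == 0.5 and out == first_half * 0.5\n assert torch.allclose(out, torch.tensor([[0.5, 1.0]]))\n\ndef sample_inputs_interpolate(mode, self, device, dtype, requires_grad):\n N, C = 2, 3\n D = 4\n S = 3\n L = 5\n\n align_corners_options: Tuple[Any, ...]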
= (None,)\n if mode in ('linear', 'bilinear', 'bicubic', 'trilinear'):\n align_corners_options = (True, False, None)\n ranks_for_mode = {\n 'nearest': [1, 2, 3],\n 'linear': [1],\n 'bilinear': [2],\n 'bicubic': [2],\n 'trilinear': [3],\n 'area': [1, 2, 3]\n }\n\n def shape(size, rank, with_batch_channel=True):\n if with_batch_channel:\n return tuple([N, C] + ([size] * rank))\n return tuple([size] * rank)\n\n make_arg = partial(make_tensor, device=device, dtype=dtype,\n requires_grad=requires_grad, low=-1, high=1)\n\n sample_inputs = []\n for align_corners in align_corners_options:\n for rank in ranks_for_mode[mode]:\n sample_inputs.extend([\n SampleInput(make_arg(shape(D, rank)),\n args=(shape(S, rank, False), None, mode, align_corners)),\n SampleInput(make_arg(shape(D, rank)),\n args=(shape(L, rank, False), None, mode, align_corners)),\n SampleInput(make_arg(shape(D, rank)),\n args=(None, 1.7, mode, align_corners)),\n SampleInput(make_arg(shape(D, rank)),\n args=(None, 0.6, mode, align_corners)),\n ])\n\n return sample_inputs\n\ndef sample_inputs_upsample(mode, self, device, dtype, requires_grad):\n N, C = 2, 3\n D = 4\n S = 3\n L = 5\n\n ranks_for_mode = {\n 'nearest': [1, 2, 3],\n 'bilinear': [2],\n }\n\n def shape(size, rank, with_batch_channel=True):\n if with_batch_channel:\n return tuple([N, C] + ([size] * rank))\n return tuple([size] * rank)\n\n make_arg = partial(make_tensor, device=device, dtype=dtype,\n requires_grad=requires_grad, low=-1, high=1)\n\n sample_inputs = []\n for rank in ranks_for_mode[mode]:\n sample_inputs.extend([\n SampleInput(make_arg(shape(D, rank)),\n kwargs=dict(size=shape(S, rank, False))),\n SampleInput(make_arg(shape(D, rank)),\n kwargs=dict(size=shape(L, rank, False))),\n SampleInput(make_arg(shape(D, rank)),\n kwargs=dict(scale_factor=1.7)),\n SampleInput(make_arg(shape(D, rank)),\n kwargs=dict(scale_factor=0.6)),\n ])\n\n return sample_inputs\n\ndef sample_inputs_gelu(self, device, dtype, requires_grad):\n N = 5\n tensors = [SampleInput(make_tensor((N * 2, N * 2), device=device, dtype=dtype,\n requires_grad=requires_grad, low=-3, high=3)) for _ in range(1, N)]\n return tensors\n\ndef sample_inputs_max_min_reduction_with_dim(op_info, device, dtype, requires_grad, **kwargs):\n inputs = []\n args_for_reduction_with_dim = (\n ((S, S, S), (1,),),\n ((S, S, S), (1, True, ),),\n ((), (0,),),\n ((), (0, True,),),\n )\n inputs = list((SampleInput(make_tensor(input_tensor, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=args,))\n for input_tensor, args in args_for_reduction_with_dim)\n return inputs\n\ndef sample_inputs_max_min_reduction_no_dim(op_info, device, dtype, requires_grad, **kwargs):\n inputs = []\n inputs.append(SampleInput(make_tensor((S, S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),))\n inputs.append(SampleInput(make_tensor((), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),))\n return inputs\n\ndef _generate_nan_reduction_inputs(device, dtype, requires_grad):\n yield from _generate_reduction_inputs(device, dtype, requires_grad)\n yield torch.tensor([2, torch.nan, -1], device=device, dtype=dtype, requires_grad=requires_grad)\n yield torch.tensor([[torch.nan, 2], [0, 1]], device=device, dtype=dtype, requires_grad=requires_grad)\n\ndef sample_inputs_nan_reduction(supports_multiple_dims):\n # Generates sample inputs for reduction ops that contain the input tensor\n # and dim and keepdim kwargs. 
If a reduction op needs to test additional\n # args/kwargs then create a separate sample_inputs function\n def fn(op_info, device, dtype, requires_grad):\n inputs = []\n\n for t in _generate_nan_reduction_inputs(device, dtype, requires_grad):\n # Add case without dim and keepdim kwargs\n inputs.append(SampleInput(t.detach().clone().requires_grad_(requires_grad)))\n for kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims):\n inputs.append(SampleInput(t.detach().clone().requires_grad_(requires_grad),\n kwargs=kwargs))\n\n return inputs\n\n return fn\n\ndef sample_inputs_reduction_quantile(op_info, device, dtype, requires_grad):\n test_quantiles = (0.5, make_tensor((2,), device, dtype, low=0, high=1))\n test_interpolations = ['linear', 'midpoint']\n\n inputs = []\n for quantiles in test_quantiles:\n for t in _generate_reduction_inputs(device, dtype, requires_grad):\n # Add case without dim and keepdim kwargs\n inputs.append(SampleInput(t.detach().clone().requires_grad_(requires_grad),\n args=(quantiles,)))\n for kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims=False):\n # Interpolation kwarg for now is only supported when providing both dim and keepdim\n kwargs.setdefault('dim', 0)\n kwargs.setdefault('keepdim', False)\n for interpolation in test_interpolations:\n kwargs['interpolation'] = interpolation\n inputs.append(SampleInput(t.detach().clone().requires_grad_(requires_grad),\n args=(quantiles,), kwargs=kwargs))\n\n return inputs\n\ndef sample_inputs_reduction_count_nonzero(*args, **kwargs):\n \"\"\"Sample inputs for count_nonzero\"\"\"\n samples: List[SampleInput] = sample_inputs_reduction(*args, **kwargs)\n # count_nonzero does not support keepdim yet\n for sample in samples:\n sample.kwargs.pop('keepdim', None)\n return samples\n\ndef sample_inputs_leaky_relu(op_info, device, dtype, requires_grad):\n N = 10\n tensors = [SampleInput(make_tensor((N, N), device=device, dtype=dtype,\n requires_grad=requires_grad)) for _ in range(1, N)]\n return tensors\n\ndef sample_inputs_fractional_max_pool2d(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Order: input_shape, kernel_size\n cases = (((1, 3, 9, 9), 3),\n ((1, 3, 9, 9), (4, 4)),\n ((1, 3, 9, 9), (6, 6)),\n ((2, 3, 9, 9), (3, 3)),\n ((1, 1, 4, 4), (2, 2)),\n ((1, 2, 6, 6), (4, 4)))\n\n samples = []\n\n for input_shape, kernel_size in cases:\n for return_indices in [False, True]:\n # test case passing a single output size\n samples.append(SampleInput(\n make_arg(input_shape),\n args=(kernel_size,),\n kwargs=dict(output_size=(2), return_indices=return_indices)\n ))\n\n # test case passing a tuple output size\n samples.append(SampleInput(\n make_arg(input_shape),\n args=(kernel_size,),\n kwargs=dict(output_size=(2, 3), return_indices=return_indices)\n ))\n\n # test case passing an output ratio\n samples.append(SampleInput(\n make_arg(input_shape),\n args=(kernel_size,),\n kwargs=dict(output_ratio=(0.5, 0.5), return_indices=return_indices)\n ))\n\n return samples\n\ndef sample_inputs_fractional_max_pool3d(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Order: input_shape, kernel_size\n cases = (((2, 3, 5, 5, 5), (2, 2, 2)),\n ((1, 2, 6, 5, 4), 2),\n ((1, 2, 5, 6, 5), (2, 3, 2)),\n ((1, 2, 6, 6, 6), (2, 3, 2)),\n ((1, 1, 7, 6, 7), (2, 3, 4)),\n ((1, 1, 4, 5, 4), (2, 2, 1)),\n ((1, 1, 8, 7, 6), (4, 3, 2)),\n ((0, 1, 4, 
5, 4), (2, 2, 1)))\n\n samples = []\n\n for input_shape, kernel_size in cases:\n for return_indices in [False, True]:\n # test case passing a single output size\n samples.append(SampleInput(\n make_arg(input_shape),\n args=(kernel_size,),\n kwargs=dict(output_size=(2), return_indices=return_indices)\n ))\n\n # test case passing a tuple output size\n samples.append(SampleInput(\n make_arg(input_shape),\n args=(kernel_size,),\n kwargs=dict(output_size=(2, 3, 2), return_indices=return_indices)\n ))\n\n # test case passing an output ratio\n samples.append(SampleInput(\n make_arg(input_shape),\n args=(kernel_size,),\n kwargs=dict(output_ratio=(0.5, 0.5, 0.5), return_indices=return_indices)\n ))\n\n return samples\n\ndef sample_inputs_avgpool2d(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Order: input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override\n cases = (((1, 3, 9, 9), 3, 1, 1, True, False, 2),\n ((1, 3, 9, 9), (4, 4), (2, 3), 1, True, False, 2),\n ((1, 3, 9, 9), (6, 6), (3, 3), (2, 3), True, True, 2),\n ((2, 3, 9, 9), (3, 3), (1, 1), (1, ), True, False, 2),\n ((1, 1, 4, 4), (2, 2), (), (0, ), False, True, -2),\n ((1, 2, 6, 6), (4, 4), (2, 2), (2, ), True, True, None))\n\n def generator():\n for input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override in cases:\n yield SampleInput(make_arg(input_shape),\n args=(kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override))\n # Case with just input_shape and kernel_size; note the trailing comma so\n # that args is a 1-tuple containing the kernel_size tuple\n yield SampleInput(make_arg((1, 3, 9, 9)), args=((3, 3),))\n\n return list(generator())\n\ndef sample_inputs_avgpool1d(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Order: input_shape, kernel_size, kwargs\n cases: List[Tuple[Tuple[int, ...], Union[int, Tuple[int, ...]], Dict]] = [\n ((2, 3, 9), (3,), dict()),\n ((1, 3, 9), 3, dict(stride=1, padding=1, ceil_mode=True, count_include_pad=False)),\n ((1, 3, 9), (6,), dict(stride=(3,), padding=(2,), ceil_mode=True, count_include_pad=True)),\n ((2, 3, 9), (3,), dict(stride=(1,), padding=(1,), ceil_mode=False, count_include_pad=True)),\n ((0, 3, 9), (6,), dict(stride=(3,), padding=(2,), ceil_mode=False, count_include_pad=True)),\n ((1, 2, 9), (7,), dict(stride=(3,), padding=(2,), ceil_mode=False)),\n ((1, 2, 9), (7,), dict(stride=(3,), padding=(3,), ceil_mode=True)),\n ((1, 2, 9), (7,), dict(stride=(3,), ceil_mode=False)),\n ((1, 2, 9), (7,), dict(stride=(3,), ceil_mode=True)),\n ]\n\n def generator():\n for input_shape, kernel_size, kwargs in cases:\n yield SampleInput(make_arg(input_shape), args=(kernel_size,), kwargs=kwargs)\n\n return list(generator())\n
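\n# Editor's note: a hypothetical mini-example (not upstream code) of the\n# trickier avg_pool kwarg sampled above: with divisor_override, each window sum\n# is divided by the given constant instead of the window's element count.\ndef _example_avg_pool_divisor_override():\n x = torch.ones(1, 1, 4, 4)\n out = torch.nn.functional.avg_pool2d(x, kernel_size=2, divisor_override=2)\n # every 2x2 window sums to 4; dividing by 2 instead of 4 yields 2\n assert torch.equal(out, torch.full((1, 1, 2, 2), 2.0))\n\ndef sample_inputs_avgpool3d(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Order: input_shape, kernel_size, kwargs\n cases: List[Tuple[Tuple[int, ...], Union[int, Tuple[int, ...]], Dict]] = [\n ((2, 3, 3, 4, 4), (2, 2, 2), dict()),\n ((1, 2, 4, 4, 4), 2, dict(stride=1, padding=1, ceil_mode=True,\n count_include_pad=False, divisor_override=2)),\n ((1, 2, 5, 5, 5), (2, 3, 4), dict(stride=(1, 2, 2), padding=(0, 1, 2), ceil_mode=True,\n count_include_pad=True, divisor_override=2)),\n ((1, 2, 5, 5, 5), (2, 3, 4), dict(stride=(1, 2, 2),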
padding=(0, 1, 2), ceil_mode=False)),\n ((1, 1, 7, 5, 7), (6, 3, 4), dict(stride=(2, 3, 2), padding=(3, 1, 0), ceil_mode=False,\n count_include_pad=False, divisor_override=2)),\n ((1, 1, 4, 5, 4), (2, 2, 3), dict(stride=(2, 2, 1), padding=0, ceil_mode=False,\n count_include_pad=True, divisor_override=-2)),\n ((1, 1, 6, 5, 6), (4, 5, 6), dict(stride=(2, 3, 2), padding=2, ceil_mode=True,\n count_include_pad=True, divisor_override=None)),\n ((0, 1, 4, 5, 4), (2, 3, 1), dict(stride=(2, 1, 2), padding=0, ceil_mode=False,\n count_include_pad=True, divisor_override=None)),\n ]\n\n def generator():\n for input_shape, kernel_size, kwargs in cases:\n yield SampleInput(make_arg(input_shape), args=(kernel_size,), kwargs=kwargs)\n\n return list(generator())\n\ndef sample_inputs_topk(op_info, device, dtype, requires_grad, **kwargs):\n def get_tensor_input(size):\n return make_tensor(size, device, dtype, requires_grad=requires_grad)\n\n inputs = []\n inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3,)))\n inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, 1)))\n inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, -2)))\n inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, 1, True)))\n inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, -2, True)))\n inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, 1, True, True)))\n inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, -2, True, True)))\n\n inputs.append(SampleInput(get_tensor_input(()), args=(1,)))\n inputs.append(SampleInput(get_tensor_input(()), args=(1, 0)))\n inputs.append(SampleInput(get_tensor_input(()), args=(1, -1)))\n inputs.append(SampleInput(get_tensor_input(()), args=(1, 0, True)))\n inputs.append(SampleInput(get_tensor_input(()), args=(1, -1, True)))\n inputs.append(SampleInput(get_tensor_input(()), args=(1, 0, True, True)))\n inputs.append(SampleInput(get_tensor_input(()), args=(1, -1, True, True)))\n\n return inputs\n\ndef sample_inputs_outer(op_info, device, dtype, requires_grad, **kwargs):\n inputs = []\n arg_a = make_tensor((S,), device, dtype, requires_grad=requires_grad)\n arg_b = make_tensor((M,), device, dtype, requires_grad=requires_grad)\n inputs.append(SampleInput(arg_a, args=(arg_b,)))\n return inputs\n\n\ndef sample_inputs_igamma_igammac(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, low=1e-3)\n cases = (((S, S), (S, S), False),\n ((S, S), (S, ), False),\n ((S, ), (S, S), True),\n ((), (), False))\n\n def generator():\n for shape, other_shape, broadcasts_input in cases:\n yield SampleInput(make_arg(shape, requires_grad=requires_grad),\n args=(make_arg(other_shape, requires_grad=False),),\n broadcasts_input=broadcasts_input)\n\n return list(generator())\n\n\ndef sample_inputs_dist(op_info, device, dtype, requires_grad):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n sizes = ((S, S, S), (S,), (S, 1, S), (), (S, S))\n ps = (2, 4)\n\n def generate_samples():\n for size_x, size_y, p in product(sizes, sizes, ps):\n yield SampleInput(make_arg(size_x), args=(make_arg(size_y), p))\n\n return list(generate_samples())\n
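\n# Editor's note: a small, hypothetical illustration (not upstream code) of the\n# op sampled above: torch.dist(x, y, p) is the p-norm of the broadcast\n# difference x - y.\ndef _example_dist_semantics():\n x = torch.tensor([3.0, 0.0])\n y = torch.tensor([0.0, 4.0])\n assert torch.isclose(torch.dist(x, y, 2), torch.tensor(5.0))\n assert torch.isclose(torch.dist(x, y, 1), torch.tensor(7.0))\n\n# TODO: add samples that exercise the nondeterminism of the operation\n# https://github.com/pytorch/pytorch/issues/53352\ndef sample_inputs_index_copy(op_info, device, dtype, requires_grad, **kwargs):\n def make_arg(shape, low=None, high=None, dtype=dtype, requires_grad=requires_grad):\n return make_tensor(shape, device=device, dtype=dtype,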
low=low, high=high, requires_grad=requires_grad)\n\n t = make_arg((S, S))\n s = make_arg((S, S))\n # idx is a permutation of 0...S-1 for this function to be deterministic\n idx = torch.randperm(S, device=device, dtype=torch.int64)\n\n samples = [SampleInput(t, args=(1, idx, s))]\n\n # Add scalar cases\n scalar_sizes = [(), (1,)]\n ts = (make_arg(size) for size in scalar_sizes)\n idxs = (make_arg(size, dtype=torch.int64, low=0, high=1, requires_grad=False) for size in scalar_sizes)\n ss = (make_arg(size) for size in scalar_sizes)\n\n samples.extend(SampleInput(t.detach().clone().requires_grad_(requires_grad),\n args=(0, idx, s)) for t, idx, s in product(ts, idxs, ss))\n return samples\n\ndef sample_inputs_mode(op_info, device, dtype, requires_grad):\n inputs = []\n args = (\n ((S, S, S), (),),\n ((S, S, S), (1, ),),\n ((S, S, S), (1, True, ),),\n ((), (),),\n ((), (0,),),\n ((), (0, True,),),\n )\n inputs = list((SampleInput(make_tensor(input_tensor, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=args,))\n for input_tensor, args in args)\n return inputs\n\n# TODO: add samples that exercise the nondeterminism of the operation\n# https://github.com/pytorch/pytorch/issues/53352\ndef sample_inputs_put(op_info, device, dtype, requires_grad):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n make_idx = partial(make_tensor, low=0, dtype=torch.int64, device=device, requires_grad=False)\n\n S = 3\n\n def gen_inputs():\n # Generic inputs\n idx = torch.randperm(S * S, device=device, dtype=torch.int64)[:S]\n idx_list = [idx, -idx - 1]\n for idx, acc in product(idx_list, (True, False)):\n yield SampleInput(input=make_arg((S, S)),\n args=(idx.detach().clone(),\n make_arg((S,)),\n acc))\n\n # Scalar cases\n scalar_sizes = [(), (1,)]\n tgt_gen = (make_arg(size) for size in scalar_sizes)\n idx_gen = (make_idx(size, high=1) for size in scalar_sizes)\n src_gen = (make_arg(size) for size in scalar_sizes)\n for tgt, idx, src, acc in product(tgt_gen, idx_gen, src_gen, (True, False)):\n yield SampleInput(input=tgt.detach().clone().requires_grad_(requires_grad),\n args=(idx.detach().clone(),\n src.detach().clone().requires_grad_(requires_grad),\n acc))\n\n # Empty cases\n tgt_sizes = [(0,), (), (1,), (3, 2)]\n tgt_gen = (make_arg(size) for size in tgt_sizes)\n idx = make_idx((0,), high=1)\n src = make_arg((0,))\n # iterate tgt_gen here (the original iterated the stale loop variable tgt)\n for tgt, acc in product(tgt_gen, (True, False)):\n yield SampleInput(input=tgt.detach().clone().requires_grad_(requires_grad),\n args=(idx.detach().clone(),\n src.detach().clone().requires_grad_(requires_grad),\n acc))\n\n return list(gen_inputs())\n
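\n# Editor's note: an illustrative, hypothetical sketch (names are ours) of\n# torch.Tensor.put as exercised above: `index` addresses the tensor as if it\n# were flattened to 1-D, and accumulate=True adds into existing values instead\n# of overwriting them.\ndef _example_put_semantics():\n t = torch.zeros(2, 2)\n idx = torch.tensor([0, 3])\n src = torch.tensor([1., 2.])\n out = t.put(idx, src, accumulate=True)\n assert torch.equal(out, torch.tensor([[1., 0.], [0., 2.]]))\n\ndef sample_inputs_take(op_info, device, dtype, requires_grad):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n make_idx = partial(make_tensor, low=0, dtype=torch.int64, device=device, requires_grad=False)\n\n S = 3\n\n def gen_inputs():\n # Generic inputs: take S elements out of S * S\n index = make_idx((S,), high=(S * S))\n for idx in (index, -index - 1):\n yield SampleInput(input=make_arg((S, S)), args=(idx,))\n\n # Scalar cases\n scalar_sizes = [(), (1,)]\n src_gen = (make_arg(size) for size in scalar_sizes)\n idx_gen = (make_idx(size, high=1) for size in scalar_sizes)\n for src, idx in product(src_gen, idx_gen):\n yield SampleInput(input=src.detach().clone().requires_grad_(requires_grad),\n args=(idx.detach().clone(),))\n\n # Empty cases\n src_sizes = [(0,), (), (1,), (3, 2)]\n src_gen = (make_arg(size) for size in src_sizes)\n idx = make_idx((0,), high=1)\n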
for src in src_gen:\n yield SampleInput(input=src.detach().clone().requires_grad_(requires_grad),\n args=(idx.detach().clone(),))\n\n return list(gen_inputs())\n\ndef sample_movedim_moveaxis(op_info, device, dtype, requires_grad):\n return (\n SampleInput(\n make_tensor((4, 3, 2, 1), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=([0, 1, 2, 3], [3, 2, 1, 0])),\n SampleInput(\n make_tensor((4, 3, 2, 1), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=([0, -1, -2, -3], [-3, -2, -1, -0]))\n )\n\n\ndef sample_repeat_tile(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n rep_dims = ((), (0, ), (1, ), (0, 2), (1, 1), (2, 3), (2, 3, 2), (0, 2, 3), (2, 1, 1, 1),)\n shapes = ((), (0,), (2,), (3, 0), (3, 2), (3, 0, 1))\n\n if requires_grad:\n # Tests for variant_consistency_jit, grad, gradgrad\n # are slower. Use smaller bags of `rep_dims` and `shapes`\n # in this case.\n rep_dims = ((), (0, ), (0, 2), (1, 1), (2, 3), (1, 3, 2), (3, 1, 1)) # type: ignore[assignment]\n shapes = ((), (0,), (2,), (3, 2)) # type: ignore[assignment]\n\n samples = []\n for rep_dim, shape in product(rep_dims, shapes):\n # `torch.repeat` errors for `len(rep_dims) < t.dim()`,\n # so we filter such combinations.\n if op_info.name == 'repeat' and len(rep_dim) < len(shape):\n continue\n samples.append(SampleInput(make_arg(shape), args=(rep_dim,),))\n\n return samples\n\n\ndef sample_inputs_narrow(op_info, device, dtype, requires_grad, **kwargs):\n shapes_and_args = (\n ((S, S, S), (1, 2, 2)),\n ((S, S, S), (-1, 2, 2)),\n ((S, S, S), (1, 0, 0)),\n ((S, S, S), (-1, 0, 0)),\n )\n\n def generator():\n for shape, args in shapes_and_args:\n tensor = make_tensor(shape, device, dtype, low=None, high=None,\n requires_grad=requires_grad)\n yield SampleInput(tensor, args=args)\n\n return list(generator())\n\ndef sample_trapezoid(op_info, device, dtype, requires_grad, **kwargs):\n y_shape_x_shape_and_kwargs = [\n ((2, 3), (2, 3), {}),\n ((2, 3), (2, 3), {'dim': 1}),\n ((6,), (6,), {}),\n ((6,), None, {}),\n # When 'trapezoid' is called with an empty input, it does not produce an output with requires_grad\n # See Issue #{61619}\n # ((6,0), (6,0), {}),\n ((2, 3), (1, 3), {}),\n ((3, 3), (3, 3), {}),\n ((3, 3), (3, 3), {'dim': -2}),\n ((5,), None, {'dx': 2.0}),\n ((2, 2), None, {'dx': 3.0})\n ]\n samples = []\n for y_shape, x_shape, kwarg in y_shape_x_shape_and_kwargs:\n y_tensor = make_tensor(y_shape, device, dtype, low=None, high=None,\n requires_grad=requires_grad)\n if x_shape is not None:\n x_tensor = make_tensor(x_shape, device, dtype, low=None, high=None,\n requires_grad=requires_grad)\n samples.append(SampleInput(y_tensor, args=(x_tensor,), kwargs=kwarg))\n else:\n samples.append(SampleInput(y_tensor, kwargs=kwarg))\n return samples\n\ndef sample_cumulative_trapezoid(op_info, device, dtype, requires_grad, **kwargs):\n\n y_shape_x_shape_and_kwargs = [\n ((2, 3), (2, 3), {}),\n ((2, 3), (2, 3), {'dim': 1}),\n ((6,), (6,), {}),\n ((6,), None, {}),\n # When 'cumulative_trapezoid' is called with an empty input, it does not produce an output with requires_grad\n # See Issue #{61619}\n # ((6,0), (6,0), {}),\n ((2, 3), (1, 3), {}),\n ((3, 3), (3, 3), {}),\n ((3, 3), (3, 3), {'dim': -2}),\n ((5,), None, {'dx': 2.0}),\n ((2, 2), None, {'dx': 3.0})\n ]\n samples = []\n for y_shape, x_shape, kwarg in y_shape_x_shape_and_kwargs:\n y_tensor = make_tensor(y_shape, device, dtype, low=None, high=None,\n 
requires_grad=requires_grad)\n if x_shape is not None:\n x_tensor = make_tensor(x_shape, device, dtype, low=None, high=None,\n requires_grad=requires_grad)\n samples.append(SampleInput(y_tensor, args=(x_tensor,), kwargs=kwarg))\n else:\n samples.append(SampleInput(y_tensor, kwargs=kwarg))\n return samples\n\ndef sample_unsqueeze(op_info, device, dtype, requires_grad, **kwargs):\n shapes_and_axes = [\n ((3, 4, 5), 0),\n ((3, 4, 5), 1),\n ((3, 4, 5), 3),\n ((3, 4, 5), -1),\n ((3, 4, 5), -3),\n ((), 0)\n ]\n\n samples = []\n for shape, axis in shapes_and_axes:\n tensor = make_tensor(shape, device, dtype, low=None, high=None,\n requires_grad=requires_grad)\n samples.append(SampleInput(tensor, args=(axis,),))\n\n return samples\n\n\ndef sample_inputs_nn_unfold(op_info, device, dtype, requires_grad, **kwargs):\n shapes = ((0, 1, 5, 5), (1, 1, 5, 5), (2, 3, 5, 5))\n kernel_sizes = (2, (2, 2), (3, 3))\n dilations = (1, 2, (1, 2))\n paddings = (0, 1, (1, 1))\n strides = (1, 2, (1, 2))\n\n def generator():\n cases = product(shapes, kernel_sizes, dilations, paddings, strides)\n for shape, kernel_size, dilation, padding, stride in cases:\n tensor = make_tensor(shape, device, dtype, requires_grad=requires_grad)\n yield SampleInput(tensor, args=(kernel_size, dilation, padding, stride))\n\n # With default args\n yield SampleInput(make_tensor((1, 1, 5, 5), device, dtype, requires_grad=requires_grad),\n args=((3, 3),))\n\n return list(generator())\n\n\ndef sample_inputs_squeeze(op_info, device, dtype, requires_grad, **kwargs):\n shapes_and_args = (\n ((S, 1, S, 1), ()),\n ((1, 1, 1, 1), ()),\n ((S, 1, S, 1), (1,)),\n ((S, 1, S, 1), (-1,)),\n ((S, 1, S, 1), (2,)),\n ((S, 1, S, 1), (-2,)),\n ((), (0, )),\n )\n\n def generator():\n for shape, args in shapes_and_args:\n tensor = make_tensor(shape, device, dtype, low=None, high=None,\n requires_grad=requires_grad)\n\n yield SampleInput(tensor, args=args)\n\n return list(generator())\n\n\ndef sample_inputs_nn_pad(op_info, device, dtype, requires_grad, mode, **kwargs):\n assert mode in ('constant', 'reflect', 'replicate', 'circular')\n if mode in ['reflect', 'replicate']:\n cases: tuple = ( # ignore\n ((1, 3), (1, 2)),\n ((1, 3), (0, 1)),\n ((0, 3, 3), (1, 2)),\n ((0, 3, 3), (0, 1)),\n ((1, 3, 3), (1, 2)),\n ((1, 3, 3), (0, 1)),\n ((1, 3, 3), (0, 2, 0, 1)),\n ((0, 3, 3, 3), (0, 2, 0, 1)),\n ((3, 3, 5, 5), (0, 2, 0, 1)),\n ((3, 3, 5, 5), (1, 1, 1, 1, 1, 1)),\n ((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)),\n ((1, 3, 4, 4), (-1, 1, -2, 1)),\n )\n elif mode == 'constant':\n cases = (\n ((1, 3), (1, 2)),\n ((1, 3), (0, 1)),\n ((1, 3), (0, 2, 0, 1)),\n ((0, 3, 3), (1, 2)),\n ((0, 3, 3), (0, 1)),\n ((0, 3, 3), (0, 2, 0, 1)),\n ((0, 3, 3), (1, 1, 1, 1, 1, 1)),\n ((1, 3, 3), (1, 2)),\n ((1, 3, 3), (0, 1)),\n ((1, 3, 3), (0, 2, 0, 1)),\n ((1, 3, 3), (1, 1, 1, 1, 1, 1)),\n ((0, 3, 3, 3), (1, 2)),\n ((0, 3, 3, 3), (0, 1)),\n ((0, 3, 3, 3), (0, 2, 0, 1)),\n ((0, 3, 3, 3), (1, 1, 1, 1, 1, 1)),\n ((3, 3, 5, 5), (1, 2)),\n ((3, 3, 5, 5), (0, 1)),\n ((3, 3, 5, 5), (0, 2, 0, 1)),\n ((3, 3, 5, 5), (1, 1, 1, 1, 1, 1)),\n ((1, 3, 3, 3, 3), (1, 2)),\n ((1, 3, 3, 3, 3), (0, 1)),\n ((1, 3, 3, 3, 3), (0, 2, 0, 1)),\n ((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)),\n ((1, 3, 4, 4), (-1, 1, -2, 1)),\n )\n else: # mode == 'circular'\n if dtype == torch.bool:\n # test_dtypes fails on ASAN with for the case ab\n # runtime error: load of value 190, which is not a valid value for type 'bool'\n # Reference: https://github.com/pytorch/pytorch/pull/62814#issuecomment-894156562\n # Reference Issue: 
https://github.com/pytorch/pytorch/issues/63034\n            cases = (\n                ((2, 3, 3), (1, 2)),\n                ((1, 3, 3), (1, 2)),\n            )\n        else:\n            cases = (\n                ((0, 3, 3), (1, 2)),\n                ((0, 3, 3), (0, 1)),\n                ((1, 3, 3), (1, 2)),\n                ((1, 3, 3), (0, 1)),\n                ((0, 3, 3, 3), (0, 2, 0, 1)),\n                ((3, 3, 5, 5), (0, 2, 0, 1)),\n                ((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)),\n                ((1, 3, 4, 4), (-1, 1, -2, 1)),\n            )\n\n    make_inp = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n    def generator():\n        if mode == 'constant':\n            # Default args\n            yield SampleInput(make_inp((1, 3, 3)), args=((2, 2),))\n\n        if mode in ['reflect', 'replicate', 'circular']:\n            for shape, pad in cases:\n                yield SampleInput(make_inp(shape), args=(pad, mode))\n        else:  # mode == 'constant'\n            for pad_value in (1., 2.):\n                for shape, pad in cases:\n                    yield SampleInput(make_inp(shape), args=(pad, mode, pad_value))\n\n    return list(generator())\n\n\n# TODO: reconcile with torch.linalg.det and torch.linalg.slogdet\n# Creates matrices with a positive nonzero determinant\ndef sample_inputs_logdet(op_info, device, dtype, requires_grad, **kwargs):\n    def make_nonzero_det(A, *, sign=1, min_singular_value=0.1, **kwargs):\n        u, s, vh = torch.linalg.svd(A, full_matrices=False)\n        s.clamp_(min=min_singular_value)\n        A = (u * s.unsqueeze(-2)) @ vh\n        det = A.det()\n        if sign is not None:\n            if A.dim() == 2:\n                if (det < 0) ^ (sign < 0):\n                    A[0, :].neg_()\n            else:\n                cond = ((det < 0) ^ (sign < 0)).nonzero()\n                if cond.size(0) > 0:\n                    for i in range(cond.size(0)):\n                        # index with plain ints so basic indexing returns a view\n                        # and the in-place neg_() actually modifies A\n                        A[tuple(int(j) for j in cond[i])][0, :].neg_()\n        return A\n\n    samples = []\n\n    # cases constructed using make_tensor()\n    tensor_shapes = (\n        (S, S),\n        (1, 1),\n        (3, 3, S, S),\n        (3, 3, 1, 1)\n    )\n\n    for shape in tensor_shapes:\n        t = make_tensor(shape, device=device, dtype=dtype)\n        d = make_nonzero_det(t).requires_grad_(requires_grad)\n        samples.append(SampleInput(d))\n\n    # cases constructed using:\n    # 1) make_symmetric_matrices\n    # 2) make_symmetric_pd_matrices\n    # 3) make_fullrank_matrices_with_distinct_singular_values\n    symmetric_shapes = (\n        (S, S),\n        (3, S, S),\n    )\n\n    def _helper(constructor, *shape, **kwargs):\n        t = constructor(*shape, device=device, dtype=dtype)\n        d = make_nonzero_det(t, **kwargs).requires_grad_(requires_grad)\n        samples.append(SampleInput(d))\n\n    for shape in symmetric_shapes:\n        _helper(make_symmetric_matrices, *shape)\n        _helper(make_symmetric_pd_matrices, *shape)\n        _helper(make_fullrank_matrices_with_distinct_singular_values, *shape, min_singular_value=0)\n\n    return tuple(samples)\n\ndef np_unary_ufunc_integer_promotion_wrapper(fn):\n    # Wrapper that passes PyTorch's default scalar\n    # type as an argument to the wrapped NumPy\n    # unary ufunc when given an integer input.\n    # This mimics PyTorch's integer->floating point\n    # type promotion.\n    #\n    # This is necessary when NumPy promotes\n    # integer types to double, since PyTorch promotes\n    # integer types to the default scalar type.\n\n    # Helper to determine if promotion is needed\n    def is_integral(dtype):\n        return dtype in [np.bool_, bool, np.uint8, np.int8, np.int16, np.int32, np.int64]\n\n    @wraps(fn)\n    def wrapped_fn(x):\n        # As the default dtype can change, acquire it when function is called.\n        # NOTE: Promotion in PyTorch is from integer types to the default dtype\n        np_dtype = torch_to_numpy_dtype_dict[torch.get_default_dtype()]\n\n        if is_integral(x.dtype):\n            return fn(x.astype(np_dtype))\n        return fn(x)\n\n    return wrapped_fn\n\ndef sample_inputs_spectral_ops(self, device, dtype, requires_grad=False, **kwargs):\n    nd_tensor = partial(make_tensor, (S, S + 1, S 
+ 2), device=device,\n dtype=dtype, requires_grad=requires_grad)\n oned_tensor = partial(make_tensor, (31,), device=device,\n dtype=dtype, requires_grad=requires_grad)\n\n if self.ndimensional == SpectralFuncType.ND:\n return [\n SampleInput(nd_tensor(),\n kwargs=dict(s=(3, 10), dim=(1, 2), norm='ortho')),\n SampleInput(nd_tensor(),\n kwargs=dict(norm='ortho')),\n SampleInput(nd_tensor(),\n kwargs=dict(s=(8,))),\n SampleInput(oned_tensor()),\n\n *(SampleInput(nd_tensor(),\n kwargs=dict(dim=dim))\n for dim in [-1, -2, -3, (0, -1)]),\n ]\n elif self.ndimensional == SpectralFuncType.TwoD:\n return [\n SampleInput(nd_tensor(),\n kwargs=dict(s=(3, 10), dim=(1, 2), norm='ortho')),\n SampleInput(nd_tensor(),\n kwargs=dict(norm='ortho')),\n SampleInput(nd_tensor(),\n kwargs=dict(s=(6, 8))),\n SampleInput(nd_tensor(),\n kwargs=dict(dim=0)),\n SampleInput(nd_tensor(),\n kwargs=dict(dim=(0, -1))),\n SampleInput(nd_tensor(),\n kwargs=dict(dim=(-3, -2, -1))),\n ]\n else:\n return [\n SampleInput(nd_tensor(),\n kwargs=dict(n=10, dim=1, norm='ortho')),\n SampleInput(nd_tensor(),\n kwargs=dict(norm='ortho')),\n SampleInput(nd_tensor(),\n kwargs=dict(n=7)),\n SampleInput(oned_tensor()),\n\n *(SampleInput(nd_tensor(),\n kwargs=dict(dim=dim))\n for dim in [-1, -2, -3]),\n ]\n\ndef sample_inputs_repeat_interleave(op_info, device, dtype, requires_grad, **kwargs):\n make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n return [\n SampleInput(make_input(()), kwargs=dict(repeats=2)),\n SampleInput(make_input((2, 3, 4)), kwargs=dict(repeats=2)),\n SampleInput(make_input((2, 3, 4)), kwargs=dict(repeats=2, dim=1)),\n SampleInput(make_input((2, 3, 4)), kwargs=dict(repeats=torch.arange(3, device=device), dim=1))\n ]\n\nSpectralFuncType = Enum('SpectralFuncType', ('OneD', 'TwoD', 'ND'))\n\n# Metadata class for Fast Fourier Transforms in torch.fft.\nclass SpectralFuncInfo(OpInfo):\n \"\"\"Operator information for torch.fft transforms. 
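Stores a reference implementation (typically from the np.fft namespace) in\n    self.ref and the transform dimensionality in self.ndimensional, which\n    sample_inputs_spectral_ops uses to choose suitable sample shapes and kwargs. 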
\"\"\"\n\n def __init__(self,\n name, # the string name of the function\n *,\n ref=None, # Reference implementation (probably in np.fft namespace)\n dtypes=floating_and_complex_types(),\n ndimensional: SpectralFuncType,\n sample_inputs_func=sample_inputs_spectral_ops,\n decorators=None,\n **kwargs):\n decorators = list(decorators) if decorators is not None else []\n decorators += [\n skipCPUIfNoFFT,\n skipCUDAIfRocm,\n ]\n\n super().__init__(name=name,\n dtypes=dtypes,\n decorators=decorators,\n sample_inputs_func=sample_inputs_func,\n **kwargs)\n self.ref = ref\n self.ndimensional = ndimensional\n\n\ndef sample_inputs_stft(op_info, device, dtype, requires_grad):\n def mt(shape, **kwargs):\n return make_tensor(shape, device=device, dtype=dtype,\n requires_grad=requires_grad, **kwargs)\n yield SampleInput(mt(100), kwargs=dict(n_fft=10))\n\n for center in [False, True]:\n yield SampleInput(mt(10), kwargs=dict(n_fft=7, center=center))\n yield SampleInput(mt((10, 100)), kwargs=dict(n_fft=16, hop_length=4, center=center))\n\n window = make_tensor(16, low=.5, high=2.0, dtype=dtype, device=device)\n yield SampleInput(\n mt((2, 100)), kwargs=dict(n_fft=16, window=window, return_complex=True, center=center))\n yield SampleInput(\n mt((3, 100)), kwargs=dict(n_fft=16, window=window, return_complex=True, center=center))\n if not dtype.is_complex:\n yield SampleInput(\n mt((10, 100)), kwargs=dict(n_fft=16, window=window, onesided=False))\n\n\ndef sample_inputs_istft(op_info, device, dtype, requires_grad):\n def mt(shape, **kwargs):\n real_shape = shape if dtype.is_complex else shape + (2,)\n return make_tensor(real_shape, device=device, dtype=dtype,\n requires_grad=requires_grad, **kwargs)\n\n yield SampleInput(mt((10, 2)), kwargs=dict(n_fft=10))\n yield SampleInput(mt((6, 3)), kwargs=dict(n_fft=6, onesided=False))\n yield SampleInput(mt((6, 4)), kwargs=dict(n_fft=10, onesided=True))\n\n for center in [False, True]:\n yield SampleInput(mt((10, 10, 6)), kwargs=dict(n_fft=10, center=center))\n yield SampleInput(mt((1, 9, 10)), kwargs=dict(n_fft=16, hop_length=4, center=center))\n\n window = make_tensor(10, low=.5, high=2.0, dtype=dtype, device=device)\n yield SampleInput(mt((10, 10, 6)), kwargs=dict(\n n_fft=10, window=window, center=center, return_complex=dtype.is_complex))\n yield SampleInput(mt((10, 10, 10)), kwargs=dict(\n n_fft=10, window=window[:8], win_length=8, center=center, return_complex=True))\n\n real_window = window if not dtype.is_complex else window.real\n yield SampleInput(mt((10, 5, 6)), kwargs=dict(n_fft=8, window=real_window[:8], center=center))\n\n\ndef sample_inputs_fftshift(op_info, device, dtype, requires_grad):\n def mt(shape, **kwargs):\n return make_tensor(shape, device=device, dtype=dtype,\n requires_grad=requires_grad, **kwargs)\n\n yield SampleInput(mt((9, 10)))\n yield SampleInput(mt((50,)), kwargs=dict(dim=0))\n yield SampleInput(mt((5, 11)), kwargs=dict(dim=(1,)))\n yield SampleInput(mt((5, 6)), kwargs=dict(dim=(0, 1)))\n yield SampleInput(mt((5, 6, 2)), kwargs=dict(dim=(0, 2)))\n\n\nclass ShapeFuncInfo(OpInfo):\n \"\"\"Early version of a specialized OpInfo for Shape manipulating operations like tile and roll\"\"\"\n def __init__(self,\n name, # the string name of the function\n *,\n ref, # a reference function\n dtypes=floating_types(),\n dtypesIfCUDA=None,\n dtypesIfROCM=None,\n sample_inputs_func=None,\n **kwargs):\n super(ShapeFuncInfo, self).__init__(name,\n dtypes=dtypes,\n dtypesIfCUDA=dtypesIfCUDA,\n dtypesIfROCM=dtypesIfROCM,\n 
sample_inputs_func=sample_inputs_func,\n                                            **kwargs)\n        self.ref = ref\n\ndef sample_inputs_foreach(self, device, dtype, N, *, noncontiguous=False, same_size=False):\n    if same_size:\n        return [make_tensor((N, N), device, dtype, noncontiguous=noncontiguous) for _ in range(N)]\n    else:\n        return [make_tensor((N - i, N - i), device, dtype, noncontiguous=noncontiguous) for i in range(N)]\n\n\ndef get_foreach_method_names(name):\n    # get torch inplace reference function\n    op_name = \"_foreach_\" + name\n    inplace_op_name = \"_foreach_\" + name + \"_\"\n\n    op = getattr(torch, op_name, None)\n    inplace_op = getattr(torch, inplace_op_name, None)\n\n    ref = getattr(torch, name, None)\n    ref_inplace = getattr(torch.Tensor, name + \"_\", None)\n    return op, inplace_op, ref, ref_inplace\n\nclass ForeachFuncInfo(OpInfo):\n    \"\"\"Early version of a specialized OpInfo for foreach functions\"\"\"\n    def __init__(self,\n                 name,\n                 dtypes=floating_and_complex_types(),\n                 dtypesIfCUDA=floating_and_complex_types_and(torch.half),\n                 dtypesIfROCM=None,\n                 safe_casts_outputs=True,\n                 supports_alpha_param=False,\n                 sample_inputs_func=sample_inputs_foreach,\n                 **kwargs):\n        super().__init__(\n            \"_foreach_\" + name,\n            dtypes=dtypes,\n            dtypesIfCUDA=dtypesIfCUDA,\n            dtypesIfROCM=dtypesIfROCM,\n            safe_casts_outputs=safe_casts_outputs,\n            sample_inputs_func=sample_inputs_func,\n            **kwargs\n        )\n\n        foreach_method, foreach_method_inplace, torch_ref_method, torch_ref_inplace = get_foreach_method_names(name)\n        self.method_variant = foreach_method\n        self.inplace_variant = foreach_method_inplace\n        self.ref = torch_ref_method\n        self.ref_inplace = torch_ref_inplace\n        self.supports_alpha_param = supports_alpha_param\n\n        if name == \"norm\":\n            self.ref = torch.linalg.vector_norm\n\n\ndef sample_inputs_linalg_cholesky_inverse(op_info, device, dtype, requires_grad=False):\n    # Generate Cholesky factors of positive-definite (non-singular) Hermitian (symmetric) matrices\n    from torch.testing._internal.common_utils import random_hermitian_pd_matrix\n    inputs = (\n        torch.zeros(0, 0, dtype=dtype, device=device),  # 0x0 matrix\n        torch.zeros(0, 2, 2, dtype=dtype, device=device),  # zero batch of matrices\n        random_hermitian_pd_matrix(S, dtype=dtype, device=device),  # single matrix\n        random_hermitian_pd_matrix(S, 2, dtype=dtype, device=device),  # batch of matrices\n    )\n    test_cases = (torch.linalg.cholesky(a) for a in inputs)\n    out = []\n    for a in test_cases:\n        a.requires_grad = requires_grad\n        out.append(SampleInput(a))\n        out.append(SampleInput(a.detach().clone().requires_grad_(requires_grad), kwargs=dict(upper=True)))\n    return out\n\ndef sample_inputs_linalg_lstsq(op_info, device, dtype, requires_grad=False, **kwargs):\n    from torch.testing._internal.common_utils import random_well_conditioned_matrix\n\n    device = torch.device(device)\n\n    drivers: Tuple[str, ...]\n    if device.type == 'cuda':\n        drivers = ('gels',)\n    else:\n        drivers = ('gels', 'gelsy', 'gelss', 'gelsd')\n\n    # we generate matrices of shape (..., n + delta, n)\n    deltas: Tuple[int, ...]\n    if device.type == 'cpu' or has_cusolver():\n        deltas = (-1, 0, +1)\n    # only square systems if cuSOLVER is not available,\n    # because we solve an lstsq problem with a transposed matrix in the backward\n    else:\n        deltas = (0,)\n\n    out = []\n    for batch, driver, delta in product(((), (3,), (3, 3)), drivers, deltas):\n        shape = batch + (3 + delta, 3)\n        a = random_well_conditioned_matrix(*shape, dtype=dtype, device=device)\n        a.requires_grad_(requires_grad)\n        b = make_tensor(shape, device, dtype, low=None, high=None, 
requires_grad=requires_grad)\n out.append(SampleInput(a, args=(b,), kwargs=dict(driver=driver)))\n return out\n\ndef sample_inputs_householder_product(op_info, device, dtype, requires_grad, **kwargs):\n \"\"\"\n This function generates input for torch.linalg.householder_product (torch.orgqr).\n The first argument should be a square matrix or batch of square matrices, the second argument is a vector or batch of vectors.\n Empty, square, rectangular, batched square and batched rectangular input is generated.\n \"\"\"\n # Each column of the matrix is getting multiplied many times leading to very large values for\n # the Jacobian matrix entries and making the finite-difference result of grad check less accurate.\n # That's why gradcheck with the default range [-9, 9] fails and [-2, 2] is used here.\n samples = (\n SampleInput(make_tensor((S, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),\n args=(make_tensor((S,), device, dtype, low=-2, high=2, requires_grad=requires_grad),)),\n\n SampleInput(make_tensor((S + 1, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),\n args=(make_tensor((S,), device, dtype, low=-2, high=2, requires_grad=requires_grad),)),\n\n SampleInput(make_tensor((2, 1, S, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),\n args=(make_tensor((2, 1, S,), device, dtype, low=-2, high=2, requires_grad=requires_grad),)),\n\n SampleInput(make_tensor((2, 1, S + 1, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),\n args=(make_tensor((2, 1, S,), device, dtype, low=-2, high=2, requires_grad=requires_grad),)),\n\n SampleInput(make_tensor((0, 0), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(make_tensor((0,), device, dtype, low=None, high=None, requires_grad=requires_grad),)),\n\n SampleInput(make_tensor((S, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),\n args=(make_tensor((0,), device, dtype, low=None, high=None, requires_grad=requires_grad),)),\n\n # m = n = S, k = S - 2\n SampleInput(make_tensor((S, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),\n args=(make_tensor((S - 2,), device, dtype, low=None, high=None, requires_grad=requires_grad),)),\n\n # m = S, n = S -1, k = S - 2\n SampleInput(make_tensor((S, S - 1), device, dtype, low=-2, high=2, requires_grad=requires_grad),\n args=(make_tensor((S - 2,), device, dtype, low=None, high=None, requires_grad=requires_grad),)),\n )\n\n return samples\n\ndef sample_inputs_ormqr(op_info, device, dtype, requires_grad):\n # create a helper function wrapping `make_tensor`\n make_input = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n def gen_inputs():\n batches = [(), (0, ), (2, ), (2, 1)]\n ns = [5, 2, 0]\n tf = [True, False]\n for batch, (m, n), left, transpose in product(batches, product(ns, ns), tf, tf):\n reflectors = make_input((*batch, m, n))\n tau = make_input((*batch, min(m, n)))\n other_matrix_shape = (m, n) if left else (n, m)\n other = make_input((*batch, *other_matrix_shape))\n kwargs = {\"left\": left, \"transpose\": transpose}\n yield SampleInput(reflectors, args=(tau, other,), kwargs=kwargs)\n\n return tuple(gen_inputs())\n\ndef sample_inputs_linalg_cholesky(op_info, device, dtype, requires_grad=False, **kwargs):\n \"\"\"\n This function generates always positive-definite input for torch.linalg.cholesky using\n random_hermitian_pd_matrix.\n The input is generated as the itertools.product of 'batches' and 'ns'.\n In total this function generates 8 SampleInputs\n 'batches' cases 
include:\n      () - single input,\n      (0,) - zero batched dimension,\n      (2,) - batch of two matrices,\n      (1, 1) - 1x1 batch of matrices\n    'ns' gives 0x0 and 5x5 matrices.\n    Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes.\n    \"\"\"\n    from torch.testing._internal.common_utils import random_hermitian_pd_matrix\n\n    batches = [(), (0, ), (2, ), (1, 1)]\n    ns = [5, 0]\n    out = []\n    for batch, n, upper in product(batches, ns, [True, False]):\n        a = random_hermitian_pd_matrix(n, *batch, dtype=dtype, device=device)\n        a.requires_grad = requires_grad\n        out.append(SampleInput(a, kwargs={\"upper\": upper}))\n    return out\n\ndef sample_inputs_symeig(op_info, device, dtype, requires_grad=False):\n    out = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)\n\n    for o in out:\n        o.kwargs = {\"upper\": bool(np.random.choice([True, False])),\n                    \"eigenvectors\": True}\n        # A gauge-invariant function\n        o.output_process_fn_grad = lambda output: (output[0], abs(output[1]))\n    return out\n\ndef sample_inputs_linalg_eig(op_info, device, dtype, requires_grad=False):\n    \"\"\"\n    This function generates input for torch.linalg.eig.\n    \"\"\"\n    def out_fn(output):\n        return output[0], abs(output[1])\n\n    samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)\n    for sample in samples:\n        sample.output_process_fn_grad = out_fn\n\n    return samples\n\ndef sample_inputs_linalg_eigh(op_info, device, dtype, requires_grad=False, **kwargs):\n    \"\"\"\n    This function generates input for torch.linalg.eigh/eigvalsh with UPLO=\"U\" or \"L\" keyword argument.\n    \"\"\"\n    def out_fn(output):\n        if isinstance(output, tuple):\n            # eigh function\n            return output[0], abs(output[1])\n        else:\n            # eigvalsh function\n            return output\n\n    samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)\n    for sample in samples:\n        sample.kwargs = {\"UPLO\": np.random.choice([\"L\", \"U\"])}\n        sample.output_process_fn_grad = out_fn\n\n    return samples\n\n\ndef sample_inputs_linalg_slogdet(op_info, device, dtype, requires_grad=False):\n    def out_fn(output):\n        return output[1]\n\n    samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)\n    for sample in samples:\n        sample.output_process_fn_grad = out_fn\n\n    return samples\n\n\ndef sample_inputs_linalg_pinv(op_info, device, dtype, requires_grad=False, **kwargs):\n    \"\"\"\n    This function generates input for torch.linalg.pinv with the default hermitian=False\n    and several values of the rcond keyword argument.\n    \"\"\"\n    out = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad, **kwargs)\n    real_dtype = out[0].input.real.dtype if dtype.is_complex else dtype\n    samples = []\n    for o in out:\n        # requires_grad path for rcond tensor is not implemented\n        for rcond in (None, 1.0, torch.tensor(1.0, dtype=real_dtype, device=device)):\n            samples.append(SampleInput(o.input.detach().clone().requires_grad_(requires_grad),\n                                       kwargs={\"rcond\": rcond}))\n    return samples\n\n\ndef sample_inputs_linalg_pinv_hermitian(op_info, device, dtype, requires_grad=False, **kwargs):\n    \"\"\"\n    This function generates input for torch.linalg.pinv with hermitian=True keyword argument.\n    \"\"\"\n    out = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad, **kwargs)\n    for o in out:\n        o.kwargs = {\"hermitian\": True}\n    return out\n\ndef sample_inputs_linalg_solve(op_info, device, dtype, requires_grad=False, vector_rhs_allowed=True, **kwargs):\n    \"\"\"\n    This function generates always solvable input for torch.linalg.solve\n    Using random_fullrank_matrix_distinct_singular_value gives 
non-singular (=invertible, =solvable) matrices 'a'.\n The first input to torch.linalg.solve is generated as the itertools.product of 'batches' and 'ns'.\n The second input is generated as the product of 'batches', 'ns' and 'nrhs'.\n In total this function generates 18 SampleInputs\n 'batches' cases include:\n () - single input,\n (0,) - zero batched dimension,\n (2,) - batch of two matrices.\n 'ns' gives 0x0 and 5x5 matrices.\n and 'nrhs' controls the number of vectors to solve for:\n () - using 1 as the number of vectors implicitly\n (1,) - same as () but explicit\n (3,) - solve for 3 vectors.\n Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes.\n 'vector_rhs_allowed' controls whether to include nrhs = () to the list of SampleInputs.\n torch.solve / triangular_solve / cholesky_solve (opposed to torch.linalg.solve) do not allow\n 1D tensors (vectors) as the right-hand-side.\n Once torch.solve / triangular_solve / cholesky_solve and its testing are removed,\n 'vector_rhs_allowed' may be removed here as well.\n \"\"\"\n from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value\n\n batches = [(), (0, ), (2, )]\n ns = [5, 0]\n if vector_rhs_allowed:\n nrhs = [(), (1,), (3,)]\n else:\n nrhs = [(1,), (3,)]\n out = []\n for n, batch, rhs in product(ns, batches, nrhs):\n a = random_fullrank_matrix_distinct_singular_value(n, *batch, dtype=dtype, device=device)\n a.requires_grad = requires_grad\n b = torch.randn(*batch, n, *rhs, dtype=dtype, device=device)\n b.requires_grad = requires_grad\n out.append(SampleInput(a, args=(b,)))\n return out\n\ndef sample_inputs_linalg_solve_triangular(op_info, device, dtype, requires_grad=False, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device)\n bs = (1, 2, 0)\n ns = (3, 0)\n ks = (1, 3, 0)\n\n def gen_inputs():\n for b, n, k, (left, upper, uni) in product(bs, ns, ks, product((True, False), repeat=3)):\n with torch.no_grad():\n if b == 1:\n A = make_arg((n, n)) if left else make_arg((k, k))\n B = make_arg((n, k))\n else:\n A = make_arg((b, n, n)) if left else make_arg((b, k, k))\n B = make_arg((b, n, k))\n if uni:\n # Not really necessary, but writing it for consistency\n A.diagonal(0, -2, -1).fill_(1.)\n else:\n d = A.diagonal(0, -2, -1)\n d[d.abs() < 1e-6] = 1.\n if upper:\n A.triu_()\n else:\n A.tril_()\n kwargs = {\"upper\": upper, \"left\": left, \"unitriangular\": uni}\n if requires_grad:\n for grad_A, grad_B in product((True, False), repeat=2):\n # Either A or B needs to have a gradient\n if not grad_A and not grad_B:\n continue\n A.requires_grad_(grad_A)\n B.requires_grad_(grad_B)\n yield SampleInput(A, args=(B,), kwargs=kwargs)\n else:\n yield SampleInput(A, args=(B,), kwargs=kwargs)\n\n return list(gen_inputs())\n\ndef sample_inputs_legacy_solve(op_info, device, dtype, requires_grad=False, **kwargs):\n \"\"\"\n This function generates always solvable input for legacy solve functions\n (the ones that are not in torch.linalg module).\n The difference from sample_inputs_linalg_solve is that here the right-hand-side of A x = b equation\n should have b.ndim >= 2, vectors are not allowed.\n Also the arguments order is swapped.\n \"\"\"\n out = sample_inputs_linalg_solve(\n op_info, device, dtype, requires_grad=requires_grad, vector_rhs_allowed=False\n )\n\n # Reverses tensor order\n for sample in out:\n sample.input, sample.args = sample.args[0], (sample.input,)\n\n return out\n\n\ndef sample_inputs_cholesky_solve(op_info, device, 
dtype, requires_grad=False, **kwargs):\n    out = sample_inputs_linalg_cholesky_inverse(\n        op_info, device, dtype, requires_grad=False\n    )\n\n    for sample in out:\n        psd_matrix = sample.input\n        sample.input = make_tensor(psd_matrix.shape, device, dtype, requires_grad=requires_grad, low=None, high=None)\n        sample.args = (psd_matrix.requires_grad_(requires_grad),)\n\n    return out\n\n\ndef sample_inputs_lu(op_info, device, dtype, requires_grad=False, **kwargs):\n    # not needed once OpInfo tests support Iterables\n    def generate_samples():\n        batch_shapes = ((), (3,), (3, 3))\n        for batch_shape, get_infos, size_delta in product(batch_shapes, (True, False), (-2, -1, 0, +1, +2)):\n            shape = batch_shape + (S + size_delta, S)\n            input = make_tensor(shape, device, dtype, requires_grad=requires_grad, low=None, high=None)\n            yield SampleInput(input, args=(True, get_infos))\n\n    return list(generate_samples())\n\n\ndef sample_inputs_lu_solve(op_info, device, dtype, requires_grad=False, **kwargs):\n    from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value\n\n    batches = [(), (0, ), (2, )]\n    ns = [5, 3, 0]\n    nrhs = [0, 1, 6]\n\n    def generate_samples():\n        for n, batch, rhs in product(ns, batches, nrhs):\n            a = random_fullrank_matrix_distinct_singular_value(n, *batch, dtype=dtype, device=device)\n            requires_grad_options = (False,) if not requires_grad else (True, False)\n            # we try all possible combinations of requires_grad for each input\n            for lu_requires_grad, b_requires_grad in product(requires_grad_options, requires_grad_options):\n                # when requires_grad == True, at least one input has to have requires_grad enabled\n                if requires_grad and not lu_requires_grad and not b_requires_grad:\n                    continue\n                # we run LU several times to guarantee that the produced SampleInputs are independent\n                # this is especially important when setting different requires_grad for the same tensors!\n                lu, pivs = a.lu()\n                lu.requires_grad = lu_requires_grad\n                b = torch.randn(*batch, n, rhs, dtype=dtype, device=device)\n                b.requires_grad = b_requires_grad\n                yield SampleInput(b, args=(lu, pivs))\n\n    return list(generate_samples())\n\n\ndef sample_inputs_lu_unpack(op_info, device, dtype, requires_grad=False, **kwargs):\n    # not needed once OpInfo tests support Iterables\n    def generate_samples():\n        for lu_sample in sample_inputs_lu(op_info, device, dtype, requires_grad, **kwargs):\n            lu_data, pivots = lu_sample.input.lu()\n            yield SampleInput(lu_data, args=(pivots,))\n\n            # generate rectangular inputs\n            lu_data_shape = lu_data.shape\n            batch_shape = lu_data_shape[:-2]\n            n = lu_data_shape[-2]\n\n            for shape_inc in ((1, 0), (0, 1)):\n                lu_data, pivots = make_tensor(\n                    batch_shape + (n + shape_inc[0], n + shape_inc[1]),\n                    device, dtype,\n                    requires_grad=False,\n                    low=None, high=None\n                ).lu()\n                lu_data.requires_grad_(requires_grad)\n                yield SampleInput(lu_data, args=(pivots,))\n\n    return list(generate_samples())\n\n\ndef sample_inputs_roll(op_info, device, dtype, requires_grad=False, **kwargs):\n    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n    args = ((0, 0), (1, 2), (0, 2), (2, 0), (-1, 0), (10000, 1), (2,), ((1, 2, -1), (0, 1, 2)))\n\n    def generator():\n        for arg in args:\n            yield SampleInput(make_arg((S, S, S)), args=arg)\n\n    return list(generator())\n\n\ndef sample_inputs_rot90(op_info, device, dtype, requires_grad=False, **kwargs):\n    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n    args = ((1, (0, 1),),\n            (1, (1, 2),),\n            (1, (1, -1),),\n            ())\n\n    
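# For orientation (illustrative only): each entry maps to torch.rot90(t, k, dims);\n    # (1, (0, 1)) rotates once in the dim-0/dim-1 plane, while the empty tuple ()\n    # exercises the defaults k=1, dims=[0, 1].\n    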
def generator():\n        for arg in args:\n            yield SampleInput(make_arg((S, S, S)), args=arg)\n\n    return list(generator())\n\n\ndef sample_inputs_std_var(op_info, device, dtype, requires_grad, **kwargs):\n    tensor_nd = partial(make_tensor, (S, S, S), device=device, dtype=dtype,\n                        requires_grad=requires_grad)\n    tensor_1d = partial(make_tensor, (S,), device=device, dtype=dtype,\n                        requires_grad=requires_grad)\n\n    return [\n        SampleInput(tensor_nd()),\n        SampleInput(tensor_nd(), kwargs=dict(dim=1)),\n        SampleInput(tensor_nd(), kwargs=dict(dim=1, unbiased=True, keepdim=True)),\n        SampleInput(tensor_1d(), kwargs=dict(dim=0, unbiased=True, keepdim=True)),\n        SampleInput(tensor_1d(), kwargs=dict(dim=0, unbiased=False, keepdim=False)),\n\n        SampleInput(tensor_nd(), kwargs=dict(dim=(1,), correction=S // 2)),\n        SampleInput(tensor_nd(), kwargs=dict(dim=None, correction=0, keepdim=True)),\n    ]\n\n\ndef _generate_correlation_inputs(device, dtype, requires_grad):\n    shapes = [(2,), (1, 2), (3, 2), (2, 3)]\n    for shape in shapes:\n        yield make_tensor(shape, device, dtype, requires_grad=requires_grad)\n\n\ndef sample_inputs_corrcoef(op_info, device, dtype, requires_grad, **kwargs):\n    return [SampleInput(t) for t in _generate_correlation_inputs(device, dtype, requires_grad)]\n\n\ndef sample_inputs_cov(op_info, device, dtype, requires_grad, **kwargs):\n    inputs = []\n    for t in _generate_correlation_inputs(device, dtype, requires_grad):\n        inputs.append(SampleInput(t))\n        num_observations = t.numel() if t.ndimension() < 2 else t.size(1)\n        fweights = make_tensor((num_observations,), device, torch.int, low=1, high=10)\n        aweights = make_tensor((num_observations,), device, torch.float, low=0, high=1, requires_grad=requires_grad)\n        for correction, fw, aw in product(range(num_observations), [None, fweights], [None, aweights]):\n            inputs.append(SampleInput(t.detach().clone().requires_grad_(requires_grad),\n                                      kwargs={'correction': correction, 'fweights': fw, 'aweights': aw}))\n    return inputs\n\n\ndef _sample_inputs_svd(op_info, device, dtype, requires_grad=False, is_linalg_svd=False):\n    \"\"\"\n    This function generates input for torch.svd with distinct singular values so that autograd is always stable.\n    Matrices of different size:\n        square matrix - S x S size\n        tall matrix - S x (S-2)\n        wide matrix - (S-2) x S\n    and batched variants of above are generated.\n    Each SampleInput has a function 'output_process_fn_grad' attached to it that is applied on the output of torch.svd\n    It is needed for autograd checks, because backward of svd doesn't work for an arbitrary loss function.\n    \"\"\"\n    from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value\n\n    # svd and linalg.svd return V and V.conj().T, respectively. So we need to slice\n    # along different dimensions when needed (this is used by\n    # test_cases2:wide_all and wide_all_batched below)\n    if is_linalg_svd:\n        def slice_V(v):\n            return v[..., :(S - 2), :]\n\n        def uv_loss(usv):\n            u00 = usv[0][0, 0]\n            v00_conj = usv[2][0, 0]\n            return u00 * v00_conj\n    else:\n        def slice_V(v):\n            return v[..., :, :(S - 2)]\n\n        def uv_loss(usv):\n            u00 = usv[0][0, 0]\n            v00_conj = usv[2][0, 0].conj()\n            return u00 * v00_conj\n\n    
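# Minimal sketch of the gauge freedom (assumed square input): if u, s, v = torch.svd(a),\n    # then negating a column of u together with the matching column of v leaves\n    # u @ torch.diag(s) @ v.t() unchanged, so the gradcheck losses below are built\n    # from s alone or from abs() of the singular vectors.\n    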
test_cases1 = (  # some=True (default)\n        # loss functions for complex-valued svd have to be \"gauge invariant\",\n        # i.e. loss functions shouldn't change when the sign of the singular vectors changes.\n        # the simplest choice to satisfy this requirement is to apply 'abs'.\n        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype, device=device),\n            lambda usv: usv[1]),  # 'check_grad_s'\n        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype, device=device),\n            lambda usv: abs(usv[0])),  # 'check_grad_u'\n        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype, device=device),\n            lambda usv: abs(usv[2])),  # 'check_grad_v'\n        # this test is important as it checks the additional term that is non-zero only for complex-valued inputs\n        # and when the loss function depends both on 'u' and 'v'\n        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype, device=device),\n            uv_loss),  # 'check_grad_uv'\n        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype, device=device)[:(S - 2)],\n            lambda usv: (abs(usv[0]), usv[1], abs(usv[2][..., :, :(S - 2)]))),  # 'wide'\n        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype, device=device)[:, :(S - 2)],\n            lambda usv: (abs(usv[0]), usv[1], abs(usv[2]))),  # 'tall'\n        (random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype, device=device),\n            lambda usv: (abs(usv[0]), usv[1], abs(usv[2]))),  # 'batched'\n        (random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype, device=device)[..., :(S - 2), :],\n            lambda usv: (abs(usv[0]), usv[1], abs(usv[2]))),  # 'wide_batched'\n        (random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype, device=device)[..., :, :(S - 2)],\n            lambda usv: (abs(usv[0]), usv[1], abs(usv[2]))),  # 'tall_batched'\n    )\n    test_cases2 = (  # some=False\n        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype, device=device)[:(S - 2)],\n            lambda usv: (abs(usv[0]), usv[1], abs(slice_V(usv[2])))),  # 'wide_all'\n        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype, device=device)[:, :(S - 2)],\n            lambda usv: (abs(usv[0][:, :(S - 2)]), usv[1], abs(usv[2]))),  # 'tall_all'\n        (random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype, device=device)[..., :(S - 2), :],\n            lambda usv: (abs(usv[0]), usv[1], abs(slice_V(usv[2])))),  # 'wide_all_batched'\n        (random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype, device=device)[..., :, :(S - 2)],\n            lambda usv: (abs(usv[0][..., :, :(S - 2)]), usv[1], abs(usv[2]))),  # 'tall_all_batched'\n    )\n\n    out = []\n    for a, out_fn in test_cases1:\n        a.requires_grad = requires_grad\n        if is_linalg_svd:\n            kwargs = {'full_matrices': False}\n        else:\n            kwargs = {'some': True}\n        out.append(SampleInput(a, kwargs=kwargs, output_process_fn_grad=out_fn))\n\n    for a, out_fn in test_cases2:\n        a.requires_grad = requires_grad\n        if is_linalg_svd:\n            kwargs = {'full_matrices': True}\n        else:\n            kwargs = {'some': False}\n        out.append(SampleInput(a, kwargs=kwargs, output_process_fn_grad=out_fn))\n\n    return out\n\n\ndef sample_inputs_permute(op_info, device, dtype, requires_grad, **kwargs):\n    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n    cases = [((1, 2, 3, 4), (0, 2, 3, 1)),\n             ((1, 2, 3, 4), (0, -2, -1, 1)),\n             ((), ()),\n             ((1, 2, 3, 4), (2, 1, 3, 0))]\n\n    def generator():\n        for shape, args in cases:\n            yield SampleInput(make_arg(shape), args=(args,))\n\n    return list(generator())\n\n\n
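# Illustrative check of the negative-dims case above (assumed values):\n#   torch.randn(1, 2, 3, 4).permute(0, -2, -1, 1).shape == torch.Size([1, 3, 4, 2])\n\n\n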
# Based on erstwhile method_tests tests & some tensor_op_tests for pow\ndef sample_inputs_pow(op_info, device, dtype, requires_grad, **kwargs):\n    make_arg = partial(make_tensor, device=device, dtype=dtype)\n\n    samples = []\n\n    if dtype in [torch.float16, torch.bfloat16, torch.float32, torch.float64]:\n        test_cases = (\n            ((2, 2), 0, 5, 1e-3, requires_grad, (2, 2), 0, 1, 0.1, requires_grad, False),\n            ((2, 2), 0, 5, 1e-3, requires_grad, (1,), 0, 1, 0.1, requires_grad, False),\n            ((), 1e-3, 1e-3 + 1, 0, requires_grad, (), 0.1, 1.1, 0, False, False),\n            ((2, 2), 0, 5, 1e-3, requires_grad, (), 0.1, 1.1, 1, False, False),\n        )\n        tests_require_resizing = (\n            ((1,), 0, 5, 1e-3, requires_grad, (2, 2), 0, 1, 0.1, requires_grad, requires_grad),\n            ((2, 1, 2), 0, 5, 1e-3, requires_grad, (1, 2, 1), 0, 1, 0.1, requires_grad, requires_grad),\n            ((), 1e-3, 1e-3 + 1, 0, requires_grad, (1, S, 1), 0, 1, 0.1, requires_grad, requires_grad),\n        )\n        cases = test_cases + tests_require_resizing\n\n        samples = []\n        for (shape_b, low_b, high_b, additive_b, b_grad, shape_e, low_e,\n             high_e, additive_e, e_grad, broadcasts_input) in cases:\n            si = SampleInput((make_arg(shape_b, low=low_b, high=high_b) + additive_b).requires_grad_(b_grad),\n                             args=((make_arg(shape_e, low=low_e, high=high_e) + additive_e).requires_grad_(e_grad),),\n                             broadcasts_input=broadcasts_input)\n            samples.append(si)\n\n        tensor_scalar_inputs = (\n            ((2, 2), 0, 5, 1e-3, requires_grad, (3.14,)),\n            ((), 1e-3, 1e-3 + 1, 0, requires_grad, (3.14,))\n        )\n        more_samples = list(SampleInput(\n            (make_arg(shape, high=high, low=low) + additive).requires_grad_(b_grad),\n            args=exp)\n            for shape, low, high, additive, b_grad, exp in tensor_scalar_inputs)\n\n        samples = [*samples, *more_samples]\n    elif dtype in [torch.complex64, torch.complex128]:\n        args_tuple = (\n            ((2, 2), 0, 5, requires_grad, (3.14,)),\n            ((), 0, 1, requires_grad, (3.14,)),\n            ((), 0, 1, requires_grad, (3.14j,))\n        )\n        samples = list(SampleInput(\n            (make_arg(shape, high=high, low=low) + 1e-3 * (1 + 1j)).requires_grad_(b_grad),\n            args=arg)\n            for shape, low, high, b_grad, arg in args_tuple)\n    else:  # integral dtype\n        exp_tuple = (1, 2, 3)\n        samples = list(SampleInput(\n            make_arg((2, 2), requires_grad=requires_grad),\n            args=(arg,))\n            for arg in exp_tuple)\n        samples.append(SampleInput(\n            make_arg((2, 2), requires_grad=requires_grad),\n            args=(make_arg((2, 2), requires_grad=requires_grad),)))\n    return tuple(samples)\n\ndef sample_inputs_svd(op_info, device, dtype, requires_grad=False, **kwargs):\n    return _sample_inputs_svd(op_info, device, dtype, requires_grad, is_linalg_svd=False)\n\ndef sample_inputs_linalg_svd(op_info, device, dtype, requires_grad=False, **kwargs):\n    return _sample_inputs_svd(op_info, device, dtype, requires_grad, is_linalg_svd=True)\n\ndef sample_inputs_linalg_svdvals(op_info, device, dtype, requires_grad=False, **kwargs):\n    batches = [(), (0, ), (2, ), (1, 1)]\n    ns = [5, 2, 0]\n    samples = []\n    for batch, (m, n) in product(batches, product(ns, ns)):\n        a = make_tensor((*batch, m, n), device, dtype, low=None, high=None, requires_grad=requires_grad)\n        samples.append(SampleInput(a))\n    return samples\n\ndef sample_inputs_softshrink_hardshrink_hardtanh(op_info, device, dtype, requires_grad=False, **kwargs):\n    N = 10\n    tensors = [SampleInput(make_tensor((N, N), device=device, dtype=dtype,\n               requires_grad=requires_grad)) for _ in range(1, N)]\n    return tensors\n\ndef sample_inputs_eig(op_info, device, dtype, requires_grad=False, **kwargs):\n    eigvecs = make_tensor((S, S), device=device, dtype=dtype,\n                          low=None, high=None)\n    eigvals = make_tensor((S,), device=device, dtype=dtype,\n                          low=None, high=None)\n    # we produce only diagonalizable inputs which do not have\n    # complex eigenvalues for real inputs, as there is no\n    # backward implementation for real inputs with 
complex\n # eigenvalues yet.\n input = (eigvecs * eigvals.unsqueeze(-2)) @ eigvecs.inverse()\n input.requires_grad_(requires_grad)\n\n def process_output(eigpair):\n eigvals, eigvecs = eigpair\n if dtype.is_complex:\n # eig produces eigenvectors which are normalized to 1 norm.\n # Note that if v is an eigenvector, so is v * e^{i \\phi},\n # and |v| = |v * e^{i \\phi}| = 1.\n # This, however, makes the eigenvector backward computation process\n # rather unstable unless the objective function is gauge-invariant,\n # that is if f(z) == f(|z|), for example.\n # Hence for complex inputs we ignore the phases and return only\n # the absolute values.\n return eigvals, eigvecs.abs()\n else:\n return eigvals, eigvecs\n\n return [\n SampleInput(\n input,\n kwargs=dict(eigenvectors=True),\n output_process_fn_grad=process_output\n ),\n ]\n\n\ndef sample_inputs_einsum(op_info, device, dtype, requires_grad=False, **kwargs):\n def c(t):\n return t.detach().clone().requires_grad_(requires_grad)\n\n x = make_tensor((3,), device, dtype, requires_grad=requires_grad)\n y = make_tensor((4,), device, dtype, requires_grad=requires_grad)\n A = make_tensor((2, 3,), device, dtype, requires_grad=requires_grad)\n B = make_tensor((1, 3,), device, dtype, requires_grad=requires_grad)\n C = make_tensor((1, 2, 3,), device, dtype, requires_grad=requires_grad)\n D = make_tensor((1, 3, 4,), device, dtype, requires_grad=requires_grad)\n E = make_tensor((4, 4,), device, dtype, requires_grad=requires_grad)\n H = make_tensor((3, 3,), device, dtype, requires_grad=requires_grad)\n I = make_tensor((1, 3, 1,), device, dtype, requires_grad=requires_grad)\n\n inputs = []\n\n # Vector operations\n inputs.append(SampleInput([c(x)], args=('i->',))) # sum\n inputs.append(SampleInput([c(x), c(y)], args=('i,j->ij',))) # outer\n\n # Matrix operations\n inputs.append(SampleInput([c(A)], args=(\"ij->i\",))) # col sum\n inputs.append(SampleInput([c(A), c(B)], args=(\"ij,kj->ik\",))) # matmul\n inputs.append(SampleInput([c(A), c(E)], args=(\"ij,Ab->ijAb\",))) # matrix outer product\n\n # Tensor operations\n inputs.append(SampleInput([c(C), c(D)], args=(\"aij,ajk->aik\",))) # batch matmul\n inputs.append(SampleInput([c(D), c(E)], args=(\"aij,jk->aik\",))) # tensor matrix contraction\n inputs.append(SampleInput([c(C), c(B)], args=(\"ijk,ik->j\",))) # non contiguous\n\n # Test diagonals\n inputs.append(SampleInput([c(I)], args=('iji->j',))) # non-contiguous trace\n\n # Test ellipsis\n inputs.append(SampleInput([c(H)], args=(\"i...->...\",)))\n inputs.append(SampleInput([c(C), c(x)], args=('...ik, ...j -> ij',)))\n\n return inputs\n\n\ndef sample_inputs_linalg_qr(op_info, device, dtype, requires_grad=False, **kwargs):\n \"\"\"\n This function generates input for torch.linalg.qr\n The input is generated as the itertools.product of 'batches' and 'ns'.\n \"\"\"\n batches = [(), (0,), (2, ), (1, 1)]\n ns = [5, 2, 0]\n out = []\n for batch, (m, n) in product(batches, product(ns, ns)):\n a = torch.randn(*batch, m, n, dtype=dtype, device=device, requires_grad=requires_grad)\n out.append(SampleInput(a))\n return out\n\ndef sample_inputs_geqrf(op_info, device, dtype, requires_grad=False):\n batches = [(), (0, ), (2, ), (1, 1)]\n ns = [5, 2, 0]\n samples = []\n for batch, (m, n) in product(batches, product(ns, ns)):\n # TODO: CUDA path doesn't work with batched or empty inputs\n if torch.device(device).type == 'cuda' and (batch != () or m == 0 or n == 0):\n continue\n a = make_tensor((*batch, m, n), device, dtype, low=None, high=None, 
requires_grad=requires_grad)\n samples.append(SampleInput(a))\n return samples\n\ndef sample_inputs_flip(op_info, device, dtype, requires_grad):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n sizes = ((S, M, S), (S, 0, M))\n all_dims = ((0, 1, 2), (0,), (0, 2), (-1,), ())\n\n def gen_samples():\n for size, dims in product(sizes, all_dims):\n yield SampleInput(make_arg(size), kwargs={\"dims\": dims})\n\n return list(gen_samples())\n\ndef sample_inputs_fliplr_flipud(op_info, device, dtype, requires_grad, **kwargs):\n tensors = (\n make_tensor((S, M, S), device, dtype, low=None, high=None, requires_grad=requires_grad),\n make_tensor((S, 0, M), device, dtype, low=None, high=None, requires_grad=requires_grad)\n )\n return [SampleInput(tensor) for tensor in tensors]\n\ndef sample_inputs_fmod_remainder(op_info, device, dtype, requires_grad, *, autodiffed=False, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n if autodiffed:\n samples = (\n ((S, S, S), 1.5, False),\n ((), 1.5, False),\n )\n else:\n cases = (\n ((S, S, S), (), False),\n ((S, S, S), (S, S, S), False),\n ((S, S, S), (S,), False),\n )\n\n # Sample inputs with scalars as torch tensors\n cases_with_tensor_scalar = (\n ((), torch.tensor(1, dtype=dtype, device=device, requires_grad=False), False),\n )\n\n # Sample inputs with broadcasting\n cases_with_broadcasting = (\n ((S,), (S, S, S), True),\n ((S, 1, S), (S, S, S), True),\n ((), (S, S, S), True),\n )\n\n samples = cases + cases_with_tensor_scalar + cases_with_broadcasting # type: ignore[assignment]\n\n def generator():\n for shape, arg_other, broadcasts_input in samples:\n if isinstance(arg_other, tuple):\n arg = make_arg(arg_other, requires_grad=False, exclude_zero=True)\n else:\n # shape_other is scalar or torch.tensor\n arg = arg_other\n yield(SampleInput(make_arg(shape), args=(arg,), broadcasts_input=broadcasts_input))\n\n return list(generator())\n\n# TODO: clamp shares tensors among its sample inputs --- we should prohibit this!\ndef sample_inputs_clamp(op_info, device, dtype, requires_grad, **kwargs):\n x = make_tensor((S, M, S), device, dtype, low=None, high=None, requires_grad=requires_grad)\n lb = make_tensor((S, M, S), device, dtype, low=None, high=None, requires_grad=requires_grad)\n ub = make_tensor((S, M, S), device, dtype, low=None, high=None, requires_grad=requires_grad)\n\n def detach(tensor):\n return tensor.clone().detach_().requires_grad_(requires_grad)\n\n return [\n SampleInput(detach(x), args=(lb, ub)),\n SampleInput(detach(x), args=(detach(lb[0]), detach(ub[0]))),\n SampleInput(detach(x), args=(detach(lb[:, :1]),)),\n ]\n\ndef sample_inputs_clamp_scalar(op_info, device, dtype, requires_grad):\n tensors = (\n make_tensor((2, 3, 2), device, dtype, low=None, high=None, requires_grad=requires_grad),\n make_tensor((2, 0, 3), device, dtype, low=None, high=None, requires_grad=requires_grad),\n )\n\n if dtype is torch.uint8:\n min_max_vals = ((2, 5), (3, 7))\n else:\n min_max_vals = ((0, 1), (-1, 1))\n\n output = [SampleInput(\n tensor.detach().clone().requires_grad_(requires_grad),\n args=vals) for tensor, vals in product(tensors, min_max_vals)]\n output += [\n SampleInput(tensors[0].detach().clone().requires_grad_(requires_grad),\n args=(0.5, None)),\n SampleInput(tensors[0].detach().clone().requires_grad_(requires_grad),\n args=(None, 0.5))]\n empty_tensor = make_tensor((), device=device, dtype=dtype, low=None, high=None, requires_grad=requires_grad)\n 
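# Note: despite its name this is a 0-d (scalar) tensor rather than an empty one;\n    # it exercises the scalar overload, e.g. torch.clamp(torch.tensor(0.7), 0.0, 1.0)\n    # (illustrative value).\n    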
output.append(SampleInput(empty_tensor, args=(0.0, 1.0)))\n    return output\n\ndef sample_kwargs_clamp_scalar(device, dtype, input):\n    if dtype is torch.uint8:\n        min_val, max_val = (random.randint(1, 3), random.randint(4, 8))\n    elif dtype.is_floating_point:\n        min_val, max_val = (random.uniform(-8, 0), random.uniform(1, 8))  # type: ignore[assignment]\n    else:\n        min_val, max_val = (random.randint(-8, 0), random.randint(1, 8))\n    return {'min': min_val, 'max': max_val}, {'a_min': min_val, 'a_max': max_val}\n\ndef sample_inputs_cross(op_info, device, dtype, requires_grad, **kwargs):\n    sample0 = SampleInput(make_tensor((S, 3), device=device, dtype=dtype, requires_grad=requires_grad),\n                          args=(make_tensor((S, 3), device=device, dtype=dtype, requires_grad=requires_grad),))\n    sample1 = SampleInput(make_tensor((S, 3, S), device=device, dtype=dtype, requires_grad=requires_grad),\n                          args=(make_tensor((S, 3, S), device=device, dtype=dtype, requires_grad=requires_grad),),\n                          kwargs={'dim': 1})\n    sample2 = SampleInput(make_tensor((S, 3), device=device, dtype=dtype, requires_grad=requires_grad),\n                          args=(make_tensor((S, 3), device=device, dtype=dtype, requires_grad=requires_grad),),\n                          kwargs={'dim': -1})\n\n    return (sample0, sample1, sample2)\n\ndef sample_inputs_cumprod(op_info, device, dtype, requires_grad, **kwargs):\n    def make_arg(shape):\n        # shrink values to be in the interval [-1, +1] for better precision in gradgradcheck\n        return make_tensor(shape, device, dtype, low=-1, high=+1, requires_grad=requires_grad)\n\n    def prod_zeros(dim_select):\n        assert len(dim_select) == 2\n        result = make_arg(3 * (S,))\n        with torch.no_grad():\n            result.narrow(dim_select[0], 0, 1).narrow(dim_select[1], 1, 1).zero_()\n            result.narrow(dim_select[0], 2, 1).narrow(dim_select[1], 3, 1).zero_()\n            result.narrow(dim_select[0], 4, 1).narrow(dim_select[1], 3, 1).zero_()\n        return result\n\n    # will not be needed once OpInfo tests support Iterables\n    def sample_generator():\n        for dim in range(3):\n            yield SampleInput(make_arg((S, S, S)), args=(dim,))\n        # Scalar tensors and empty tensor\n        for size in [(), (1,), (0,)]:\n            yield SampleInput(make_arg(size), args=(0,))\n\n        yield SampleInput(prod_zeros([0, 1]), args=(1,))\n        yield SampleInput(prod_zeros([0, 2]), args=(1,))\n        yield SampleInput(prod_zeros([1, 2]), args=(1,))\n\n        # test dtype kwarg\n        yield SampleInput(prod_zeros([1, 2]), args=(1,), kwargs={'dtype': dtype})\n\n    return list(sample_generator())\n\ndef sample_inputs_view_as_complex(op_info, device, dtype, requires_grad, **kwargs):\n    return [SampleInput(make_tensor((S, 2), device, dtype, requires_grad=requires_grad),)]\n\ndef sample_inputs_view_as_real(op_info, device, dtype, requires_grad, **kwargs):\n    tensors = (\n        make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n        make_tensor((), device, dtype, requires_grad=requires_grad)\n    )\n    return [SampleInput(tensor) for tensor in tensors]\n\ndef sample_inputs_copysign(op_info, device, dtype, requires_grad, **kwargs):\n    def _make_tensor(*shape, low=None, high=None):\n        return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)\n\n    cases = [\n        # no broadcast\n        ((S, S, S), (S, S, S), False),\n        # broadcast rhs\n        ((S, S, S), (S, S), False),\n\n        # scalar\n        ((S, S), 3.14, False),\n        # scalar positive zero\n        ((S, S), 0.0, False),\n        # scalar negative zero\n        ((S, S), -0.0, False),\n    ]\n\n    # broadcast lhs\n    cases.append(((S, S), (S, S, S), True))\n    # broadcast all\n    cases.append(((S, 1, S), (M, S), True))\n\n    def generator():\n        for input_shape, arg_val, 
broadcasts_input in cases:\n if isinstance(arg_val, tuple):\n arg = _make_tensor(*arg_val)\n else:\n # arg_val is scalar\n arg = arg_val\n\n yield SampleInput(_make_tensor(*input_shape), args=(arg, ), broadcasts_input=broadcasts_input)\n\n return list(generator())\n\ndef sample_inputs_prod(op_info, device, dtype, requires_grad):\n def make_arg(shape):\n # shrink values to be in the interval [-1, +1] for better precision in gradgradcheck\n return make_tensor(shape, device, dtype, low=-1, high=+1, requires_grad=requires_grad)\n\n def prod_single_zero():\n result = make_arg(2 * (S,))\n with torch.no_grad():\n result[0, 1] = 0\n return result\n\n # will not be needed once OpInfo tests support Iterables\n def sample_generator():\n for sample in sample_inputs_cumprod(op_info, device, dtype, requires_grad):\n # only Tensor, ignore other inputs\n yield SampleInput(sample.input.detach().clone().requires_grad_(requires_grad))\n yield sample\n\n # Generates samples with keepdim = True\n for sample in sample_inputs_cumprod(op_info, device, dtype, requires_grad):\n sample.kwargs['keepdim'] = True\n yield sample\n\n yield SampleInput(prod_single_zero())\n yield SampleInput(make_arg((3, 3, 3)), args=(1,))\n yield SampleInput(make_arg((3, 3, 3)), args=(1,), kwargs={'keepdim': True})\n\n # test zero scalar tensor\n zero = make_arg(())\n with torch.no_grad():\n zero.zero_()\n yield SampleInput(zero.detach().clone().requires_grad_(requires_grad))\n yield SampleInput(zero.detach().clone().requires_grad_(requires_grad), args=(0,))\n yield SampleInput(zero.detach().clone().requires_grad_(requires_grad),\n args=(0,),\n kwargs={'keepdim': True})\n\n return list(sample_generator())\n\ndef error_inputs_neg(op_info, device, **kwargs):\n si = SampleInput(torch.tensor((False, True), device=device))\n msg = (\"Negation, the `\\\\-` operator, on a bool tensor is not supported.\"\n \" If you are trying to invert a mask, use the `\\\\~` or\"\n \" `logical_not\\\\(\\\\)` operator instead.\")\n return (ErrorInput(si, error_type=RuntimeError, error_regex=msg),)\n\ndef sample_inputs_nextafter(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n cases = (\n ((S, S), (S, S), False),\n ((S, S), (S,), False),\n ((S, ), (S, S), True)\n )\n\n def generator():\n for shape, other_shape, broadcasts_input in cases:\n yield SampleInput(make_arg(shape), args=(make_arg(other_shape),), broadcasts_input=broadcasts_input)\n\n return list(generator())\n\n\ndef sample_inputs_diag(op_info, device, dtype, requires_grad, **kwargs):\n vec_sample = SampleInput(make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad))\n\n tensors = (\n make_tensor((M, M), device, dtype, low=None, high=None, requires_grad=requires_grad),\n make_tensor((3, 5), device, dtype, low=None, high=None, requires_grad=requires_grad),\n make_tensor((5, 3), device, dtype, low=None, high=None, requires_grad=requires_grad),\n )\n\n args = ((), (2,), (-2,), (1,), (2,))\n\n samples = []\n for tensor, arg in product(tensors, args):\n samples.append(SampleInput(tensor.detach().clone().requires_grad_(requires_grad), args=arg))\n\n return samples + [vec_sample]\n\ndef sample_inputs_diagonal_diag_embed(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n # Shapes for 2D Tensors\n shapes_2d = ((M, M), (3, 5), (5, 3))\n\n # Shapes for 3D Tensors\n shapes_3d = ((M, M, M),)\n\n 
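# Each arg tuple below is (offset[, dim1, dim2]) as accepted by torch.diagonal;\n    # the empty tuple () exercises the defaults offset=0, dim1=0, dim2=1\n    # (torch.diag_embed instead defaults to the last two dimensions).\n    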
args_2d = ((), (2,), (-2,), (1,))\n    args_3d = ((1, 1, 2), (2, 0, 1), (-2, 0, 1))\n\n    def generator():\n        for shape, arg in chain(product(shapes_2d, args_2d), product(shapes_3d, args_3d)):\n            yield SampleInput(make_arg(shape), args=arg)\n\n    return list(generator())\n\n\ndef sample_inputs_diagonal_scatter(op_info, device, dtype, requires_grad, **kwargs):\n    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n    # Shapes for 2D Tensors\n    shapes_2d = ((M, M), (3, 5), (5, 3))\n\n    # Shapes for 3D Tensors\n    shapes_3d = ((M, M, M),)\n\n    args_2d = ((), (2,), (-2,), (1,))\n    args_3d = ((1, 1, 2), (2, 0, 1), (-2, 0, 1))\n\n    def generator():\n        for input_shape, arg in chain(product(shapes_2d, args_2d), product(shapes_3d, args_3d)):\n            input_ = make_arg(input_shape)\n            # We can programmatically figure out the right shape for src:\n            # It should be the same size as input.diagonal(other_args...)\n            if not isinstance(arg, tuple):\n                arg_tuple = (arg,)\n            else:\n                arg_tuple = arg\n            src_shape = input_.diagonal(*arg_tuple).size()\n            src = make_arg(src_shape)\n            yield SampleInput(input_, args=(src, *arg_tuple))\n\n    return list(generator())\n\n\ndef sample_inputs_to_sparse(op_info, device, dtype, requires_grad, **kwargs):\n    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n    return (SampleInput(make_arg((S, S)), args=(), output_process_fn_grad=lambda x: x.to_dense()),\n            SampleInput(make_arg((S, S)), args=(1,), output_process_fn_grad=lambda x: x.to_dense()),)\n\ndef sample_inputs_cross_entropy(op_info, device, dtype, requires_grad, **kwargs):\n    batch_size, num_classes = shape = (2, 3)\n    reductions = (\"mean\", \"sum\", \"none\")\n\n    input_shape_and_kwargs: List[Tuple[Tuple[int, ...], Dict[str, Any]]] = [\n        (shape, dict()),\n        ((*shape, 1), dict()),\n        ((*shape, 1, 2), dict()),\n        ((*shape, 1, 2, 3), dict()),\n        *[(shape, dict(reduction=reduction)) for reduction in reductions],\n        *[\n            (\n                shape,\n                dict(\n                    weight=make_tensor((num_classes,), device=device, dtype=dtype),\n                    reduction=reduction,\n                ),\n            )\n            for reduction in reductions\n        ],\n        (shape, dict(ignore_index=1)),\n    ]\n\n    sample_inputs = []\n    for (input_shape, kwargs), probabilities_target in itertools.product(input_shape_and_kwargs, (False, True)):\n        input = make_tensor(input_shape, device=device, dtype=dtype, requires_grad=requires_grad)\n\n        if probabilities_target:\n            # ignore_index is not supported for probabilities target\n            if \"ignore_index\" in kwargs:\n                continue\n\n            target = make_tensor(\n                input_shape,\n                low=0,\n                high=1,\n                device=device,\n                dtype=dtype,\n                requires_grad=requires_grad,\n            )\n        else:\n            target = make_tensor(\n                (batch_size, *input_shape[2:]),\n                low=0,\n                high=num_classes,\n                device=device,\n                dtype=torch.long,\n            )\n\n            if \"ignore_index\" in kwargs and torch.all(target == kwargs[\"ignore_index\"]):\n                # make sure at least one item in target is not ignored\n                target[0] = random.sample(set(range(num_classes)) - {kwargs[\"ignore_index\"]}, 1)[0]\n\n        sample_inputs.append(SampleInput(input, args=(target,), kwargs=kwargs))\n\n    return sample_inputs\n\n# Used for log_softmax, softmax, softmin\ndef sample_inputs_softmax_variant(op_info, device, dtype, requires_grad, with_dtype=False, **kwargs):\n    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n    cases = [\n        ((S, ), (0, )),\n        ((S, S), (0, )),\n        ((S, S), (1, )),\n        ((S, S), (-1, )),\n        ((S, M, S), (2, )),\n    ]\n\n
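    # Illustrative note (added; not part of the original file): every args tuple above is a\n    # (dim,) argument, e.g. softmax(t, dim=-1) normalizes over the last axis, so the cases\n    # cover leading, middle, trailing and negative dims.\n    # PyTorch on XLA throws an error when passed with dim argument for 0d tensor.\n    # See 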
https://github.com/pytorch/xla/issues/3061 for more details.\n    if torch.device(device).type != 'xla':\n        cases.append(((), (0, )))\n\n    return [\n        SampleInput(make_arg(shape), args=dim, kwargs=dict(dtype=torch.float64) if with_dtype else None)\n        for shape, dim in cases\n    ]\n\n\ndef sample_inputs_masked_softmax(op_info, device, dtype, requires_grad, with_dtype=False, **kwargs):\n    \"\"\"Sample inputs for masked softmax, log_softmax, and softmin.\n\n    A masked normalization operator is a reduction operator with a\n    trailing optional mask argument. A mask is a bool tensor with the\n    same shape as the input, or a shape that is broadcastable to the\n    input shape.\n    \"\"\"\n    inputs: List[SampleInput] = []\n    for sample_input in sample_inputs_softmax_variant(op_info, device, dtype, requires_grad, with_dtype=with_dtype, **kwargs):\n        for mask in _generate_masked_op_mask(sample_input.input.shape, device, **kwargs):\n            sample_input_args, sample_input_kwargs = sample_input.args, dict(mask=mask, **sample_input.kwargs)\n            inputs.append(SampleInput(sample_input.input.detach().clone().requires_grad_(requires_grad),\n                                      args=sample_input_args, kwargs=sample_input_kwargs))\n    return inputs\n\n\ndef sample_inputs_masked_normalize(op_info, device, dtype, requires_grad, **kwargs):\n    \"\"\"Sample inputs for masked normalize.\n    \"\"\"\n    inputs: List[SampleInput] = []\n    for ord in [2.0, 1, float('inf'), float('-inf'), 0]:\n        for sample_input in sample_inputs_softmax_variant(op_info, device, dtype, requires_grad, **kwargs):\n            sample_input_args, sample_input_kwargs = (ord,) + sample_input.args, sample_input.kwargs.copy()\n            inputs.append(SampleInput(sample_input.input.detach().clone().requires_grad_(requires_grad),\n                                      args=sample_input_args, kwargs=sample_input_kwargs))\n    return inputs\n\ndef sample_inputs_logit(op_info, device, dtype, requires_grad, **kwargs):\n    low, high = op_info.domain\n\n    # Note: The operator is very sensitive at points near the\n    # start and end of its domain and leads to NaN for float16\n    # if domain_eps is 1e-5.\n    domain_eps = op_info._domain_eps if dtype != torch.float16 else 3e-2\n\n    low = low + domain_eps\n    high = high - domain_eps\n\n    samples = (\n        SampleInput(make_tensor((S, S, S), device, dtype, low=low, high=high, requires_grad=requires_grad)),\n        SampleInput(make_tensor((S, S, S), device, dtype, low=low,\n                                high=high, requires_grad=requires_grad), args=(0.2,)),\n        SampleInput(make_tensor((), device, dtype, low=low, high=high, requires_grad=requires_grad)),\n        SampleInput(make_tensor((), device, dtype, low=low,\n                                high=high, requires_grad=requires_grad), args=(0.2,)),\n    )\n\n    return samples\n\ndef sample_inputs_isin(op_info, device, dtype, requires_grad):\n    element = make_tensor((L,), device, dtype, low=None, high=None, requires_grad=requires_grad)\n    indices = torch.randint(0, L, size=[S])\n    test_elements = element[indices].clone()\n    return [\n        SampleInput(element, args=(test_elements,))\n    ]\n\ndef sample_inputs_masked_scatter(op_info, device, dtype, requires_grad, **kwargs):\n    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n    def samples_generator():\n        yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, make_arg((S, S))))\n        yield SampleInput(make_arg((S, S)), args=(torch.randn((S,), device=device) > 0, make_arg((S, S))))\n        yield SampleInput(make_arg((S, S)), args=(bernoulli_scalar().to(device), make_arg((S, S))))\n        yield SampleInput(make_arg((S,)),\n                          args=(torch.randn(S, S, device=device) > 0, make_arg((S, S))),\n                          broadcasts_input=True)\n\n
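    # Illustrative note (added; not part of the original file): the last sample above sets\n    # broadcasts_input=True because the (S,) input has to broadcast against the (S, S)\n    # mask and source, a case the inplace variant is expected to reject.\n    samples = 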
tuple(samples_generator())\n return samples\n\n\ndef sample_inputs_masked_fill(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n def sample_generator():\n yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, 10))\n yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, make_arg(())))\n yield SampleInput(make_arg((S, S)), args=(torch.randn(S, device=device) > 0, 10))\n yield SampleInput(make_arg(()), args=(torch.randn((), device=device) > 0, 10))\n yield SampleInput(make_arg(()), args=(torch.randn((), device=device) > 0, make_arg(())))\n yield SampleInput(make_arg((S, S)), args=(torch.randn((), device=device) > 0, 10))\n\n yield SampleInput(make_arg((S,)),\n args=(torch.randn(S, S, device=device) > 0, make_arg(())),\n broadcasts_input=True)\n yield SampleInput(make_arg((S,)),\n args=(torch.randn(S, S, device=device) > 0, 10),\n broadcasts_input=True)\n\n samples = tuple(sample_generator())\n return samples\n\ndef sample_inputs_masked_select(op_info, device, dtype, requires_grad, **kwargs):\n samples = (\n SampleInput(make_tensor((M, M), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(torch.randn(M, M, device=device) > 0,)),\n\n SampleInput(make_tensor((M, M), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(torch.randn((M,), device=device) > 0,)),\n\n SampleInput(make_tensor((M,), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(torch.randn((M, M), device=device) > 0,)),\n\n SampleInput(make_tensor((M, 1, M), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(torch.randn((M, M), device=device) > 0,)),\n\n SampleInput(make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(torch.tensor(1, device=device, dtype=torch.bool),)),\n\n SampleInput(make_tensor((M, M), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(torch.tensor(1, device=device, dtype=torch.bool),)),\n\n SampleInput(make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(torch.randn((M, M), device=device) > 0,)),\n )\n\n return samples\n\ndef sample_inputs_matrix_exp(op_info, device, dtype, requires_grad, **kwargs):\n samples = (\n SampleInput(make_tensor((S, S), device, dtype, requires_grad=requires_grad)),\n SampleInput(make_tensor((S, S, S), device, dtype, requires_grad=requires_grad)),\n )\n\n return samples\n\ndef sample_inputs_matmul(op_info, device, dtype, requires_grad):\n test_cases = (((L,), (L,)),\n ((S, M), (M,)),\n ((M,), (M, S)),\n ((S, M), (M, S)),\n ((S, 0), (0, M)),\n ((S, S, M), (M,)),\n ((S, S, M), (M, S)),\n ((S, S, 0), (0, S)),\n ((M,), (S, M, S)),\n ((S, M), (S, M, S)),\n ((0, 0), (S, 0, 0)),\n ((S, S, M, M), (S, S, M, S)),\n ((S, S, M, M), (M,)),\n ((M,), (S, S, M, S)))\n sample_inputs = []\n for lhs_shape, rhs_shape in test_cases:\n lhs = make_tensor(lhs_shape, device, dtype, low=None, high=None, requires_grad=requires_grad)\n rhs = make_tensor(rhs_shape, device, dtype, low=None, high=None, requires_grad=requires_grad)\n if op_info.name == 'matmul':\n sample_inputs.append(SampleInput(lhs, args=(rhs,)))\n elif op_info.name == '__rmatmul__':\n sample_inputs.append(SampleInput(rhs, args=(lhs,)))\n else:\n raise RuntimeError(\"`op_info.name` must be 'matmul' or '__rmatmul__'\")\n return tuple(sample_inputs)\n\n\ndef sample_inputs_meshgrid(op_info: OpInfo, device: torch.device, 
dtype: torch.dtype,\n requires_grad: bool,\n *, variant: str) -> List[SampleInput]:\n if variant == 'variadic':\n def make_inputs(\n tensors: List[torch.Tensor]) -> Tuple[Union[torch.Tensor,\n List[torch.Tensor]],\n Tuple[torch.Tensor, ...]]:\n return tensors[0], tuple(tensors[1:])\n elif variant == 'list':\n def make_inputs(\n tensors: List[torch.Tensor]) -> Tuple[Union[torch.Tensor,\n List[torch.Tensor]],\n Tuple[torch.Tensor, ...]]:\n return tensors, ()\n else:\n raise ValueError(\n 'Unsupported variant, must be one of {\"variadic\", \"list\"}. '\n f'Got \"{variant}\".')\n\n SCALAR = torch.Size([])\n VECTOR = torch.Size([3])\n test_cases: List[List[torch.Size]] = [\n [SCALAR],\n [VECTOR],\n [VECTOR, SCALAR],\n [VECTOR, SCALAR, VECTOR],\n [VECTOR, SCALAR, VECTOR, SCALAR],\n ]\n\n sample_inputs = []\n for shapes, indexing in itertools.product(test_cases, {'xy', 'ij'}):\n input, args = make_inputs(\n [make_tensor(shape, device, dtype, requires_grad=requires_grad)\n for shape in shapes])\n sample_inputs.append(SampleInput(input=input, args=args,\n kwargs=dict(indexing=indexing)))\n return sample_inputs\n\n\ndef sample_inputs_polar(op_info, device, dtype, requires_grad, **kwargs):\n def _make_tensor_helper(shape, low=None, high=None):\n return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)\n\n samples = (\n SampleInput(_make_tensor_helper((S, S), low=0), args=(_make_tensor_helper((S, S)),)),\n SampleInput(_make_tensor_helper((), low=0), args=(_make_tensor_helper(()),)),\n )\n\n return samples\n\ndef sample_inputs_complex(op_info, device, dtype, requires_grad, **kwargs):\n def _make_tensor_helper(shape):\n return make_tensor(shape, device, dtype, requires_grad=requires_grad)\n\n samples = (\n SampleInput(_make_tensor_helper((S, S)), args=(_make_tensor_helper((S, S)),)),\n SampleInput(_make_tensor_helper(()), args=(_make_tensor_helper(()),)),\n )\n\n return samples\n\n\ndef sample_inputs_polygamma(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n tensor_shapes = ((S, S), ())\n ns = (1, 2, 3, 4, 5)\n\n def generator():\n for shape, n in product(tensor_shapes, ns):\n yield SampleInput(make_arg(shape), args=(n,))\n\n return list(generator())\n\n\ndef sample_inputs_mvlgamma(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n tensor_shapes = ((S, S), ())\n ns = (1, 2, 3, 4, 5)\n\n # Since the accepted lower bound for input\n # to mvlgamma depends on `p` argument,\n # the following function computes the lower bound\n # which we pass to `make_tensor`.\n def compute_min_val(p):\n return (p - 1.) / 2\n\n def generator():\n for shape, n in product(tensor_shapes, ns):\n min_val = compute_min_val(n)\n if not dtype.is_floating_point:\n # Round-up minimum value for integral dtypes\n min_val += 1\n yield SampleInput(make_arg(shape, low=min_val), args=(n,))\n\n return list(generator())\n\n\n# Since `mvlgamma` has multiple entries,\n# there are multiple common skips for the additional\n# entries. 
The following function is a helper to that end.\ndef skips_mvlgamma(skip_redundant=False):\n    skips = (\n        # outside domain values are hard error for mvlgamma op.\n        DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_float_domains'),\n    )\n    if skip_redundant:\n        # Redundant tests\n        skips = skips + (  # type: ignore[assignment]\n            DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients'),\n            DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit'),\n            DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon'),\n        )\n    return skips\n\n\n# To test reference numerics against multiple values of argument `p`,\n# we make multiple OpInfo entries with each entry corresponding to different value of p.\n# We run the op tests from test_ops.py only for `p=1` to avoid redundancy in testing.\n# Class `MvlGammaInfo` already contains the basic information related to the operator;\n# it only takes arguments like `domain`, `skips` and `sample_kwargs`, which\n# differ between the entries.\nclass MvlGammaInfo(UnaryUfuncInfo):\n    def __init__(self, variant_test_name, domain, skips, sample_kwargs):\n        super(MvlGammaInfo, self).__init__(\n            'mvlgamma',\n            ref=reference_mvlgamma if TEST_SCIPY else _NOTHING,\n            aliases=('special.multigammaln',),\n            variant_test_name=variant_test_name,\n            domain=domain,\n            decorators=(precisionOverride({torch.float16: 5e-2}),),\n            dtypes=all_types_and(torch.bfloat16),\n            dtypesIfCUDA=all_types_and(torch.half),\n            sample_inputs_func=sample_inputs_mvlgamma,\n            safe_casts_outputs=True,\n            supports_forward_ad=True,\n            skips=skips,\n            sample_kwargs=sample_kwargs)\n\n\ndef sample_inputs_entr(op_info, device, dtype, requires_grad, **kwargs):\n    low, _ = op_info.domain\n\n    if requires_grad:\n        low = 0 + op_info._domain_eps\n\n    return (SampleInput(make_tensor((L,), device, dtype,\n                                    low=low,\n                                    requires_grad=requires_grad)),\n            SampleInput(make_tensor((), device, dtype,\n                                    low=low,\n                                    requires_grad=requires_grad)))\n\n\ndef sample_inputs_zeta(op_info, device, dtype, requires_grad, **kwargs):\n    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n    samples = (SampleInput(make_arg((S,), low=1, requires_grad=requires_grad),\n                           args=(make_arg((S,), low=2, requires_grad=False),)),\n               SampleInput(make_arg((S,), low=1, requires_grad=requires_grad),\n                           args=(3.,)),\n               )\n\n    return samples\n\n\n# TODO: Consolidate `i0e` with sample_inputs_unary when `make_tensor`\n# supports the `exclude` argument.\n# For more context: https://github.com/pytorch/pytorch/pull/56352#discussion_r633277617\ndef sample_inputs_i0_i1(op_info, device, dtype, requires_grad, **kwargs):\n\n    samples = (SampleInput(make_tensor((S,), device, dtype,\n                                       requires_grad=requires_grad)),\n               SampleInput(make_tensor((), device, dtype,\n                                       requires_grad=requires_grad)))\n\n    if requires_grad and op_info.op == torch.special.i0e:\n        # NOTE: `i0e`'s first-order gradient is not continuous\n        # at `0`, hence we don't test `i0e` with any input being `0`.\n        # TODO: Remove this when `make_tensor` supports excluding `0`.\n        with torch.no_grad():\n            for sample in samples:\n                t = sample.input\n                t[t == 0] = torch.finfo(dtype).eps  # type: ignore[index]\n    elif requires_grad and op_info.op != torch.special.i0e:\n        # Special Case for gradient\n        # Sample with `0` in the input\n        t = make_tensor((S,), device, dtype,\n                        requires_grad=requires_grad)\n\n        with torch.no_grad():\n            t[0] = 0\n\n        samples += (SampleInput(t),)  # type: ignore[assignment]\n\n    return samples\n\n\n
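# --- Illustrative sketch (added; not part of the original file): torch.rsub computes\n# other - alpha * input, i.e. subtraction with swapped operands, which is the contract\n# the samples below exercise. The helper name is hypothetical, for illustration only. ---\ndef _demo_rsub_matches_reversed_subtraction(t, other, alpha=1):\n    # rsub(input, other, alpha) should agree with other - alpha * input.\n    return torch.allclose(torch.rsub(t, other, alpha=alpha), other - alpha * t)\n\n\ndef sample_inputs_rsub(op_info, device, dtype, requires_grad, variant='tensor', **kwargs):\n    def 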
_make_tensor_helper(shape, low=None, high=None):\n return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)\n\n def _samples_with_alpha_helper(args, alphas, filter_fn=lambda arg_alpha: True):\n filtered_product = filter(filter_fn, product(args, alphas)) # type: ignore[var-annotated]\n return (SampleInput(input.detach().clone().requires_grad_(requires_grad),\n args=(arg,), kwargs=dict(alpha=alpha))\n for (input, arg), alpha in filtered_product)\n\n int_alpha, float_alpha, complex_alpha = 2, 0.1, 1 + 0.6j\n\n if variant == 'tensor':\n samples = (\n SampleInput(_make_tensor_helper((S, S)), args=(_make_tensor_helper((S, S)),)),\n SampleInput(_make_tensor_helper((S, S)), args=(_make_tensor_helper((S,)),)),\n SampleInput(_make_tensor_helper((S,)), args=(_make_tensor_helper((S, S)),)),\n SampleInput(_make_tensor_helper(()), args=(_make_tensor_helper(()),)),\n SampleInput(_make_tensor_helper(()), args=(_make_tensor_helper((S,)),)),\n SampleInput(_make_tensor_helper((S,)), args=(_make_tensor_helper(()),)),\n )\n\n if dtype.is_complex:\n alphas = [int_alpha, float_alpha, complex_alpha]\n elif dtype.is_floating_point:\n alphas = [int_alpha, float_alpha]\n else:\n alphas = [int_alpha]\n\n args = ((_make_tensor_helper((S, S)), _make_tensor_helper((S, S))),\n (_make_tensor_helper((S, S)), _make_tensor_helper((S,))),\n (_make_tensor_helper(()), _make_tensor_helper(())))\n samples += tuple(_samples_with_alpha_helper(args, alphas)) # type: ignore[assignment]\n elif variant == 'scalar':\n # Scalar Other\n samples = (SampleInput(_make_tensor_helper((S, S)), args=(0.5,)),\n SampleInput(_make_tensor_helper(()), args=(0.5,)),\n SampleInput(_make_tensor_helper((S, S)), args=(1.5j,)),\n SampleInput(_make_tensor_helper(()), args=(1.5j,)),\n SampleInput(_make_tensor_helper((S, S)), args=(0.4 + 1.2j,)),\n SampleInput(_make_tensor_helper(()), args=(1.2 + 1.76j,)))\n\n scalar_args = [(_make_tensor_helper((S, S)), 0.5), (_make_tensor_helper(()), 0.5),\n (_make_tensor_helper((S, S)), 2.7j), (_make_tensor_helper(()), 2.7j),\n (_make_tensor_helper((S, S)), 1 - 2.7j), (_make_tensor_helper(()), 1 + 2.7j)]\n\n alphas = [int_alpha, float_alpha, complex_alpha]\n\n def filter_fn(arg_alpha):\n arg, alpha = arg_alpha\n if isinstance(alpha, complex):\n if dtype.is_complex or isinstance(arg[1], complex):\n return True\n else:\n # complex alpha is valid only if either `self` or `other` is complex\n return False\n\n # Non-Complex Alpha\n return True\n\n # Samples with alpha (scalar version) covers the following cases\n # self | other | alpha\n # -----------------------------------------\n # real | real | real (int and float)\n # real | complex | real and complex\n # complex | real | real and complex\n # complex | complex | real and complex\n #\n # It does not cover\n # real | real | complex\n # x = torch.randn(2, requires_grad=True, dtype=torch.float64)\n # torch.rsub(x, 1, alpha=1. 
+ 1.6j)\n # RuntimeError: value cannot be converted to type double without overflow: (-1,-1.6)\n\n samples += tuple(_samples_with_alpha_helper(scalar_args, alphas, filter_fn=filter_fn)) # type: ignore[assignment]\n else:\n raise Exception(\"Invalid variant!\")\n\n return samples\n\ndef sample_inputs_cumulative_ops(op_info, device, dtype, requires_grad, supports_dtype_kwargs=True, **kwargs):\n def _make_tensor_helper(shape, low=None, high=None):\n return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)\n\n samples = [\n SampleInput(_make_tensor_helper((S, S, S)), args=(0,)),\n SampleInput(_make_tensor_helper((S, S, S)), args=(1,)),\n SampleInput(_make_tensor_helper(()), args=(0,)),\n ]\n\n if supports_dtype_kwargs:\n # NOTE: if `dtype` is not same as input, then inplace variants fail with\n # `provided dtype must match the dtype of self tensor in cumsum`\n samples.append(SampleInput(_make_tensor_helper((S, S, S)), args=(1,), kwargs={'dtype': dtype}))\n\n return samples\n\n\ndef sample_inputs_unfold(op_info, device, dtype, requires_grad, **kwargs):\n test_cases = (\n ((), (0, 1, 1)),\n ((S, S, S, S), (0, 3, 1)),\n ((S, S, S, S), (1, 3, 1)),\n ((S, S, S, S), (2, 3, 1)),\n ((S, S, S, S), (3, 3, 1)),\n ((S, S, S, S), (0, 3, 2)),\n ((S, S, S, S), (1, 3, 2)),\n ((S, S, S, S), (2, 3, 2)),\n ((S, S, S, S), (3, 3, 2)),\n ((S, S, S, S), (0, 4, 1)),\n ((S, S, S, S), (1, 4, 1)),\n ((S, S, S, S), (2, 4, 1)),\n ((S, S, S, S), (3, 4, 1)),\n ((M,), (0, 3, 1)),\n ((M,), (0, 3, 2)),\n ((M,), (0, 3, 3)),\n ((1000,), (0, 3, 11)),\n ((1000,), (0, 2, 27)),\n ((10, 10), (0, 1, 2)),\n ((10, 10), (1, 2, 3)),\n ((10, 10), (1, 2, 2)),\n ((S, S, S), (2, 3, 2)),\n )\n\n sample_inputs = []\n for shape, arguments in test_cases:\n sample_inputs += [SampleInput(make_tensor(shape, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=arguments)]\n return sample_inputs\n\n\ndef sample_inputs_atan2(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n cases = (\n ((S, S, S), (S, S, S), False),\n ((), (), False),\n ((S, S, S), (S,), False),\n ((S,), (S, S, S), True),\n ((S, 1, S), (S, S), True),\n )\n\n def generator():\n for x_shape, y_shape, broadcasts_input in cases:\n yield SampleInput(make_arg(x_shape), args=(make_arg(y_shape),),\n broadcasts_input=broadcasts_input)\n\n return list(generator())\n\n\ndef sample_inputs_split(op_info, device, dtype, requires_grad, *, list_args=False, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n if list_args:\n cases = (\n ((S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)],)),\n ((S, S, S), ([int(S / 2), S - int(S / 2) * 2, int(S / 2)], 2),),\n ((S, S, S), ([int(S / 2), S - int(S / 2) * 2, int(S / 2)], -2),)\n )\n else:\n cases = ( # type: ignore[assignment]\n ((S, S, S), (2,)),\n ((S, S, S), (S, 1)),\n )\n\n def generator():\n for shape, args in cases:\n yield SampleInput(make_arg(shape), args=args)\n\n return list(generator())\n\n\ndef sample_inputs_split_with_sizes(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n cases = (((S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)],)),\n ((S, S, S), ([int(S / 3), S - int(S / 3), 0],)),\n ((S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)], 2)),\n ((S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)], -2)),\n )\n\n def 
generator():\n for shape, args in cases:\n yield SampleInput(make_arg(shape), args=args)\n\n return list(generator())\n\n\ndef sample_inputs_msort(op_info, device, dtype, requires_grad):\n def apply_grad(t):\n if dtype in floating_types_and(torch.float16, torch.bfloat16):\n t.requires_grad_(requires_grad)\n\n def large_1d_unique(dtype, device):\n res = torch.randperm(L * L * L, dtype=torch.int64, device=device)\n res = res.to(dtype)\n apply_grad(res)\n return res\n\n samples = []\n # Test case for large tensor.\n largesample = SampleInput(large_1d_unique(dtype, device))\n\n sample = SampleInput(make_tensor((S, M, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad))\n\n return [largesample, sample]\n\ndef sample_inputs_lerp(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n samples = (\n # no broadcast\n SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 0.4)),\n # broadcast rhs\n SampleInput(make_arg((S, S)), args=(make_arg((S,)), 0.4)),\n # scalar tensor\n SampleInput(make_arg(()), args=(make_arg(()), 0.4)),\n # broadcast rhs scalar-tensor\n SampleInput(make_arg((S, S)), args=(make_arg(()), 0.4)),\n # broadcast rhs with weight tensor\n SampleInput(make_arg((S, S)), args=(make_arg((S,)), make_arg((S, S)))),\n # broadcast rhs and weight tensor\n SampleInput(make_arg((S, S)), args=(make_arg((S, 1)), make_arg((S,)))),\n # broadcast lhs\n SampleInput(make_arg((S,)), args=(make_arg((S, S)), 0.4), broadcasts_input=True),\n # scalar broadcast_lhs\n SampleInput(make_arg(()), args=(make_arg((S, S)), 0.4), broadcasts_input=True),\n # broadcast all\n SampleInput(make_arg((S, 1)), args=(make_arg((S, S)), 0.4), broadcasts_input=True),\n # tensor broadcast all\n SampleInput(make_arg((S, 1)), args=(make_arg((S, S)), make_arg((S, 1))),\n broadcasts_input=True),\n # no broadcast with weight tensor\n SampleInput(make_arg((S, S)), args=(make_arg((S, S)), make_arg((S, S)))),\n # broadcast lhs with weight tensor\n SampleInput(make_arg((S,)), args=(make_arg((S, S)), make_arg((S, S))), broadcasts_input=True),\n # broadcast lhs and weight tensor\n SampleInput(make_arg((S,)), args=(make_arg((S, S, S)), make_arg((S, S))), broadcasts_input=True),\n # broadcast lhs and weight tensor variant\n SampleInput(make_arg((S, S)), args=(make_arg((S, S, S)), make_arg((S,))), broadcasts_input=True),\n )\n\n if dtype.is_complex:\n samples = samples + ( # type: ignore[assignment]\n # no broadcast\n SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 0.4j)),\n SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 1.2 + 0.1j)),\n # broadcast rhs\n SampleInput(make_arg((S, S)), args=(make_arg((S,)), 0.4j)),\n SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 5.4 + 9j)),\n # scalar tensor\n SampleInput(make_arg(()), args=(make_arg(()), 0.4j)),\n SampleInput(make_arg(()), args=(make_arg(()), 6.1 + 0.004j)),\n # broadcast rhs scalar-tensor\n SampleInput(make_arg((S, S)), args=(make_arg(()), 0.4j)),\n SampleInput(make_arg((S, S)), args=(make_arg(()), 1 + 2j)),\n )\n\n return samples\n\ndef sample_inputs_tensordot(self, device, dtype, requires_grad, **kwargs):\n cases = (\n ((2, 2, 2), (2, 2, 2), (2)),\n ((2, 2, 1), (2, 1, 2), ([0, 1], [2, 0])),\n )\n samples = []\n for first_shape, second_shape, dims in cases:\n samples.append(SampleInput(make_tensor(first_shape, device, dtype,\n requires_grad=requires_grad),\n args=(make_tensor(second_shape, device, dtype,\n requires_grad=requires_grad),),\n 
kwargs=dict(dims=dims,)))\n return tuple(samples)\n\ndef sample_inputs_kron(op_info, device, dtype, requires_grad):\n test_cases = (\n ((S, S), (M, L)),\n )\n\n sample_inputs = []\n for input_shape, other_shape in test_cases:\n input = make_tensor(input_shape, device, dtype, low=None, high=None, requires_grad=requires_grad)\n other = make_tensor(other_shape, device, dtype, low=None, high=None, requires_grad=requires_grad)\n sample = SampleInput(input, args=(other,))\n sample_inputs.append(sample)\n return tuple(sample_inputs)\n\ndef sample_inputs_inner(self, device, dtype, requires_grad, **kwargs):\n return (\n SampleInput(\n make_tensor((S, ), device, dtype, requires_grad=requires_grad),\n args=(\n make_tensor((S, ), device, dtype, requires_grad=requires_grad),\n )\n ),\n SampleInput(\n make_tensor((), device, dtype, requires_grad=requires_grad),\n args=(\n make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n )\n ),\n )\n\ndef sample_inputs_scatter(op_info, device, dtype, requires_grad):\n def _tensor(shape, dtype=dtype, low=None, high=None):\n return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)\n\n def _gather(shape, index_dim, max_indices):\n return gather_variable(shape, index_dim, max_indices, device=device)\n\n zero = torch.tensor(0, dtype=torch.long, device=device)\n test_cases = (\n (_tensor((M, S)), (0, _gather((S, S), 1, M), _tensor((S, S)))),\n (_tensor((M, S)), (1, _gather((S, S), 0, S), _tensor((S, S)))),\n (_tensor((M, S)), (-1, _gather((S, S), 0, S), _tensor((S, S)))),\n (_tensor((M, S)), (0, _gather((M, S // 2), 1, M), _tensor((M, S // 2)))),\n (_tensor((M, S)), (1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))),\n (_tensor((M, S)), (-1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))),\n (_tensor(()), (0, zero.clone().detach(), _tensor(()))),\n (_tensor(()), (0, zero.clone().detach(), 2.5)),\n )\n\n samples = []\n for tensor, args in test_cases:\n samples.append(SampleInput(tensor, args=args))\n\n if not requires_grad:\n samples.append(SampleInput(\n tensor.clone().detach(),\n args=args, kwargs={'reduce': 'add'}\n ))\n\n if dtype.is_floating_point:\n samples.append(SampleInput(\n tensor.clone().detach(),\n args=args, kwargs={'reduce': 'multiply'}\n ))\n\n return samples\n\ndef sample_inputs_scatter_add(op_info, device, dtype, requires_grad):\n def _tensor(shape, dtype=dtype, low=None, high=None):\n return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)\n\n def _gather(shape, index_dim, max_indices):\n return gather_variable(shape, index_dim, max_indices, device=device)\n\n zero = torch.tensor(0, dtype=torch.long, device=device)\n test_cases = (\n (_tensor((M, S)), (0, _gather((S, S), 1, M), _tensor((S, S)))),\n (_tensor((M, S)), (1, _gather((S, S), 0, S), _tensor((S, S)))),\n (_tensor((M, S)), (-1, _gather((S, S), 0, S), _tensor((S, S)))),\n (_tensor((M, S)), (0, _gather((M, S // 2), 1, M), _tensor((M, S // 2)))),\n (_tensor((M, S)), (1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))),\n (_tensor((M, S)), (-1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))),\n (_tensor(()), (0, zero.clone().detach(), _tensor(()))),\n )\n\n return [SampleInput(tensor, args=args) for tensor, args in test_cases]\n\n\ndef sample_inputs_ravel(op_info, device, dtype, requires_grad, **kwargs):\n samples = (SampleInput(make_tensor((S, S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad)),\n SampleInput(make_tensor((), device, dtype,\n low=None, high=None,\n 
requires_grad=requires_grad)),)\n\n return samples\n\n\ndef sample_inputs_tril_triu(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n cases = (((M, M), ()),\n ((M, M), (2,),),\n ((S, M, M), ()),\n ((S, M, M), (2,)),\n ((3, 3, S, S), ()),)\n\n def generator():\n for shape, args in cases:\n yield SampleInput(make_arg(shape), args=args)\n\n return list(generator())\n\n\ndef sample_inputs_clone(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n def generator():\n yield SampleInput(make_arg((S, M, S)))\n yield SampleInput(make_arg(()))\n\n return list(generator())\n\n\ndef sample_inputs_contiguous(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n def generator():\n yield SampleInput(make_arg((S, S)))\n\n return list(generator())\n\n\ndef sample_inputs_sum_to_size(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n # list of tuples (shape, shape) defining the shapes of the input and output tensors\n sample_shapes = [\n ((), ()),\n ((S), (1)),\n ((S, S), (1, 1)),\n ((S, S), (1, S)),\n ((S, S), (S, S)),\n ((S, S, S), (S, 1, S)),\n ]\n\n samples = []\n\n for input_shape, output_shape in sample_shapes:\n input_t = make_arg(input_shape)\n samples.append(SampleInput(input_t, args=(output_shape,)))\n\n return samples\n\ndef sample_inputs_resize_ops(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device)\n cases = (((S, S, S), (S * S, S)),\n ((), ()),\n ((), (1, 1, 1)),\n )\n\n def generator():\n for shape, args_or_shape in cases:\n # Update `args` based on operator\n if op_info.name == 'resize_':\n # resize_ takes shape/tuple of ints,\n args = (args_or_shape, )\n elif op_info.name == 'resize_as_':\n # resize_as_ takes another tensor\n args = (make_arg(shape, requires_grad=False), ) # type:ignore[assignment]\n else:\n raise ValueError(\"sample_inputs_resize_ops is being used with incorrect operator\")\n\n yield(SampleInput(make_arg(shape, requires_grad=requires_grad), args=args))\n\n return list(generator())\n\ndef sample_inputs_view_reshape(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n cases = (((S, S, S), (S * S, S)),\n ((S * S, S), (S, S, S)),\n ((S * S, S), (S, -1, S)),\n ((S * S * 2, S), (S, -1)),\n ((S,), (S,)),\n ((), ()),\n ((), (1,)))\n\n def generator():\n for case in cases:\n shape, args = case\n inp = make_arg(shape, requires_grad=requires_grad)\n yield(SampleInput(inp, args=(args, )))\n\n if op_info.name != \"view\" and len(shape) >= 2:\n yield(SampleInput(\n inp.detach().clone().transpose(0, 1).requires_grad_(requires_grad),\n args=(args, )))\n\n return list(generator())\n\ndef sample_inputs_view_as_reshape_as(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device)\n\n cases = (((S, S, S), (S * S, S)),\n ((), ()),\n ((), (1, 1)),\n )\n\n def generator():\n for case in cases:\n shape, shape_other = case\n inp = make_arg(shape, requires_grad=requires_grad)\n yield(SampleInput(inp, args=(make_arg(shape_other, requires_grad=False),)))\n\n if op_info.name != \"view_as\" and len(shape) >= 2:\n yield(SampleInput(\n 
inp.detach().clone().transpose(0, 1).requires_grad_(requires_grad),\n args=(make_arg(shape_other, requires_grad=False),)))\n\n return list(generator())\n\ndef sample_inputs_atleast1d2d3d(op_info, device, dtype, requires_grad, **kwargs):\n input_list = []\n shapes = ((S, S, S, S), (S, S, S), (S, S), (S, ), (),)\n make_tensor_partial = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n samples = []\n for shape in shapes:\n input_list.append(make_tensor_partial(shape))\n samples.append(SampleInput(make_tensor_partial(shape)))\n samples.append(SampleInput(input_list, ))\n return samples\n\ndef sample_inputs_select(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n cases = (((S, S, S), (1, 2)),\n ((S, S, S), (-1, 2)),\n ((S, S, S), (-1, -1)),\n ((S, S, S), (1, -1)),\n ((S,), (0, 2))\n )\n\n def generator():\n for shape, args in cases:\n yield SampleInput(make_arg(shape), args=args)\n\n return list(generator())\n\n\ndef sample_inputs_select_scatter(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n cases = (((S, S, S), (S, S), (1, 2)),\n ((S, S, S), (S, S), (-1, 2)),\n ((S, S, S), (S, S), (-1, -1)),\n ((S, S, S), (S, S), (1, -1)),\n ((S,), (), (0, 2))\n )\n\n def generator():\n for input_shape, src_shape, args in cases:\n input_ = make_arg(input_shape)\n src = make_arg(src_shape)\n yield SampleInput(input_, args=(src, *args))\n\n return list(generator())\n\n\ndef sample_inputs_slice_scatter(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n cases = (((L, L, L), (L, L, L,), (0, 0, L, 1)),\n ((L, L, L), (L // 2, L, L,), (0, L // 2, L, 1)),\n ((L, L, L), (L // 4, L, L,), (0, L // 2, L, 2)),\n ((L, L, L), (L, L, L,), (1, 0, L, 1)),\n ((L, L, L), (L, L // 2, L,), (1, L // 2, L, 1)),\n ((L, L, L), (L, L // 4, L,), (1, L // 2, L, 2)),\n ((L, L, L), (L, L, L,), (2, 0, L, 1)),\n ((L, L, L), (L, L, L // 2,), (2, L // 2, L, 1)),\n ((L, L, L), (L, L, L // 4,), (2, L // 2, L, 2)),\n )\n\n def generator():\n for input_shape, src_shape, args in cases:\n input_ = make_arg(input_shape)\n src = make_arg(src_shape)\n yield SampleInput(input_, args=(src, *args))\n\n return list(generator())\n\n\ndef sample_inputs_rbinops(op_info, device, dtype, requires_grad, supports_dtype_kwargs=True, **kwargs):\n def _make_tensor_helper(shape, low=None, high=None):\n return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)\n\n scalar: Union[int, float, complex] = 3\n\n if dtype.is_floating_point:\n scalar = 3.14\n elif dtype.is_complex:\n scalar = 3.14j\n\n samples = [\n SampleInput(_make_tensor_helper((S, S, S)), args=(scalar,)),\n SampleInput(_make_tensor_helper(()), args=(scalar,)),\n ]\n\n return samples\n\n\ndef sample_inputs_expand(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n cases = (((S, 1, 1), (S, S, S)),\n ((S, 1, S), (S, S, S)),\n ((S, 1, S), (-1, S, -1)),\n ((S, 1, S), (-1, S, S)),\n ((S, 1), (S, S, S)),\n ((1,), (S, S, S)),\n ((1, S), (1, 1, S)),\n ((), ()),\n ((), (1, 3, 2)),\n )\n\n def generator():\n for case in cases:\n shape, args = case\n yield(SampleInput(make_arg(shape), args=(args, )))\n\n return list(generator())\n\ndef sample_inputs_conversion(op_info, device, dtype, 
requires_grad, **kwargs):\n    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n    shapes = ((),\n              (2, 3))\n    memory_format_options = [None, torch.contiguous_format]\n\n    def generator():\n        for shape, memory_format in itertools.product(shapes, memory_format_options):\n            yield SampleInput(make_arg(shape),\n                              kwargs={'memory_format': memory_format} if memory_format else {})\n    return list(generator())\n\ndef sample_inputs_conversion_channels_last(op_info, device, dtype, requires_grad, **kwargs):\n    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n    return [\n        # Channels last case: input must be 4d\n        SampleInput(make_arg((2, 3, 2, 3)), kwargs={'memory_format': torch.channels_last})\n\n    ]\n\ndef sample_inputs_expand_as(op_info, device, dtype, requires_grad, **kwargs):\n    make_arg = partial(make_tensor, dtype=dtype, device=device)\n\n    cases = (((S, 1, 1), (S, S, S)),\n             ((), ()),\n             ((), (1, 1)),\n             )\n\n    def generator():\n        for shape, shape_other in cases:\n            yield(SampleInput(make_arg(shape, requires_grad=requires_grad),\n                              args=(make_arg(shape_other, requires_grad=False), )))\n\n    return list(generator())\n\n\ndef sample_inputs_where(op_info, device, dtype, requires_grad, **kwargs):\n    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n    def make_bool_mask(shape):\n        # Make sure at least one element is nonzero,\n        # except for empty tensor\n        mask_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False)\n\n        if mask_t.numel() == 0:\n            return mask_t\n        elif mask_t.numel() == 1:\n            mask_t.fill_(True)\n            return mask_t\n\n        if mask_t.sum() == 0:\n            def random_index(shape):\n                # random.randint is inclusive on both ends, so the valid index range\n                # for a dimension of size max_idx is [0, max_idx - 1]\n                return tuple(map(lambda max_idx: random.randint(0, max_idx - 1), shape))\n\n            mask_t[random_index(mask_t.shape)] = True\n            return mask_t\n\n        return mask_t\n\n    cases = (((M, M), (M, M), (M, M), False),\n             ((M, 1, M), (M, M), (M, M, 1), True),\n             ((), (), (), False),\n             ((M, 1, M), (), (M, M, 1), True),\n             ((), (M, M), (), True),)\n\n    def generator():\n        for shape, mask_shape, other_shape, broadcasts_input in cases:\n            yield SampleInput(make_arg(shape),\n                              args=(make_bool_mask(mask_shape), make_arg(other_shape)),\n                              broadcasts_input=broadcasts_input)\n\n    return list(generator())\n\ndef sample_inputs_nonzero(op_info, device, dtype, requires_grad, **kwargs):\n    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n    sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))\n\n    inputs = []\n    for shape in sizes:\n        # construct input without any non-zero elements\n        zeros = torch.zeros(shape, dtype=dtype, device=device, requires_grad=requires_grad)\n        inputs.append(zeros)\n\n        # construct input with mixed zero and non-zero elements\n        mixed = make_arg(shape).requires_grad_(False)\n        mask_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False)\n        mixed[mask_t] = 0\n        inputs.append(mixed)\n\n    def generator():\n        for input_t, as_tuple in product(inputs, [False, True]):\n            yield(SampleInput(input_t.detach().clone().requires_grad_(requires_grad),\n                              kwargs=dict(as_tuple=as_tuple)))\n\n    return list(generator())\n\n
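# --- Illustrative sketch (added; not part of the original file): the two output\n# conventions of torch.nonzero that the as_tuple kwarg above toggles. The helper name\n# is hypothetical, for illustration only. ---\ndef _demo_nonzero_conventions(t):\n    # as_tuple=False returns one (n, ndim) tensor of indices;\n    # as_tuple=True returns ndim 1-D index tensors, one per dimension.\n    return torch.nonzero(t, as_tuple=False), torch.nonzero(t, as_tuple=True)\n\ndef sample_inputs_chunk(op_info, device, dtype, requires_grad, **kwargs):\n    make_arg = partial(make_tensor, dtype=dtype, device=device)\n\n    cases = (((S, S, S), (2,)),\n             ((S, S, S), (S, 1)),\n             ((S, S, S), (S, -1)))\n\n    def generator():\n        for case in cases:\n            shape, args = case\n            yield(SampleInput(make_arg(shape, requires_grad=requires_grad), args=args))\n\n    return 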
list(generator())\n\ndef sample_inputs_kthvalue(op_info, device, dtype, requires_grad, **kwargs):\n def _tensor(shape, dtype=dtype, low=None, high=None):\n return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)\n\n test_cases = [\n (_tensor((S, S, S)), (2,)),\n (_tensor((S, S, S)), (2, 1,)),\n (_tensor((S, S, S)), (2, -1,)),\n (_tensor((S, S, S)), (2, 1, True,)),\n (_tensor((S, S, S)), (2, -1, True,)),\n (_tensor((S,)), (2, 0,)),\n (_tensor((S,)), (2, 0, True,)),\n (_tensor(()), (1,)),\n (_tensor(()), (1, 0,)),\n (_tensor(()), (1, 0, True))\n ]\n\n return [SampleInput(tensor, args=args) for tensor, args in test_cases]\n\ndef error_inputs_kthvalue(op_info, device, **kwargs):\n # tests overlapping output fails\n t = make_tensor(10, dtype=torch.float32, device=device)\n indices = torch.empty((), device=device, dtype=torch.long)\n si = SampleInput(t, args=(5,), kwargs={'out': (t, indices)})\n\n k_out_of_range_err = \"selected number k out of range for dimension\"\n return (ErrorInput(si, error_type=RuntimeError, error_regex=\"unsupported operation\"),\n ErrorInput(SampleInput(torch.randn(2, 2, device=device), args=(3, 0)),\n error_type=RuntimeError, error_regex=k_out_of_range_err),\n ErrorInput(SampleInput(torch.randn(2, 2, device=device), args=(3,)),\n error_type=RuntimeError, error_regex=k_out_of_range_err),\n ErrorInput(SampleInput(torch.tensor(2, device=device), args=(3,)),\n error_type=RuntimeError, error_regex=k_out_of_range_err),)\n\n\ndef sample_inputs_dropout(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, (S,), device=device, dtype=dtype, requires_grad=requires_grad)\n\n return [\n SampleInput(make_arg()),\n SampleInput(make_arg(), kwargs=dict(p=0.0)),\n SampleInput(make_arg(), kwargs=dict(p=1.0)),\n SampleInput(make_arg(), kwargs=dict(training=False)),\n ]\n\n\ndef sample_inputs_embedding_bag(op_info, device, dtype, requires_grad, **kwargs):\n def make_input(shape):\n return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)\n\n def make_long_input(shape, *, low, high, noncontiguous=False):\n return make_tensor(shape, device=device, dtype=torch.long, low=low, high=high,\n noncontiguous=noncontiguous)\n\n def make_per_sample_weight(flag, idx):\n # a tensor of float / double weights, or None\n # to indicate all weights should be taken to be 1\n if flag:\n return make_input(idx.shape)\n return None\n\n def generator():\n offsets = torch.tensor([0, 3], device=device, dtype=torch.long)\n for generate_per_sample_weight in (True, False):\n for mode in ('sum', 'mean', 'max'):\n # per_sample_weights is only supported for mode='sum' (got mode='****')\n if generate_per_sample_weight and mode in ('mean', 'max'):\n continue\n\n # 1-D index tensor\n idx = make_long_input((S,), low=0, high=M)\n per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)\n yield SampleInput(make_input((M, S)), args=(idx,),\n kwargs={'offsets': offsets, 'mode': mode,\n 'per_sample_weights': per_sample_weights})\n\n idx = make_long_input((S,), low=0, high=M, noncontiguous=True)\n per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)\n yield SampleInput(make_input((M, S)), args=(idx,),\n kwargs={'offsets': offsets, 'mode': mode,\n 'per_sample_weights': per_sample_weights})\n\n # bag with zero length\n idx = make_long_input((S,), low=0, high=M, noncontiguous=True)\n per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)\n yield SampleInput(make_input((M, S)), 
args=(idx,),\n                                  kwargs={'offsets': torch.tensor([0, 0, 3], device=device, dtype=torch.long),\n                                          'mode': mode,\n                                          'per_sample_weights': per_sample_weights})\n\n                # 2-D index tensor\n                idx = make_long_input((S, S), low=0, high=M)\n                per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)\n                yield SampleInput(make_input((M, S)), args=(idx,),\n                                  kwargs={'mode': mode, 'per_sample_weights': per_sample_weights})\n\n                idx = make_long_input((S, S), low=0, high=M, noncontiguous=True)\n                per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)\n                yield SampleInput(make_input((M, S)), args=(idx,),\n                                  kwargs={'mode': mode, 'per_sample_weights': per_sample_weights})\n\n                # The gradient vector at `padding_idx` is not updated.\n                # Negative padding_idx\n                idx = make_long_input((6,), low=0, high=S)\n                idx[0] = 4\n                idx[4] = 4\n                per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)\n                yield SampleInput(make_input((S, S)), args=(idx,),\n                                  kwargs={'padding_idx': -1, 'offsets': offsets,\n                                          'mode': mode, 'per_sample_weights': per_sample_weights},)\n\n                idx = make_long_input((3, 3), low=0, high=S)\n                # Positive padding_idx\n                idx[0, 0] = 2\n                idx[1, 1] = 2\n                per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)\n                yield SampleInput(make_input((S, S)), args=(idx,),\n                                  kwargs={'padding_idx': 2, 'mode': mode,\n                                          'per_sample_weights': per_sample_weights},)\n\n                idx = make_long_input((6, ), low=0, high=S)\n                weights = make_input((S, S))\n                offsets_ = torch.tensor([0, 3, 6], device=device, dtype=torch.long)\n                per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)\n                yield SampleInput(weights, args=(idx,),\n                                  kwargs={'mode': mode, 'offsets': offsets_, 'include_last_offset': True},)\n\n                if not requires_grad:\n                    # The following inputs return a different gradient from the numerical gradient.\n                    # This is expected and relevant tests are present in `test_nn.py`.\n\n                    # Due to inplace renorming of weight, the numerical gradient doesn't match the\n                    # analytical gradient.\n                    idx = make_long_input((2, 2), low=0, high=S)\n                    weights = make_input((S, S)) * 2\n                    per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)\n                    yield SampleInput(weights, args=(idx,),\n                                      kwargs={'max_norm': 1., 'mode': mode,\n                                              'per_sample_weights': per_sample_weights},)\n\n                    idx = make_long_input((6, ), low=0, high=S)\n                    weights = make_input((S, S)) * 2\n                    per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)\n                    yield SampleInput(weights, args=(idx,),\n                                      kwargs={'max_norm': 1., 'norm_type': 1.0,\n                                              'mode': mode, 'offsets': offsets,\n                                              'per_sample_weights': per_sample_weights},)\n\n                    if mode != 'max':\n                        # Scale the gradient based on the inverse frequency of a particular index.\n                        # Note : max mode does not support sparse weights\n                        idx = make_long_input((2, 2), low=0, high=S)\n                        idx[0, 0] = 1\n                        idx[0, 1] = 1\n                        weights = make_input((S, S))\n                        per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)\n                        yield SampleInput(weights, args=(idx,),\n                                          kwargs={'scale_grad_by_freq': True, 'mode': mode,\n                                                  'per_sample_weights': per_sample_weights},)\n\n                        # gradcheck not implemented for sparse tensors.\n                        # Note : max mode does not support sparse weights\n                        idx = make_long_input((6, ), low=0, high=S)\n                        weights = make_input((S, S))\n                        per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)\n                        yield SampleInput(weights, args=(idx,),\n                                          kwargs={'sparse': True, 'offsets': offsets,\n                                                  'mode': mode, 'per_sample_weights': per_sample_weights})\n\n
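                        # Illustrative note (added; not part of the original file): this final\n                        # case deliberately stacks sparse gradients, frequency scaling, a\n                        # padding index and max_norm renorming into one sample.\n                        idx = 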
make_long_input((6, ), low=0, high=S)\n idx[0] = 1 # freq more than 1\n idx[1] = 1 # freq more than 1\n idx[3] = 0 # padding_idx\n weights = make_input((S, S)) * 2\n per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)\n yield SampleInput(weights, args=(idx,),\n kwargs={'sparse': True, 'scale_grad_by_freq': True, 'padding_idx': 0,\n 'max_norm': 1., 'offsets': offsets,\n 'mode': mode, 'per_sample_weights': per_sample_weights})\n\n return list(generator())\n\n\ndef sample_inputs_embedding(op_info, device, dtype, requires_grad, **kwargs):\n def make_input(shape):\n return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)\n\n def make_long_input(shape, *, low, high):\n return make_tensor(shape, device=device, dtype=torch.long, low=low, high=high)\n\n def generator():\n # 0-D index tensor\n idx = make_long_input((), low=0, high=M)\n yield SampleInput(make_input((M, S)), args=(idx,),)\n\n # 1-D index tensor\n idx = make_long_input((S,), low=0, high=M)\n yield SampleInput(make_input((M, S)), args=(idx,),)\n\n # 2-D index tensor\n idx = make_long_input((S, S), low=0, high=M)\n yield SampleInput(make_input((M, S)), args=(idx,),)\n\n if not requires_grad:\n # Following inputs return different gradient from the numerical gradient.\n # This is expected and relevant tests are present in `test_nn.py`.\n\n # The gradient vector at `padding_idx` is not updated.\n idx = make_long_input((2, 2), low=0, high=S)\n idx[0, 0] = 2\n idx[1, 1] = 2\n yield SampleInput(make_input((S, S)), args=(idx,), kwargs={'padding_idx': 2},)\n\n idx = make_long_input((2, 2), low=0, high=S)\n idx[0, 0] = 4\n idx[1, 1] = 4\n yield SampleInput(make_input((S, S)), args=(idx,), kwargs={'padding_idx': -1},)\n\n # Due to inplace renorming of weight, the numerical gradient doesn't match the\n # analytical gradient.\n idx = make_long_input((2, 2), low=0, high=S)\n weights = make_input((S, S)) * 2\n yield SampleInput(weights, args=(idx,), kwargs={'max_norm': 1.},)\n\n idx = make_long_input((2, 2), low=0, high=S)\n weights = make_input((S, S)) * 2\n yield SampleInput(weights, args=(idx,), kwargs={'max_norm': 1., 'norm_type': 1.0},)\n\n # Scale the gradient based on the inverse frequency of a particular index.\n idx = make_long_input((2, 2), low=0, high=S)\n idx[0, 0] = 1\n idx[0, 1] = 1\n weights = make_input((S, S))\n yield SampleInput(weights, args=(idx,), kwargs={'scale_grad_by_freq': True},)\n\n # gradcheck not implemented for sparse tensors.\n idx = make_long_input((2, 2), low=0, high=S)\n weights = make_input((S, S))\n yield SampleInput(weights, args=(idx,), kwargs={'sparse': True})\n\n idx = make_long_input((3, 3), low=0, high=S)\n idx[0, 0] = 1 # freq more than 1\n idx[0, 1] = 1 # freq more than 1\n idx[1, 0] = 0 # padding_idx\n weights = make_input((S, S)) * 2\n yield SampleInput(weights, args=(idx,),\n kwargs={'sparse': True, 'scale_grad_by_freq': True,\n 'padding_idx': 0, 'max_norm': 1.})\n\n return list(generator())\n\n\ndef sample_inputs_one_hot(op_info, device, dtype, requires_grad, **kwargs):\n def make_input(shape, *, low, high):\n return make_tensor(shape, device=device, dtype=dtype, low=low, high=high, requires_grad=requires_grad)\n\n shapes = ((), (S,), (L, M, S))\n num_classess = (-1, 10)\n\n return [\n SampleInput(\n make_input(\n shape,\n low=0,\n high=10 if num_classes == -1 else num_classes // 2,\n ),\n kwargs=dict(num_classes=num_classes),\n )\n for shape, num_classes in itertools.product(shapes, num_classess)\n ]\n\ndef sample_inputs_softplus(op_info, device, 
dtype, requires_grad, **kwargs):\n make_input = partial(make_tensor, (S,), device=device, dtype=dtype, requires_grad=requires_grad)\n\n return [\n SampleInput(make_input()),\n SampleInput(make_input(), kwargs=dict(beta=3)),\n SampleInput(make_input(low=1), kwargs=dict(threshold=1)),\n ]\n\ndef sample_inputs_tensorinv(op_info, device, dtype, requires_grad, **kwargs):\n def make_input():\n return make_fullrank_matrices_with_distinct_singular_values(12, 12, device=device, dtype=dtype)\n\n # lhs / rhs shape can have any number of dimensions as long as their product equals 12\n shapes = [\n ((2, 2, 3), (12, 1)),\n ((4, 3), (6, 1, 2)),\n ]\n\n samples = []\n for shape_lhs, shape_rhs in shapes:\n inp = make_input().reshape(*shape_lhs, *shape_rhs).detach()\n inp.requires_grad_(requires_grad)\n samples.append(SampleInput(inp, kwargs=dict(ind=len(shape_lhs))))\n\n return samples\n\ndef sample_inputs_tensorsolve(op_info, device, dtype, requires_grad, **kwargs):\n a_shapes = [(2, 3, 6), (3, 4, 4, 3)]\n # Zero-dim tensors are not supported in NumPy, so we skip them for now.\n # NumPy is used in reference check tests.\n # See https://github.com/numpy/numpy/pull/20482 for tracking NumPy bugfix.\n # a_shapes += [(0, 0, 1, 2, 3, 0)]\n dimss = [None, (0, 2)]\n\n def gen_inputs():\n for a_shape, dims in itertools.product(a_shapes, dimss):\n a = make_tensor(a_shape, dtype=dtype, device=device, requires_grad=requires_grad)\n b = make_tensor(a_shape[:2], dtype=dtype, device=device, requires_grad=requires_grad)\n yield SampleInput(a, args=(b,), kwargs=dict(dims=dims))\n\n return list(gen_inputs())\n\ndef sample_inputs_mse_loss(op_info, device, dtype, requires_grad, **kwargs):\n _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n shapes_and_kwargs = [\n ((), None),\n ((S,), dict(reduction=\"mean\")),\n ((S,), dict(reduction=\"sum\")),\n ((S,), dict(reduction=\"none\")),\n ((S, S), None),\n ((S, S, S), None),\n ]\n\n return [\n SampleInput(_make_tensor(shape), args=(_make_tensor(shape),), kwargs=kwargs)\n for shape, kwargs in shapes_and_kwargs\n ]\n\ndef sample_inputs_grid_sample(op_info, device, dtype, requires_grad, **kwargs):\n _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n batch_size = 2\n num_channels = 3\n modes = (\"bilinear\", \"nearest\")\n align_cornerss = (False, True)\n padding_modes = (\"zeros\", \"border\", \"reflection\")\n\n sample_inputs = []\n for dim in (2, 3):\n\n modes_ = (*modes, \"bicubic\") if dim == 2 else modes\n\n for mode, padding_mode, align_corners in itertools.product(modes_, padding_modes, align_cornerss):\n sample_inputs.append(\n SampleInput(\n _make_tensor((batch_size, num_channels, *[S] * dim)),\n args=(_make_tensor((batch_size, *[S] * dim, dim)),),\n kwargs=dict(\n mode=mode,\n padding_mode=padding_mode,\n align_corners=align_corners,\n )\n )\n )\n\n return sample_inputs\n\ndef sample_inputs_cosine_embedding_loss(op_info, device, dtype, requires_grad, **kwargs):\n make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n def make_target(shape):\n shape = () if len(shape) == 1 else (shape[0], )\n t = torch.randint(0, 2, shape, device=device, dtype=torch.long)\n # Label with -1 or 1\n t = t * 2 - 1\n target = t.to(dtype=dtype).detach()\n return target\n\n def gen_inputs():\n shapes = ((S, S), (S,))\n reductions = ('none', 'mean', 'sum')\n for s, r in product(shapes, reductions):\n yield SampleInput(\n make_input(s),\n args=(make_input(s), 
make_target(s)),\n kwargs=dict(reduction=r, margin=random.uniform(-1, 1))\n )\n\n return list(gen_inputs())\n\ndef sample_inputs_ctc_loss(op_info, device, dtype, requires_grad, **kwargs):\n input_length = 50\n batch = 16\n num_char = 20\n target_length = 30\n\n def make_log_probs(s):\n t = make_tensor(s, device=device, dtype=dtype)\n log_probs = t.log_softmax(2).to(device=device, dtype=dtype).detach().requires_grad_(requires_grad=requires_grad)\n return log_probs\n\n def gen_inputs():\n reductions = ('none', 'mean', 'sum')\n zero_inf = (True, False)\n for r, z in product(reductions, zero_inf):\n log_probs = make_log_probs((input_length, batch, num_char))\n targets = torch.randint(1, num_char, (batch, target_length), dtype=torch.long, device=device)\n input_lengths = torch.full((batch, ), input_length, dtype=torch.long, device=device)\n target_lengths = torch.randint(10, target_length, (batch, ), dtype=torch.long, device=device)\n\n yield SampleInput(log_probs, args=(targets, input_lengths, target_lengths,), kwargs=dict(reduction=r, zero_infinity=z))\n\n return list(gen_inputs())\n\ndef sample_inputs_nll_loss(op_info, device, dtype, requires_grad, **kwargs):\n shape = (2, 3)\n num_classes = shape[1]\n make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n make_weight = partial(make_tensor, shape=(num_classes,), device=device, dtype=dtype)\n\n def make_target(shape, zeros=False):\n s = (shape[0], *shape[2:]) if len(shape) > 1 else ()\n if zeros:\n return torch.zeros(s, device=device, dtype=torch.long)\n else:\n return make_tensor(s,\n low=0,\n high=shape[1] if len(shape) > 1 else shape[0],\n device=device,\n dtype=torch.long)\n\n\n def gen_shape_kwargs():\n # Batched, non-batched and 2d\n shapes = (shape, (num_classes,), shape + (2, 2))\n reductions = ('none', 'mean', 'sum')\n for reduction, s in product(reductions, shapes):\n yield make_input(s), make_target(s), dict(reduction=reduction)\n yield make_input(s), make_target(s), dict(weight=make_weight(), reduction=reduction)\n yield make_input(s), make_target(s), dict(weight=make_weight(low=0), reduction=reduction)\n yield make_input(s), make_target(s), dict(weight=make_weight(high=0), reduction=reduction)\n t = make_target(s)\n ignore = num_classes // 2\n # If \"mean\", nll returns NaN, so it's not differentiable at those points\n if t.eq(ignore).all() and reduction == \"mean\":\n t.fill_(0)\n yield make_input(s), t, dict(ignore_index=num_classes // 2, reduction=reduction)\n # Test ignoring all the targets\n # If \"mean\", nll returns NaN, so it's not differentiable at those points\n if reduction != \"mean\":\n yield make_input(s), make_target(s, zeros=True), dict(ignore_index=0, reduction=reduction)\n\n def gen_inputs():\n for input, target, kwargs in gen_shape_kwargs():\n yield SampleInput(input, args=(target,), kwargs=kwargs)\n\n return list(gen_inputs())\n\ndef sample_inputs_argwhere(op_info, device, dtype, requires_grad, **kwargs):\n def generator():\n yield SampleInput(torch.tensor([1, 0, 2, 0], dtype=dtype, device=device, requires_grad=requires_grad))\n\n mask = torch.tensor([[0, 1, 0, 1, 0],\n [1, 1, 1, 1, 0],\n [0, 0, 0, 1, 0],\n [1, 0, 1, 1, 0],\n [1, 0, 0, 1, 0]], dtype=torch.bool, device=device)\n t = make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad)\n with torch.no_grad():\n t[mask] = 0\n yield SampleInput(t)\n\n t = make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad, noncontiguous=True)\n with torch.no_grad():\n t[mask] = 0\n yield 
SampleInput(t)\n\n t = make_tensor((S, 0), dtype=dtype, device=device, requires_grad=requires_grad)\n yield SampleInput(t)\n\n yield SampleInput(torch.zeros((S,), dtype=dtype, device=device, requires_grad=requires_grad))\n yield SampleInput(make_tensor((), dtype=dtype, device=device, requires_grad=requires_grad))\n\n return list(generator())\n\ndef _generate_sample_shape_reduction():\n shapes = ((S,), (S, S), (S, S, S))\n reductions = ('none', 'mean', 'sum')\n for s, r in product(shapes, reductions):\n yield s, r\n\ndef sample_inputs_gaussian_nll_loss(op_info, device, dtype, requires_grad, **kwargs):\n _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n make_var = partial(make_tensor, low=0, device=device, dtype=dtype, requires_grad=requires_grad)\n\n def gen_shape(shape):\n yield shape\n # Broadcast\n yield (*shape[:-1], 1)\n yield shape[:-1]\n\n def gen_shape_kwargs():\n for s, r in _generate_sample_shape_reduction():\n for t_s, v_s in product(gen_shape(s), gen_shape(s)):\n yield _make_tensor(s), _make_tensor(t_s), make_var(v_s), dict(reduction=r)\n yield (\n _make_tensor(s), _make_tensor(t_s), make_var(v_s),\n dict(full=True, reduction=r)\n )\n yield (\n _make_tensor(s), _make_tensor(t_s), make_var(v_s),\n dict(eps=random.uniform(1e-6, 1e-3), reduction=r)\n )\n yield (\n _make_tensor(s), _make_tensor(t_s), make_var(v_s),\n dict(full=True, eps=random.uniform(1e-6, 1e-3), reduction=r)\n )\n\n def gen_inputs():\n for input, target, var, kwargs in gen_shape_kwargs():\n yield SampleInput(input, args=(target, var, ), kwargs=kwargs)\n\n return list(gen_inputs())\n\ndef _generate_sample_inputs_nn_loss(op_info, device, dtype, requires_grad, **kwargs):\n _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n for s, r in _generate_sample_shape_reduction():\n yield _make_tensor(s), _make_tensor(s), dict(reduction=r)\n\ndef sample_inputs_hinge_embedding_loss(op_info, device, dtype, requires_grad, **kwargs):\n def gen_inputs():\n for input, target, d in _generate_sample_inputs_nn_loss(op_info, device, dtype, requires_grad, **kwargs):\n d['margin'] = random.uniform(-9, 9)\n yield SampleInput(input, args=(target, ), kwargs=d)\n\n return list(gen_inputs())\n\ndef sample_inputs_huber_loss(op_info, device, dtype, requires_grad, **kwargs):\n def gen_inputs():\n for input, target, d in _generate_sample_inputs_nn_loss(op_info, device, dtype, requires_grad, **kwargs):\n d['delta'] = random.uniform(1e-3, 9)\n yield SampleInput(input, args=(target, ), kwargs=d)\n\n return list(gen_inputs())\n\ndef sample_inputs_poisson_nll_loss(op_info, device, dtype, requires_grad, **kwargs):\n _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n def gen_shape_kwargs():\n for s, r in _generate_sample_shape_reduction():\n for li in (True, False):\n for f in (True, False):\n yield (\n _make_tensor(s), _make_tensor(s),\n dict(log_input=li, full=f, reduction=r)\n )\n yield (\n _make_tensor(s), _make_tensor(s),\n dict(log_input=li, full=f,\n eps=random.uniform(1e-8, 1e-3),\n reduction=r)\n )\n\n def gen_inputs():\n for input, target, kwargs in gen_shape_kwargs():\n yield SampleInput(input, args=(target, ), kwargs=kwargs)\n\n return list(gen_inputs())\n\ndef sample_inputs_pairwise_distance(op_info, device, dtype, requires_grad, **kwargs):\n make = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n shape = (3,)\n batched_shape = (2, *shape)\n shapes_and_kwargs = [\n (shape, 
None),\n (batched_shape, None),\n (shape, dict(keepdim=True)),\n (batched_shape, dict(keepdim=True)),\n (shape, dict(p=5.0)),\n (shape, dict(p=-1.0)),\n (shape, dict(eps=1.0)),\n ]\n\n return [\n SampleInput(make(shape), args=(make(shape),), kwargs=kwargs) for shape, kwargs in shapes_and_kwargs\n ]\n\ndef sample_inputs_pixel_shuffle(op_info, device, dtype, requires_grad, **kwargs):\n return [\n SampleInput(\n make_tensor((1, 9, 2, 2), device=device, dtype=dtype, requires_grad=requires_grad),\n kwargs=dict(upscale_factor=upscale_factor),\n )\n for upscale_factor in (1, 3)\n ]\n\ndef sample_inputs_pixel_unshuffle(op_info, device, dtype, requires_grad, **kwargs):\n return [\n SampleInput(\n make_tensor((1, 1, 6, 6), device=device, dtype=dtype, requires_grad=requires_grad),\n kwargs=dict(downscale_factor=downscale_factor),\n )\n for downscale_factor in (1, 3)\n ]\n\ndef sample_inputs_allclose(op_info, device, dtype, requires_grad, **kwargs):\n samples = []\n sample_shapes = [(), (S,), (S, S, S)]\n atols = [1e-2, 1e-16]\n rtols = [1e-1, 0.5]\n eps = 1e-8\n for s, rtol, atol in product(sample_shapes, rtols, atols):\n # close sample\n t = make_tensor(s, device=device, dtype=dtype, requires_grad=requires_grad)\n close = (t + atol).detach().requires_grad_(requires_grad)\n close_sample = SampleInput(t, args=(close,), kwargs=dict(rtol=rtol, atol=atol))\n samples.append(close_sample)\n\n # random sample\n a = make_tensor(s, device=device, dtype=dtype, requires_grad=requires_grad)\n b = make_tensor(s, device=device, dtype=dtype, requires_grad=requires_grad)\n r_sample = SampleInput(a, args=(b,), kwargs=dict(rtol=rtol, atol=atol))\n samples.append(r_sample)\n\n return samples\n\n
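# How the sample_inputs_* functions above are consumed (an illustrative sketch,\n# not code from this file): each one is attached to an OpInfo via the\n# sample_inputs_func argument, and the tests then iterate over OpInfo.sample_inputs, e.g.\n# >>> for sample in op_info.sample_inputs(device, dtype, requires_grad=True):\n# ... result = op_info.op(sample.input, *sample.args, **sample.kwargs)\n\n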
foreach_unary_op_db: List[OpInfo] = [\n ForeachFuncInfo('exp'),\n ForeachFuncInfo('acos'),\n ForeachFuncInfo('asin'),\n ForeachFuncInfo('atan'),\n ForeachFuncInfo('cos'),\n ForeachFuncInfo('cosh'),\n ForeachFuncInfo('log'),\n ForeachFuncInfo('log10'),\n ForeachFuncInfo('log2'),\n ForeachFuncInfo('tan'),\n ForeachFuncInfo('tanh'),\n ForeachFuncInfo('sin'),\n ForeachFuncInfo('sinh'),\n\n ForeachFuncInfo(\n 'neg',\n dtypes=all_types_and_complex(),\n dtypesIfCUDA=all_types_and_complex(),\n sample_inputs_func=sample_inputs_foreach,\n safe_casts_outputs=False,\n ),\n\n ForeachFuncInfo(\n 'sqrt',\n dtypes=floating_and_complex_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.half),\n ),\n\n ForeachFuncInfo(\n 'ceil',\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n ),\n\n ForeachFuncInfo(\n 'erf',\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n ),\n\n ForeachFuncInfo(\n 'erfc',\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n ),\n\n ForeachFuncInfo(\n 'expm1',\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n ),\n\n ForeachFuncInfo(\n 'floor',\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n ),\n\n ForeachFuncInfo(\n 'log1p',\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half),\n ),\n\n ForeachFuncInfo(\n 'round',\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n ),\n\n ForeachFuncInfo(\n 'frac',\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n ),\n\n ForeachFuncInfo(\n 'reciprocal',\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half),\n ),\n\n ForeachFuncInfo(\n 'sigmoid',\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half),\n ),\n\n ForeachFuncInfo(\n 'trunc',\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n ),\n\n ForeachFuncInfo(\n 'abs',\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),\n dtypesIfCUDA=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),\n safe_casts_outputs=False,\n supports_forward_ad=True,\n ),\n]\n\nforeach_binary_op_db: List[OpInfo] = [\n ForeachFuncInfo(\n \"add\",\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_alpha_param=True,\n ),\n ForeachFuncInfo(\n \"sub\",\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_alpha_param=True,\n ),\n ForeachFuncInfo(\n \"mul\",\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n ),\n ForeachFuncInfo(\n \"div\",\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n ),\n]\n\nforeach_pointwise_op_db: List[ForeachFuncInfo] = [\n ForeachFuncInfo(\n \"addcmul\",\n dtypes=all_types_and_complex(),\n dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16),\n ),\n ForeachFuncInfo(\n \"addcdiv\",\n dtypes=all_types_and_complex(),\n dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16),\n ),\n]\n\nforeach_minmax_op_db: List[ForeachFuncInfo] = [\n ForeachFuncInfo(\n \"maximum\",\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n dtypesIfCUDA=all_types_and(torch.float16, torch.bool),\n ),\n ForeachFuncInfo(\n \"minimum\",\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n dtypesIfCUDA=all_types_and(torch.float16, torch.bool),\n ),\n]\n\nforeach_reduce_op_db: List[ForeachFuncInfo] = [\n ForeachFuncInfo(\n \"norm\",\n dtypesIfCPU=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n ),\n]\n\ndef reference_sign(x):\n if x.dtype == np.bool_:\n # `np.sign` doesn't support `bool`.\n # >>> np.sign(True)\n # ufunc 'sign' did not contain a loop\n # with signature matching types dtype('bool') -> dtype('bool')\n return np.sign(x, dtype=np.uint8).astype(np.bool_)\n return np.sign(x)\n\n\ndef reference_sgn(x):\n # NumPy doesn't have an equivalent to `torch.sgn` when the dtype is complex.\n # For complex inputs, `np.sign` returns sign(x.real) + 0j if x.real != 0 else sign(x.imag) + 0j,\n # while `torch.sgn` returns 0 if abs(input) == 0 else input/abs(input).\n if x.dtype not in [np.complex64, np.complex128]:\n return reference_sign(x)\n\n out = (x / np.abs(x))\n if out.ndim == 0:\n # Handle x == 0 case\n if (x == 0):\n # Can't assign to np.complex object\n # So make a new one.\n return np.array(complex(0, 0), dtype=x.dtype)\n return out\n\n # Handle x == 0 case\n mask = (x == 0)\n out[mask] = complex(0, 0)\n return out\n\n\n
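# A quick illustration of the np.sign / torch.sgn mismatch documented above\n# (values are illustrative only and not used by any test):\n# >>> z = np.array([3 + 4j])\n# >>> np.sign(z) # array([1.+0.j]), i.e. sign(z.real) + 0j\n# >>> reference_sgn(z) # array([0.6+0.8j]), i.e. z / abs(z), matching torch.sgn\n\n\n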
def reference_sigmoid(x):\n # 'scipy.special.expit' does not support complex inputs\n if x.dtype in [np.complex64, np.complex128]:\n return (1 / (1 + np.exp(-x)))\n return scipy.special.expit(x)\n\n\ndef reference_logsigmoid(x):\n return np.where(\n x < 0,\n x - np.log1p(np.exp(x)),\n -np.log1p(np.exp(-x)))\n\n\ndef reference_hardsigmoid(x):\n intermediate = x / 6 + 0.5\n y = np.clip(intermediate, 0, None)\n return np.where(y > 1, 1, y).astype(x.dtype)\n\n\ndef reference_lgamma(x):\n # scipy.special.gammaln returns `-inf` when input is `-inf`,\n # while PyTorch, C, and C++ all return `inf` when input is `-inf`.\n # Reference:\n # https://en.cppreference.com/w/cpp/numeric/math/lgamma\n # https://en.cppreference.com/w/c/numeric/math/lgamma\n\n # To handle the above discrepancy,\n # we replace -inf with inf so values\n # that were originally -inf map to inf as expected\n if x.dtype.kind == 'f':\n x = np.where(x == float('-inf'), np.array(float('inf'), dtype=x.dtype), x)\n\n out = scipy.special.gammaln(x)\n\n if x.dtype == np.float16:\n # `scipy.special.gammaln` returns output of float32 when input is float16,\n # while `torch.lgamma` preserves `float16`. But due to the smaller range of float16,\n # the PyTorch version outputs `inf` while SciPy returns finite values.\n out = out.astype(np.float16)\n\n return out\n\ndef reference_polygamma(x, n):\n # WEIRD `scipy.special.polygamma` behavior\n # >>> scipy.special.polygamma(0, np.array(501, dtype=np.float32)).dtype\n # dtype('float64')\n # >>> scipy.special.polygamma(0, np.array([501], dtype=np.float32)).dtype\n # dtype('float32')\n #\n # Thus we cast output to the default torch dtype.\n np_dtype = torch_to_numpy_dtype_dict[torch.get_default_dtype()]\n return scipy.special.polygamma(n, x).astype(np_dtype)\n\n\ndef reference_mvlgamma(x, d):\n if x.dtype == np.float16:\n return scipy.special.multigammaln(x, d).astype(np.float16)\n\n return scipy.special.multigammaln(x, d)\n\ndef reference_softplus(input, beta=1, threshold=20):\n non_linear = input * beta <= threshold\n output = input.copy()\n output[non_linear] = np.log(1 + np.exp(beta * input[non_linear])) / beta\n return output\n\n\ndef reference_one_hot(a: np.ndarray, num_classes: int = -1) -> np.ndarray:\n if num_classes == -1:\n num_classes = int(np.amax(a) + 1)\n\n idcs = a.reshape(-1) + np.arange(0, a.size, dtype=np.int64) * num_classes\n one_hot = np.zeros((a.size, num_classes), dtype=a.dtype)\n np.put(one_hot, idcs, 1)\n return one_hot.reshape(*a.shape, -1)\n\n\ndef reference_mse_loss(input, target, reduction=\"mean\"):\n se = (input - target) ** 2\n if reduction == \"mean\":\n return np.mean(se)\n elif reduction == \"sum\":\n return np.sum(se)\n else: # reduction == \"none\"\n return se\n\n\ndef wrapper_set_seed(op, input, *args, **kwargs):\n \"\"\"Wrapper to set seed manually for some functions like dropout\n See: https://github.com/pytorch/pytorch/pull/62315#issuecomment-896143189 for more details.\n \"\"\"\n torch.manual_seed(42)\n return op(input, *args, **kwargs)\n\ndef reference_layer_norm(inp: np.ndarray, normalized_shape: Tuple[int], weight=None, bias=None, eps=1e-5):\n feature_size = np.prod(normalized_shape)\n inp_view = inp.reshape(-1, feature_size) # type: ignore[call-overload]\n mean = inp_view.mean(axis=-1, keepdims=True)\n var = inp_view.var(axis=-1, ddof=0, keepdims=True)\n Y = (inp_view - mean) / np.sqrt(var + eps)\n if weight is None and bias is not None:\n Y = Y + bias.reshape(-1)\n elif weight is not None and bias is None:\n Y = Y * weight.reshape(-1)\n elif weight is not None and bias is not None:\n Y = Y * weight.reshape(-1) + bias.reshape(-1)\n return Y.reshape(*inp.shape)\n\n
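# Minimal sanity check for the reference above (a sketch, assuming float32\n# inputs; not executed by the suite):\n# >>> x = np.random.randn(2, 3, 4).astype(np.float32)\n# >>> expected = torch.nn.functional.layer_norm(torch.from_numpy(x), (3, 4)).numpy()\n# >>> np.testing.assert_allclose(reference_layer_norm(x, (3, 4)), expected, rtol=1e-5, atol=1e-5)\n\n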
def reference_group_norm(inp: np.ndarray, num_groups: int, weight=None, bias=None, eps=1e-5):\n inp_view = inp\n if np.prod(inp.shape) != 0:\n inp_view = inp.reshape((inp.shape[0], num_groups, -1))\n mean = inp_view.mean(axis=-1, keepdims=True)\n var = inp_view.var(axis=-1, ddof=0, keepdims=True)\n Y = (inp_view - mean) / np.sqrt(var + eps)\n Y = Y.reshape(inp.shape)\n if weight is not None:\n # weight is a vector of length equal to the channel\n if len(Y.shape) > 2:\n weight = np.tile(np.expand_dims(weight, 1), [1] + list(inp.shape[2:]))\n Y = Y * weight\n if bias is not None:\n # bias is a vector of length equal to the channel\n if len(Y.shape) > 2:\n bias = np.tile(np.expand_dims(bias, 1), [1] + list(inp.shape[2:]))\n Y = Y + bias\n return Y\n\n\n# using a custom reference function since numpy only has a string `side` arg (instead of separate `right` and `side` args) and doesn't\n# have an `out_int32` arg. Additionally, numpy doesn't support searchsorted with ND arrays, so this splits those into\n# stacked 1D cases\ndef reference_searchsorted(sorted_sequence, boundary, out_int32=False, right=False, side='left', sorter=None):\n side = 'right' if (right or side == 'right') else 'left'\n if len(sorted_sequence.shape) == 1:\n ret = np.searchsorted(sorted_sequence, boundary, side=side, sorter=sorter)\n return ret.astype(np.int32) if out_int32 else ret\n elif sorted_sequence.shape[0] == 0:\n if sorter is not None:\n sorter = sorter.flatten()\n ret = np.searchsorted(sorted_sequence.flatten(), boundary.flatten(), side=side, sorter=sorter)\n ret = ret.astype(np.int32) if out_int32 else ret\n return ret.reshape(boundary.shape)\n else:\n # numpy searchsorted only supports 1D inputs so we split up ND inputs\n orig_shape = boundary.shape\n num_splits = np.prod(sorted_sequence.shape[:-1])\n splits = range(0, num_splits)\n sorted_sequence, boundary = sorted_sequence.reshape(num_splits, -1), boundary.reshape(num_splits, -1)\n if sorter is not None:\n sorter = sorter.reshape(num_splits, -1)\n\n split_sequence = [sorted_sequence[i] for i in splits]\n split_boundary = [boundary[i] for i in splits]\n split_sorter = [sorter[i] if (sorter is not None) else None for i in splits]\n\n split_ret = [np.searchsorted(s_seq, b, side=side, sorter=s_sort)\n for (s_seq, b, s_sort) in zip(split_sequence, split_boundary, split_sorter)]\n split_ret = [i.astype(np.int32) for i in split_ret] if out_int32 else split_ret\n return np.stack(split_ret).reshape(orig_shape)\n\n\ndef gradcheck_wrapper_hermitian_input(op, input, *args, **kwargs):\n \"\"\"Gradcheck wrapper for functions that take Hermitian matrices as input.\n\n They require a modified function because the finite-difference algorithm\n for calculating derivatives does not preserve the Hermitian property of the input.\n \"\"\"\n return op(input + input.mH, *args, **kwargs)\n\n\ndef gradcheck_wrapper_triangular_input(op, *args, upper=False, idx=0, **kwargs):\n \"\"\"Gradcheck wrapper for functions that take lower or upper triangular matrices as input.\n\n They require a modified function because the finite-difference algorithm\n for calculating derivatives does not preserve the triangular property of the input.\n `idx` specifies which of the `args`, namely `args[idx]`, is to be triangularized.\n \"\"\"\n triangular_arg = args[idx].triu() if upper else args[idx].tril()\n return op(*args[:idx], triangular_arg, *args[idx + 1:], upper, **kwargs)\n\n\ndef gradcheck_wrapper_masked_operation(op, input, *args, **kwargs):\n \"\"\"Gradcheck wrapper for masked operations.\n\n When mask is specified, replaces masked-out elements with zeros.\n\n Use for operations that produce non-finite masked-out elements,\n for instance, for minimum and maximum reductions.\n \"\"\"\n output = op(input, *args, **kwargs)\n mask = kwargs.get('mask')\n if mask is not None:\n output_mask = torch._masked._output_mask(op, input, *args, **kwargs)\n output = torch.where(output_mask, output, output.new_zeros([]))\n return output\n\n\n
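# How the gradcheck wrappers above are used (an illustrative sketch, not part\n# of the database): an OpInfo passes one of them as its gradcheck_wrapper, and\n# the gradient tests then differentiate through the wrapped call, e.g.\n# >>> a = torch.randn(3, 3, dtype=torch.double, requires_grad=True)\n# >>> torch.autograd.gradcheck(\n# ... lambda x: gradcheck_wrapper_hermitian_input(torch.linalg.eigvalsh, x), (a,))\n\n\n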
def reference_reduction_numpy(f, supports_keepdims=True):\n \"\"\"Wraps a NumPy reduction operator.\n\n The wrapper function will forward dim, keepdim, mask, and identity\n kwargs to the wrapped function as the NumPy equivalent axis,\n keepdims, where, and initial kwargs, respectively.\n\n Args:\n f: NumPy reduction operator to wrap\n supports_keepdims (bool, optional): Whether the NumPy operator accepts\n the keepdims parameter. If it does not, the wrapper will manually unsqueeze\n the reduced dimensions if it was called with keepdim=True. Defaults to True.\n\n Returns:\n Wrapped function\n\n \"\"\"\n @wraps(f)\n def wrapper(x: np.ndarray, *args, **kwargs):\n # Copy keys into a set\n keys = set(kwargs.keys())\n\n dim = kwargs.pop('dim', None)\n keepdim = kwargs.pop('keepdim', False)\n\n if 'dim' in keys:\n dim = tuple(dim) if isinstance(dim, Sequence) else dim\n\n # NumPy reductions don't accept dim=0 for scalar inputs\n # so we convert it to None if and only if dim is equivalent\n if x.ndim == 0 and dim in {0, -1, (0,), (-1,)}:\n kwargs['axis'] = None\n else:\n kwargs['axis'] = dim\n\n if 'keepdim' in keys and supports_keepdims:\n kwargs['keepdims'] = keepdim\n\n if 'mask' in keys:\n mask = kwargs.pop('mask')\n if mask is not None:\n kwargs['where'] = mask.cpu().numpy()\n\n if 'identity' in keys:\n identity = kwargs.pop('identity')\n if identity is not None:\n if identity.dtype is torch.bfloat16:\n identity = identity.cpu().to(torch.float32)\n else:\n identity = identity.cpu()\n kwargs['initial'] = identity.numpy()\n\n if 'unbiased' in keys:\n unbiased = kwargs.pop('unbiased')\n if unbiased is not None:\n kwargs['ddof'] = int(unbiased)\n\n result = f(x, *args, **kwargs)\n\n # Unsqueeze reduced dimensions if NumPy does not support keepdims\n if keepdim and not supports_keepdims and x.ndim > 0:\n dim = list(range(x.ndim)) if dim is None else dim\n result = np.expand_dims(result, dim)\n\n return result\n\n return wrapper\n\n\ndef reference_std_var(f):\n \"\"\"Forwards unbiased/correction kwargs as NumPy's equivalent ddof\"\"\"\n g = reference_reduction_numpy(f)\n\n @wraps(g)\n def wrapper(x: np.ndarray, *args, **kwargs):\n assert not ('unbiased' in kwargs and 'correction' in kwargs)\n\n if 'unbiased' in kwargs:\n kwargs['ddof'] = int(kwargs.pop('unbiased'))\n elif 'correction' in kwargs:\n kwargs['ddof'] = kwargs.pop('correction')\n\n return g(x, *args, **kwargs)\n\n return wrapper\n\n\ndef generate_std_var_kwargs(t: torch.Tensor, **kwargs):\n \"\"\"Generates unbiased/correction kwargs for std/var operators\"\"\"\n yield ((), {'unbiased': True})\n yield ((), {'unbiased': False})\n\n # Currently, calling std with correction is only enabled when\n # both dim and keepdim are provided.\n if 'dim' in kwargs and 'keepdim' in kwargs:\n yield ((), {'correction': 0})\n yield ((), {'correction': 1})\n\n numel = torch.tensor(t.shape)[kwargs.get('dim')].prod()\n yield ((), {'correction': numel // 2})\n\ndef ref_pairwise_distance(input1, input2):\n pass\n\n\n# Operator database (sorted alphabetically)\nop_db: List[OpInfo] = [\n UnaryUfuncInfo('abs',\n aliases=('absolute', ),\n 
ref=np.abs,\n dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat]),\n # Reference: https://github.com/pytorch/pytorch/issues/49224\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',\n dtypes=[torch.int8], active_if=TEST_WITH_ASAN),\n # TODO: Fix test_out_arg_all_dtypes as torch.empty_like(expected_output) where expected_output=op(input)\n # We can break the logic of the loop over all possible types but it is OK.\n # https://github.com/pytorch/pytorch/blob/master/test/test_unary_ufuncs.py#L440-L449\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_out_arg_all_dtypes',\n dtypes=[torch.cfloat, torch.cdouble]),\n # The complex formula might be wrong\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_forward_mode_AD',\n dtypes=complex_types()),\n ),\n supports_inplace_autograd=False,\n assert_autodiffed=True,\n supports_sparse_csr=True,\n supports_forward_ad=True),\n # NOTE: CPU complex acos produces incorrect outputs (https://github.com/pytorch/pytorch/issues/42952)\n UnaryUfuncInfo('acos',\n aliases=('arccos', ),\n ref=np.arccos,\n domain=(-1, 1),\n handles_complex_extremals=False,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n # \"rsqrt_cpu\" not implemented for 'BFloat16'\n backward_dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),\n assert_autodiffed=True,\n supports_forward_ad=True,\n decorators=(precisionOverride({torch.float16: 1e-2,\n torch.bfloat16: 1e-1,\n torch.complex64: 1e-2}),),\n safe_casts_outputs=True,\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_fn_grad',\n dtypes=[torch.cdouble], active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_method_grad',\n dtypes=[torch.cdouble], active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_inplace_grad',\n dtypes=[torch.cdouble], active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_forward_mode_AD',\n dtypes=[torch.cdouble], active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_inplace_forward_mode_AD',\n dtypes=[torch.cdouble], active_if=IS_WINDOWS),\n )),\n # NOTE: the derivative for inplace acosh is not implemented\n UnaryUfuncInfo('acosh',\n aliases=('arccosh', ),\n ref=np.arccosh,\n domain=(1, None),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n # \"rsqrt_cuda\" not implemented for 'BFloat16'\n backward_dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n safe_casts_outputs=True,\n decorators=(precisionOverride({torch.bfloat16: 5e-2}),),\n supports_inplace_autograd=False,\n supports_forward_ad=True,\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 
'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cuda', dtypes=[torch.cdouble],\n active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cuda', dtypes=[torch.cdouble],\n active_if=IS_WINDOWS),\n # Reference: https://github.com/pytorch/pytorch/issues/50692\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_fn_grad',\n device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_method_grad',\n device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_forward_mode_AD',\n device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),\n ),\n # acosh is not defined at x < 1 (real) or |z| < 1 (complex)\n reference_numerics_filter=NumericsFilter(\n condition=lambda x: (torch.abs(x) < 1 if x.is_complex() else x < 1),\n safe_val=2)),\n BinaryUfuncInfo('add',\n # NumPy has no builtin reference for the alpha kwarg, but it is easy enough to emulate\n ref=lambda input, other, *, alpha=1: np.add(input, other) if alpha == 1 \\\n else np.add(input, np.multiply(alpha, other)),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n assert_autodiffed=True,\n sample_inputs_func=partial(sample_inputs_add_sub, alpha=2),\n supports_inplace_autograd=False,\n supports_forward_ad=True,\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"),\n 'TestBinaryUfuncs',\n 'test_reference_numerics_extremal_values',\n dtypes=(torch.complex64, torch.complex128)),\n )),\n BinaryUfuncInfo('mul',\n aliases=('multiply',),\n dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool),\n assert_autodiffed=True,\n supports_forward_ad=True,\n sample_inputs_func=partial(sample_inputs_binary_pwise, python_scalars=True)),\n BinaryUfuncInfo('sub',\n # NumPy has no builtin reference for the alpha kwarg, but it is easy enough to emulate\n ref=lambda input, other, *, alpha=1: np.subtract(input, np.multiply(alpha, other)),\n aliases=('subtract',),\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16),\n assert_autodiffed=True,\n supports_forward_ad=True,\n sample_inputs_func=partial(sample_inputs_add_sub, alpha=2, python_scalars=True),\n supports_inplace_autograd=False,\n decorators=(\n DecorateInfo(\n toleranceOverride({torch.float16: tol(atol=1e-2, rtol=0)}),\n 'TestBinaryUfuncs', 'test_reference_numerics'),\n ),\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"),\n 'TestBinaryUfuncs',\n 'test_reference_numerics',\n dtypes=(torch.uint8,)),\n DecorateInfo(unittest.skip(\"Skipped!\"),\n 'TestBinaryUfuncs',\n 'test_reference_numerics_small_values',\n dtypes=(torch.uint8,)),\n )),\n OpInfo('addmm',\n # This addmm OpInfo is for when alpha and beta are not both equal to 1.\n # alpha=beta=1 is tested in the following opinfo, because that special case will\n # trigger addmm being decomposed by a jit pass.\n dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),\n dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater 
else []),\n assert_autodiffed=True,\n supports_inplace_autograd=False,\n supports_forward_ad=True,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n sample_inputs_func=sample_inputs_addmm),\n OpInfo('addmm',\n # When alpha=beta=1 as compile-time constants, JIT will decompose addmm into mm and add.\n variant_test_name='decomposed',\n dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),\n dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n assert_autodiffed=True,\n supports_inplace_autograd=False,\n supports_forward_ad=True,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n autodiff_nonfusible_nodes=['aten::add', 'aten::mm'],\n sample_inputs_func=partial(sample_inputs_addmm, alpha=1, beta=1)),\n OpInfo('addmv',\n dtypes=all_types_and_complex_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.complex64, torch.complex128,\n *[torch.bfloat16] if CUDA11OrLater else []),\n dtypesIfROCM=floating_types_and(torch.half),\n supports_inplace_autograd=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_addmv),\n OpInfo('addbmm',\n ref=lambda M, batch1, batch2, beta=1, alpha=1: np.add(np.multiply(np.asarray(beta, dtype=M.dtype), M),\n np.multiply(np.asarray(alpha, dtype=batch1.dtype),\n np.sum(np.matmul(batch1, batch2), axis=0))),\n dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if SM53OrLater else []),\n dtypesIfROCM=floating_types_and(torch.half),\n backward_dtypesIfROCM=floating_types_and(torch.half),\n supports_forward_ad=True,\n decorators=[\n DecorateInfo(\n toleranceOverride({torch.float32: tol(atol=1.3e-05, rtol=1.3e-05),\n torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}),\n 'TestCommon', 'test_reference_testing')],\n skips=(\n # FIXME: bfloat16 backward support likely depends on CUDA11+\n # and SM53+\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_dtypes', active_if=IS_WINDOWS),\n # addbmm does not correctly warn when resizing out= inputs\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),\n # https://github.com/pytorch/pytorch/issues/55907\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_variant_consistency_eager'),\n ),\n sample_inputs_func=sample_inputs_addbmm),\n OpInfo('baddbmm',\n dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.complex64, torch.complex128,\n *[torch.bfloat16] if CUDA11OrLater else []),\n backward_dtypesIfCUDA=floating_types_and(torch.float16,\n *[torch.bfloat16] if SM53OrLater else [],\n torch.complex64, torch.complex128),\n supports_forward_ad=True,\n decorators=[\n DecorateInfo(\n toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}),\n 'TestCommon', 'test_variant_consistency_eager', device_type='cuda'),\n DecorateInfo(\n toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}),\n 'TestMathBits', 'test_conj_view', device_type='cuda')],\n sample_inputs_func=sample_inputs_baddbmm),\n OpInfo('dot',\n dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n assert_autodiffed=True,\n sample_inputs_func=sample_inputs_dot_vdot,\n 
supports_forward_ad=True,\n ),\n OpInfo('vdot',\n dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n sample_inputs_func=sample_inputs_dot_vdot,\n supports_forward_ad=True,\n ),\n OpInfo('bmm',\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if SM53OrLater else []),\n assert_autodiffed=True,\n supports_forward_ad=True,\n skips=(\n # FIXME: bfloat16 backward support likely depends on CUDA11+\n # and SM53+\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_dtypes', active_if=IS_WINDOWS),\n ),\n sample_inputs_func=sample_inputs_bmm),\n OpInfo('mv',\n dtypes=all_types_and_complex_and(torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n assert_autodiffed=True,\n sample_inputs_func=sample_inputs_mv),\n OpInfo('addr',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n backward_dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n backward_dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n # Reference: https://github.com/pytorch/pytorch/issues/50747\n supports_inplace_autograd=False,\n supports_forward_ad=True,\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/50747\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_variant_consistency_eager',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16)),\n ),\n sample_inputs_func=sample_inputs_addr,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),\n OpInfo('addcmul',\n dtypes=all_types_and_complex_and(torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16),\n assert_autodiffed=True,\n supports_forward_ad=True,\n supports_inplace_autograd=False,\n skips=(\n # TODO: update sample inputs with for_inplace_variant kwarg to support this test\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_variant_consistency_eager'),),\n sample_inputs_func=sample_inputs_addcmul_addcdiv),\n OpInfo('addcdiv',\n dtypes=floating_and_complex_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n supports_inplace_autograd=False,\n supports_forward_ad=True,\n skips=(\n # TODO: update sample inputs with for_inplace_variant kwarg to support this test\n DecorateInfo(unittest.skip(\"Skipped!\"),\n 'TestCommon',\n 'test_variant_consistency_eager'),),\n sample_inputs_func=sample_inputs_addcmul_addcdiv),\n UnaryUfuncInfo('asin',\n aliases=('arcsin', ),\n ref=np.arcsin,\n domain=(-1, 1),\n supports_sparse=True,\n supports_sparse_csr=True,\n supports_forward_ad=True,\n safe_casts_outputs=True,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n decorators=[\n DecorateInfo(\n toleranceOverride({torch.float16: tol(atol=1e-05, rtol=1e-03)}),\n 'TestUnaryUfuncs', device_type='cuda'),\n precisionOverride({torch.bfloat16: 1e-2}),\n ],\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n 
DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cuda', dtypes=[torch.cdouble],\n active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cuda', dtypes=[torch.cdouble],\n active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped! sparse backward not supported\"),\n 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),\n )),\n # NOTE: derivative for inplace asinh is not implemented\n UnaryUfuncInfo('asinh',\n aliases=('arcsinh', ),\n ref=np.arcsinh,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n safe_casts_outputs=True,\n decorators=(precisionOverride({torch.bfloat16: 5e-2}),),\n supports_inplace_autograd=False,\n supports_forward_ad=True,\n supports_sparse=True,\n supports_sparse_csr=True,\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cuda', dtypes=[torch.cdouble],\n active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cuda', dtypes=[torch.cdouble],\n active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped! sparse backward not supported\"),\n 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),\n )),\n UnaryUfuncInfo('atan',\n aliases=('arctan', ),\n ref=np.arctan,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n supports_forward_ad=True,\n supports_sparse=True,\n supports_sparse_csr=True,\n decorators=(precisionOverride({torch.bfloat16: 1e-2}),),\n safe_casts_outputs=True,\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],\n active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],\n active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped! 
sparse backward not supported\"),\n 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),\n )),\n OpInfo('atan2',\n aliases=('arctan2',),\n dtypes=all_types_and(torch.bool),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_atan2,\n ),\n UnaryUfuncInfo('atanh',\n aliases=('arctanh', ),\n ref=np.arctanh,\n domain=(-1, 1),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n safe_casts_outputs=True,\n decorators=(precisionOverride({torch.bfloat16: 1e-2}),),\n supports_inplace_autograd=False,\n supports_forward_ad=True,\n supports_sparse=True,\n supports_sparse_csr=True,\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',\n device_type='cpu', dtypes=[torch.cfloat]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],\n active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cuda', dtypes=[torch.cfloat],\n active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped! sparse backward not supported\"),\n 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),\n )),\n OpInfo('allclose',\n dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n ref=np.allclose,\n supports_autograd=False,\n supports_forward_ad=False,\n sample_inputs_func=sample_inputs_allclose,\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False),\n OpInfo('broadcast_to',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_broadcast_to),\n OpInfo('broadcast_tensors',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n skips=(\n # JIT does not support variadic tensors.\n # RuntimeError: input->type()->kind() == TypeKind::OptionalType\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":252,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]),\n ),\n sample_inputs_func=sample_inputs_broadcast_tensors),\n OpInfo('block_diag',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n skips=(\n # JIT does not support variadic tensors.\n # RuntimeError: input->type()->kind() == TypeKind::OptionalType\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":252,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]),\n ),\n sample_inputs_func=sample_inputs_block_diag),\n OpInfo('bitwise_and',\n dtypes=integral_types_and(torch.bool),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_binary_pwise),\n UnaryUfuncInfo('bitwise_not',\n 
ref=np.bitwise_not,\n dtypes=integral_types_and(torch.bool),\n supports_autograd=False),\n OpInfo('bitwise_left_shift',\n op=torch.bitwise_left_shift,\n dtypes=all_types(),\n dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_bitwise_shift),\n OpInfo('bitwise_right_shift',\n op=torch.bitwise_right_shift,\n dtypes=all_types(),\n dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_bitwise_shift),\n OpInfo('combinations',\n op=torch.combinations,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_autograd=False,\n supports_out=False,\n sample_inputs_func=sample_inputs_combinations),\n OpInfo('cartesian_prod',\n op=torch.cartesian_prod,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_autograd=False,\n supports_out=False,\n sample_inputs_func=sample_inputs_cartesian_prod,\n skips=(\n # RuntimeError: input->type()->kind() == TypeKind::OptionalType\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":270\n DecorateInfo(unittest.skip(\"Skipped!\"),\n 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),\n )),\n OpInfo('cdist',\n dtypes=floating_types(),\n supports_out=False,\n supports_gradgrad=False,\n assert_autodiffed=False,\n sample_inputs_func=sample_inputs_cdist,\n skips=(\n # RuntimeError: _cdist_backward requires X1 to be contiguous\n DecorateInfo(unittest.skip(\"_cdist_backward requires X1 to be contiguous\"),\n 'TestCommon', 'test_noncontiguous_samples'),\n )\n ),\n UnaryUfuncInfo('ceil',\n ref=np.ceil,\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n supports_forward_ad=True,\n supports_sparse=True,\n supports_sparse_csr=True,\n assert_autodiffed=True),\n OpInfo('cholesky',\n dtypes=floating_and_complex_types(),\n check_batched_gradgrad=False,\n sample_inputs_func=sample_inputs_linalg_cholesky,\n gradcheck_wrapper=gradcheck_wrapper_hermitian_input,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],),\n OpInfo('cholesky_inverse',\n dtypes=floating_and_complex_types(),\n backward_dtypes=floating_types(),\n # TODO: RuntimeError: cholesky_inverse does not support automatic differentiation for outputs\n # with complex dtype.\n check_batched_gradgrad=False,\n sample_inputs_func=sample_inputs_linalg_cholesky_inverse,\n gradcheck_wrapper=gradcheck_wrapper_triangular_input,\n decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],\n skips=(\n # TODO: FIXME: cholesky_inverse throws an error in forward when requires_grad=True\n # for complex tensors\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_dtypes'),\n # cholesky_inverse does not correctly warn when resizing out= inputs\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),)),\n OpInfo('cholesky_solve',\n op=torch.cholesky_solve,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_cholesky_solve,\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n gradcheck_wrapper=lambda *args, **kwargs: gradcheck_wrapper_triangular_input(*args, idx=1, **kwargs),\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # cholesky_solve does not correctly warn when resizing out= inputs\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),),),\n OpInfo('chunk',\n 
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n sample_inputs_func=sample_inputs_chunk,\n supports_forward_ad=True,\n supports_out=False),\n OpInfo('clone',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n sample_inputs_func=sample_inputs_clone,\n supports_forward_ad=True,\n supports_out=False),\n OpInfo('contiguous',\n op=lambda x, *args, **kwargs: x.contiguous(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n sample_inputs_func=sample_inputs_contiguous,\n supports_forward_ad=True,\n autodiff_fusible_nodes=['aten::contiguous'],\n assert_jit_shape_analysis=True,\n supports_out=False),\n OpInfo('sum_to_size',\n op=lambda x, *args, **kwargs: x.sum_to_size(*args, **kwargs),\n dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_sum_to_size,\n supports_forward_ad=True,\n supports_out=False,\n skips=(\n # RuntimeError: inputSet && outputSet\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":118\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),),),\n OpInfo('symeig',\n dtypes=floating_and_complex_types(),\n check_batched_gradgrad=False,\n sample_inputs_func=sample_inputs_symeig,\n gradcheck_wrapper=gradcheck_wrapper_hermitian_input,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),\n # NOTE: clamp has separate OpInfos for scalar min/max (unary op) vs. tensors\n OpInfo('clamp',\n aliases=('clip',),\n dtypes=all_types_and(torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16),\n assert_autodiffed=True,\n sample_inputs_func=sample_inputs_clamp),\n UnaryUfuncInfo('clamp',\n variant_test_name='scalar',\n aliases=('clip', ),\n decorators=(precisionOverride({torch.bfloat16: 7e-2, torch.float16: 1e-2}),),\n ref=np.clip,\n dtypes=all_types_and(torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16),\n assert_autodiffed=True,\n supports_forward_ad=True,\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/54841\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.bfloat16]),\n ),\n sample_kwargs=sample_kwargs_clamp_scalar,\n sample_inputs_func=sample_inputs_clamp_scalar),\n UnaryUfuncInfo('positive',\n ref=np.positive,\n dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n ),\n UnaryUfuncInfo('conj',\n ref=np.conj,\n dtypes=all_types_and_complex_and(torch.bool,\n torch.bfloat16, torch.half),\n supports_sparse=True,\n supports_forward_ad=True,\n supports_out=False),\n UnaryUfuncInfo('conj_physical',\n ref=np.conj,\n dtypes=all_types_and_complex_and(torch.bool,\n torch.bfloat16, torch.half),\n supports_forward_ad=True,\n supports_sparse=True,\n supports_sparse_csr=True,\n skips=(\n # RuntimeError: inputSet && outputSet\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":118,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32, )),\n DecorateInfo(unittest.skip(\"Skipped! 
conj_physical_ not implemented for sparse\"),\n 'TestSparseUnaryUfuncs', 'test_inplace'),\n )),\n OpInfo('resolve_conj',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_view_as_real,\n supports_forward_ad=True,\n supports_out=False,\n ),\n OpInfo('resolve_neg',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_view_as_real,\n supports_forward_ad=True,\n supports_out=False,\n ),\n OpInfo('view_as_real',\n dtypes=complex_types(),\n supports_forward_ad=True,\n supports_out=False,\n sample_inputs_func=sample_inputs_view_as_real,\n test_conjugated_samples=False,\n ),\n OpInfo('view_as_complex',\n dtypes=floating_types_and(torch.half),\n supports_out=False,\n supports_forward_ad=True,\n test_neg_view=False,\n sample_inputs_func=sample_inputs_view_as_complex,\n skips=(\n # RuntimeError: Tensor must have a last dimension with stride 1\n DecorateInfo(unittest.expectedFailure, \"TestCommon\", \"test_noncontiguous_samples\"),\n )),\n OpInfo('complex',\n dtypes=floating_types(),\n sample_inputs_func=sample_inputs_complex,\n supports_forward_ad=True,\n ),\n OpInfo('copysign',\n dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_copysign,\n supports_inplace_autograd=False,\n supports_forward_ad=True,\n ),\n OpInfo('corrcoef',\n dtypes=all_types_and_complex(),\n dtypesIfCUDA=all_types_and_complex_and(torch.half, *[torch.bfloat16] if CUDA11OrLater else []),\n sample_inputs_func=sample_inputs_corrcoef,\n supports_forward_ad=True,\n supports_out=False),\n UnaryUfuncInfo('cos',\n ref=np.cos,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n handles_large_floats=False,\n safe_casts_outputs=True,\n supports_forward_ad=True,\n decorators=(precisionOverride({torch.bfloat16: 1e-2}),),\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu',\n dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),\n )),\n UnaryUfuncInfo('cosh',\n ref=np_unary_ufunc_integer_promotion_wrapper(np.cosh),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n safe_casts_outputs=True,\n assert_autodiffed=True,\n supports_forward_ad=True,\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/48641\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.int8]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu',\n dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu',\n dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),\n )),\n 
OpInfo('cov',\n dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.half, *[torch.bfloat16] if CUDA11OrLater else []),\n backward_dtypesIfCUDA=all_types_and_complex_and(torch.half, *[torch.bfloat16] if CUDA11OrLater else []),\n sample_inputs_func=sample_inputs_cov,\n supports_out=False,\n supports_forward_ad=True,\n skips=(\n # JIT test not working for tensor kwargs (https://github.com/pytorch/pytorch/issues/58507)\n # RuntimeError:\n # undefined value tensor:\n # File \"<string>\", line 3\n # def the_method(i0):\n # return torch.cov(i0, correction=0, fweights=None, aweights=tensor([0.0518, 0.4681], dtype=torch.float32, requires_grad=True)) # noqa: B950\n # ~~~~~~ <--- HERE\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('cross',\n dtypes=all_types_and_complex_and(torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.half),\n sample_inputs_func=sample_inputs_cross,\n supports_forward_ad=True),\n OpInfo('linalg.cross',\n ref=lambda x, y, dim=-1: np.cross(x, y, axis=dim),\n op=torch.linalg.cross,\n dtypes=all_types_and_complex(),\n dtypesIfCPU=all_types_and_complex_and(torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.half),\n aten_name='linalg_cross',\n sample_inputs_func=sample_inputs_cross,\n supports_forward_ad=True),\n OpInfo('cumsum',\n dtypes=all_types_and_complex(),\n dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16),\n supports_forward_ad=True,\n skips=(\n # cumsum does not handle correctly out= dtypes\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),\n ),\n sample_inputs_func=sample_inputs_cumulative_ops),\n OpInfo('cumprod',\n dtypes=all_types_and_complex(),\n dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16),\n supports_forward_ad=True,\n skips=(\n # cumprod does not handle correctly out= dtypes\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),\n ),\n # gradgradcheck fails in fast_mode=True: #56275\n sample_inputs_func=sample_inputs_cumprod,\n gradcheck_fast_mode=False),\n OpInfo('cummax',\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=partial(sample_inputs_cumulative_ops, supports_dtype_kwargs=False),\n supports_forward_ad=True,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),\n OpInfo('cummin',\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=partial(sample_inputs_cumulative_ops, supports_dtype_kwargs=False),\n supports_forward_ad=True,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),\n UnaryUfuncInfo('deg2rad',\n ref=np.radians,\n decorators=(precisionOverride({torch.bfloat16: 7e-1,\n torch.float16: 7e-1}),),\n dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),\n supports_forward_ad=True,\n skips=(\n # Reference: https://github.com/pytorch/pytorch/pull/51283#issuecomment-770614273\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=[torch.bfloat16]),\n ),\n safe_casts_outputs=True),\n OpInfo('diff',\n op=torch.diff,\n # np.diff has np._NoValue as default values for prepend and append, compare_with_reference breaks if prepend/append\n # are set as None when converting to numpy\n ref=lambda input, n=1, dim=-1, prepend=np._NoValue, append=np._NoValue: (\n np.diff(input, n, dim, np._NoValue if prepend is None else prepend, 
np._NoValue if append is None else append)\n ),\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_diff),\n BinaryUfuncInfo('div',\n aliases=('divide',),\n variant_test_name='no_rounding_mode',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=partial(sample_inputs_binary_pwise, python_scalars=True),\n supports_forward_ad=True,\n promotes_int_to_float=True,\n assert_autodiffed=True,\n rhs_make_tensor_kwargs=dict(exclude_zero=True)),\n BinaryUfuncInfo('div',\n aliases=('divide',),\n variant_test_name='trunc_rounding',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=partial(sample_inputs_binary_pwise, rounding_mode=\"trunc\", python_scalars=True),\n supports_forward_ad=True,\n promotes_int_to_float=True,\n assert_autodiffed=True,\n rhs_make_tensor_kwargs=dict(exclude_zero=True)),\n BinaryUfuncInfo('div',\n aliases=('divide',),\n variant_test_name='floor_rounding',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=partial(sample_inputs_binary_pwise, rounding_mode=\"floor\", python_scalars=True),\n supports_forward_ad=True,\n promotes_int_to_float=True,\n assert_autodiffed=True,\n rhs_make_tensor_kwargs=dict(exclude_zero=True)),\n BinaryUfuncInfo('true_divide',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_forward_ad=True,\n promotes_int_to_float=True,\n sample_inputs_func=sample_inputs_binary_pwise,\n rhs_make_tensor_kwargs=dict(exclude_zero=True)),\n UnaryUfuncInfo('exp',\n ref=np_unary_ufunc_integer_promotion_wrapper(np.exp),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n skips=(\n # Reference: https://github.com/pytorch/pytorch/pull/50093#pullrequestreview-561791547\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.bfloat16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=[torch.bfloat16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',\n dtypes=[torch.bfloat16]),\n # Reference: https://github.com/pytorch/pytorch/issues/48010\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n ),\n assert_autodiffed=True,\n supports_forward_ad=True,\n safe_casts_outputs=True),\n OpInfo('expand',\n op=lambda self, shape: self.expand(shape),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_expand,\n supports_forward_ad=True,\n assert_jit_shape_analysis=True,\n supports_out=False),\n OpInfo('expand_as',\n op=lambda self, other: self.expand_as(other),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_expand_as,\n supports_out=False),\n OpInfo('diag',\n dtypes=all_types_and_complex_and(torch.bool),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_diag),\n 
OpInfo('diag_embed',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_out=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_diagonal_diag_embed),\n OpInfo('diagonal',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_out=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_diagonal_diag_embed),\n OpInfo('diagonal_scatter',\n dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),\n supports_out=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_diagonal_scatter),\n OpInfo('eq',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_comparison_ops),\n OpInfo('fmax',\n op=torch.fmax,\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_max_min_binary,),\n OpInfo('fmin',\n op=torch.fmin,\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_max_min_binary,),\n OpInfo('fmod',\n ref=np.fmod,\n dtypes=all_types_and(torch.float16),\n sample_inputs_func=sample_inputs_fmod_remainder),\n OpInfo('fmod',\n ref=np.fmod,\n variant_test_name='autodiffed',\n dtypes=all_types_and(torch.float16, torch.bool),\n assert_autodiffed=True,\n sample_inputs_func=partial(sample_inputs_fmod_remainder, autodiffed=True)),\n OpInfo('remainder',\n ref=np.remainder,\n dtypes=all_types_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_fmod_remainder),\n OpInfo('remainder',\n ref=np.remainder,\n variant_test_name='autodiffed',\n dtypes=all_types_and(torch.float16, torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.float16, torch.bool, torch.bfloat16),\n supports_forward_ad=True,\n assert_autodiffed=True,\n sample_inputs_func=partial(sample_inputs_fmod_remainder, autodiffed=True),\n decorators=(\n # Fails on XLA\n # False is not true : Tensors failed to compare as equal!\n # Attempted to compare equality of tensors with different dtypes\n DecorateInfo(unittest.expectedFailure, 'TestOpInfo', device_type='xla', dtypes=(torch.long,)),\n )),\n UnaryUfuncInfo('frac',\n ref=lambda x: np.modf(x)[0],\n dtypes=floating_types_and(torch.bfloat16, torch.float16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n assert_autodiffed=True,\n supports_forward_ad=True,\n # Reference for disabling extremals\n # https://github.com/pytorch/pytorch/issues/51948\n handles_extremals=False),\n SpectralFuncInfo('fft.fft',\n aten_name='fft_fft',\n ref=np.fft.fft,\n ndimensional=SpectralFuncType.OneD,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n ),\n SpectralFuncInfo('fft.fft2',\n aten_name='fft_fft2',\n ref=np.fft.fft2,\n ndimensional=SpectralFuncType.TwoD,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n decorators=[precisionOverride(\n {torch.float: 1e-4, torch.cfloat: 1e-4})],\n ),\n SpectralFuncInfo('fft.fftn',\n aten_name='fft_fftn',\n ref=np.fft.fftn,\n ndimensional=SpectralFuncType.ND,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n decorators=[precisionOverride(\n {torch.float: 1e-4, torch.cfloat: 1e-4})],\n ),\n SpectralFuncInfo('fft.hfft',\n aten_name='fft_hfft',\n 
ref=np.fft.hfft,\n ndimensional=SpectralFuncType.OneD,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n check_batched_gradgrad=False),\n SpectralFuncInfo('fft.hfft2',\n aten_name='fft_hfft2',\n ref=scipy.fft.hfft2 if has_scipy_fft else None,\n ndimensional=SpectralFuncType.TwoD,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n check_batched_gradgrad=False,\n decorators=[\n DecorateInfo(\n precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}),\n 'TestFFT', 'test_reference_nd')],\n ),\n SpectralFuncInfo('fft.hfftn',\n aten_name='fft_hfftn',\n ref=scipy.fft.hfftn if has_scipy_fft else None,\n ndimensional=SpectralFuncType.ND,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n check_batched_gradgrad=False,\n decorators=[\n DecorateInfo(\n precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}),\n 'TestFFT', 'test_reference_nd')],\n ),\n SpectralFuncInfo('fft.rfft',\n aten_name='fft_rfft',\n ref=np.fft.rfft,\n ndimensional=SpectralFuncType.OneD,\n dtypes=all_types_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n check_batched_grad=False,\n check_batched_gradgrad=False),\n SpectralFuncInfo('fft.rfft2',\n aten_name='fft_rfft2',\n ref=np.fft.rfft2,\n ndimensional=SpectralFuncType.TwoD,\n dtypes=all_types_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n check_batched_grad=False,\n check_batched_gradgrad=False,\n decorators=[precisionOverride({torch.float: 1e-4})],),\n SpectralFuncInfo('fft.rfftn',\n aten_name='fft_rfftn',\n ref=np.fft.rfftn,\n ndimensional=SpectralFuncType.ND,\n dtypes=all_types_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n check_batched_grad=False,\n check_batched_gradgrad=False,\n decorators=[precisionOverride({torch.float: 1e-4})],),\n SpectralFuncInfo('fft.ifft',\n aten_name='fft_ifft',\n ref=np.fft.ifft,\n ndimensional=SpectralFuncType.OneD,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types()),\n SpectralFuncInfo('fft.ifft2',\n aten_name='fft_ifft2',\n ref=np.fft.ifft2,\n ndimensional=SpectralFuncType.TwoD,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n decorators=[\n DecorateInfo(\n precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),\n 'TestFFT', 'test_reference_nd')],\n ),\n SpectralFuncInfo('fft.ifftn',\n aten_name='fft_ifftn',\n ref=np.fft.ifftn,\n ndimensional=SpectralFuncType.ND,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n decorators=[\n DecorateInfo(\n precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),\n 'TestFFT', 'test_reference_nd')],\n ),\n SpectralFuncInfo('fft.ihfft',\n aten_name='fft_ihfft',\n ref=np.fft.ihfft,\n ndimensional=SpectralFuncType.OneD,\n dtypes=all_types_and(torch.bool),\n default_test_dtypes=floating_types(),\n check_batched_grad=False),\n SpectralFuncInfo('fft.ihfft2',\n aten_name='fft_ihfft2',\n ref=scipy.fft.ihfftn if has_scipy_fft else None,\n ndimensional=SpectralFuncType.TwoD,\n dtypes=all_types_and(torch.bool),\n default_test_dtypes=floating_types(),\n check_batched_grad=False,\n check_batched_gradgrad=False,\n decorators=[\n DecorateInfo(\n precisionOverride({torch.float: 2e-4}),\n 'TestFFT', 'test_reference_nd')],\n ),\n SpectralFuncInfo('fft.ihfftn',\n aten_name='fft_ihfftn',\n ref=scipy.fft.ihfftn if has_scipy_fft else None,\n 
ndimensional=SpectralFuncType.ND,\n dtypes=all_types_and(torch.bool),\n default_test_dtypes=floating_types(),\n check_batched_grad=False,\n check_batched_gradgrad=False,\n decorators=[\n DecorateInfo(\n precisionOverride({torch.float: 2e-4}),\n 'TestFFT', 'test_reference_nd')],\n ),\n SpectralFuncInfo('fft.irfft',\n aten_name='fft_irfft',\n ref=np.fft.irfft,\n ndimensional=SpectralFuncType.OneD,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n check_batched_gradgrad=False),\n SpectralFuncInfo('fft.irfft2',\n aten_name='fft_irfft2',\n ref=np.fft.irfft2,\n ndimensional=SpectralFuncType.TwoD,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n check_batched_gradgrad=False,\n decorators=[\n DecorateInfo(\n precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),\n 'TestFFT', 'test_reference_nd')],\n ),\n SpectralFuncInfo('fft.irfftn',\n aten_name='fft_irfftn',\n ref=np.fft.irfftn,\n ndimensional=SpectralFuncType.ND,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n check_batched_gradgrad=False,\n decorators=[\n DecorateInfo(\n precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),\n 'TestFFT', 'test_reference_nd')],\n ),\n OpInfo('fft.fftshift',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n sample_inputs_func=lambda *a, **kw: list(sample_inputs_fftshift(*a, **kw)),\n supports_out=False,\n supports_forward_ad=True,\n ),\n OpInfo('fft.ifftshift',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n sample_inputs_func=lambda *a, **kw: list(sample_inputs_fftshift(*a, **kw)),\n supports_out=False,\n supports_forward_ad=True,\n ),\n OpInfo('stft',\n decorators=[\n skipCPUIfNoFFT,\n DecorateInfo(unittest.skip(\"Skipped! stft does not match the native function\"),\n 'TestJit', 'test_variant_consistency_jit'),\n ],\n dtypes=floating_and_complex_types(),\n sample_inputs_func=lambda *a, **kw: list(sample_inputs_stft(*a, **kw)),\n check_batched_grad=False,\n check_batched_gradgrad=False,\n supports_out=False,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n ),\n OpInfo('istft',\n decorators=[\n skipCPUIfNoFFT,\n DecorateInfo(unittest.skip(\"Skipped! 
istft does not match the native function\"),\n 'TestJit', 'test_variant_consistency_jit'),\n # gradcheck fails on ROCm (gh-68429)\n DecorateInfo(skipCUDAIfRocm, 'TestGradients', 'test_fn_grad'),\n ],\n dtypes=floating_and_complex_types(),\n sample_inputs_func=lambda *a, **kw: list(sample_inputs_istft(*a, **kw)),\n check_batched_grad=False,\n check_batched_gradgrad=False,\n supports_out=False,\n ),\n UnaryUfuncInfo('floor',\n ref=np.floor,\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n supports_forward_ad=True,\n supports_sparse=True,\n supports_sparse_csr=True,\n assert_autodiffed=True),\n OpInfo('flip',\n op=torch.flip,\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_flip,\n supports_forward_ad=True,\n supports_out=False),\n OpInfo('fliplr',\n op=torch.fliplr,\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_fliplr_flipud,\n supports_forward_ad=True,\n supports_out=False),\n OpInfo('flipud',\n op=torch.flipud,\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_fliplr_flipud,\n supports_forward_ad=True,\n supports_out=False),\n UnaryUfuncInfo('i0',\n ref=np_unary_ufunc_integer_promotion_wrapper(\n scipy.special.i0) if TEST_SCIPY else _NOTHING,\n aliases=('special.i0',),\n decorators=(precisionOverride({torch.bfloat16: 3e-1,\n torch.float16: 5e-1}),),\n backward_dtypesIfCPU=floating_types(),\n backward_dtypesIfCUDA=floating_types(),\n backward_dtypesIfROCM=floating_types(),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n safe_casts_outputs=True,\n sample_inputs_func=sample_inputs_i0_i1),\n UnaryUfuncInfo('special.i0e',\n aten_name='special_i0e',\n ref=scipy.special.i0e if TEST_SCIPY else _NOTHING,\n decorators=(precisionOverride({torch.bfloat16: 3e-1,\n torch.float16: 3e-1}),),\n backward_dtypesIfCPU=floating_types(),\n backward_dtypesIfCUDA=floating_types(),\n backward_dtypesIfROCM=floating_types(),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_i0_i1,\n safe_casts_outputs=True),\n UnaryUfuncInfo('special.i1',\n aten_name='special_i1',\n ref=np_unary_ufunc_integer_promotion_wrapper(scipy.special.i1) if TEST_SCIPY else _NOTHING,\n dtypes=all_types_and(torch.bool),\n dtypesIfCUDA=all_types_and(torch.bool),\n sample_inputs_func=sample_inputs_i0_i1,\n safe_casts_outputs=True,\n decorators=(\n DecorateInfo(toleranceOverride({\n torch.float32: tol(atol=1e-4, rtol=0),\n torch.bool: tol(atol=1e-4, rtol=0)})),\n ),\n skips=(\n # TODO: FIXME: jiterator does not support casting to complex outs\n DecorateInfo(unittest.skip(\"FIXME: Jiterator does not support complex outs!\"),\n \"TestUnaryUfuncs\",\n \"test_out_arg_all_dtypes\",\n device_type='cuda'),\n )),\n UnaryUfuncInfo('special.i1e',\n aten_name='special_i1e',\n ref=scipy.special.i1e if TEST_SCIPY else _NOTHING,\n dtypes=all_types_and(torch.bool),\n dtypesIfCUDA=all_types_and(torch.bool),\n sample_inputs_func=sample_inputs_i0_i1,\n safe_casts_outputs=True),\n UnaryUfuncInfo('special.ndtr',\n aten_name='special_ndtr',\n decorators=(precisionOverride({torch.bfloat16: 5e-3,\n torch.float16: 5e-4}),),\n ref=scipy.special.ndtr if TEST_SCIPY else _NOTHING,\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n 
dtypesIfCUDA=all_types_and(torch.bool, torch.bfloat16, torch.float16),\n safe_casts_outputs=True),\n BinaryUfuncInfo('floor_divide',\n dtypes=all_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_binary_pwise,\n supports_autograd=False,\n rhs_make_tensor_kwargs=dict(exclude_zero=True),\n ),\n UnaryUfuncInfo('frexp',\n op=torch.frexp,\n ref=np.frexp,\n dtypes=floating_types_and(torch.half, torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half),\n # skip testing torch.frexp as it is not supported on the ROCm platform yet\n decorators=[],\n supports_out=False,\n supports_forward_ad=True,\n skips=(\n # skip the tests below, as torch.frexp returns a tuple-like (mantissa, exponent) output,\n # while these tests currently require the output to be a single tensor.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_batch_vs_slicing'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_contig_vs_every_other'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_contig_vs_transposed'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_non_contig_expand'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_variant_consistency'),\n\n # skip test_reference_numerics due to an error in Windows CI.\n # np.frexp returns the exponent as np.intc dtype on Windows,\n # and np.intc has no corresponding torch dtype\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',\n active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n active_if=IS_WINDOWS),\n )),\n OpInfo('ge',\n aliases=('greater_equal',),\n dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_comparison_ops),\n OpInfo('geqrf',\n dtypes=floating_and_complex_types(),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_geqrf,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],),\n OpInfo('gt',\n aliases=('greater',),\n dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_comparison_ops),\n UnaryUfuncInfo('imag',\n ref=np.imag,\n dtypes=complex_types(),\n supports_out=False,\n supports_forward_ad=True,\n # See https://github.com/pytorch/pytorch/issues/66357\n # RuntimeError: view_as_real doesn't work on unresolved conjugated tensors.\n check_batched_forward_grad=False,\n skips=(\n # Skip since real and imag don't have out variants.\n DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'),\n )),\n OpInfo('gradient',\n dtypes=floating_and_complex_types_and(torch.int8, torch.int16,\n torch.int32, torch.int64,\n torch.bfloat16, torch.half),\n supports_out=False,\n supports_forward_ad=True,\n skips=(\n # the following tests give a runtime error with an undefined value tensor\n # see discussion: https://github.com/pytorch/pytorch/issues/56660\n # RuntimeError:\n # Arguments for call are not valid.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32, torch.complex64)), # noqa: B950\n ),\n supports_inplace_autograd=False,\n sample_inputs_func=sample_inputs_gradient),\n OpInfo('inverse',\n op=torch.inverse,\n dtypes=floating_and_complex_types(),\n 
check_batched_gradgrad=False,\n supports_forward_ad=True,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n sample_inputs_func=sample_inputs_linalg_invertible,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack]),\n OpInfo('isin',\n dtypes=all_types(),\n dtypesIfCUDA=all_types_and(torch.half),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_isin,\n skips=(\n # https://github.com/pytorch/pytorch/issues/67432\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples', device_type='cpu'), # noqa: B950\n )),\n OpInfo('kthvalue',\n dtypes=all_types_and(torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.float16),\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_kthvalue,\n error_inputs_func=error_inputs_kthvalue),\n OpInfo('le',\n aliases=('less_equal',),\n dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_comparison_ops),\n OpInfo('linalg.det',\n op=torch.linalg.det,\n aliases=('det', ),\n dtypes=floating_and_complex_types(),\n backward_dtypes=floating_and_complex_types(),\n aten_name='linalg_det',\n sample_inputs_func=sample_inputs_linalg_det,\n decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack, skipCUDAIfRocm],\n supports_inplace_autograd=False,\n skips=(\n # https://github.com/pytorch/pytorch/issues/67512\n DecorateInfo(unittest.skip(\"67512\"), 'TestCommon', 'test_noncontiguous_samples'),\n )),\n OpInfo('linalg.det',\n op=torch.linalg.det,\n variant_test_name='singular',\n aliases=('det', ),\n dtypes=double_types(),\n backward_dtypes=double_types(),\n aten_name='linalg_det',\n sample_inputs_func=sample_inputs_linalg_det_singular,\n decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack, skipCUDAIfRocm],\n supports_inplace_autograd=False,\n skips=(\n # https://github.com/pytorch/pytorch/issues/67512\n DecorateInfo(unittest.skip(\"67512\"), 'TestCommon', 'test_noncontiguous_samples'),\n # Will be removed once https://github.com/pytorch/pytorch/issues/62328 is fixed\n # Probable fix (open PR): https://github.com/pytorch/pytorch/pull/62570\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_fn_grad', device_type='cuda',\n dtypes=(torch.complex128,)),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_dtypes'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_fn_gradgrad'),\n # This test fails because singular inputs cannot be reliably\n # generated unless we're using double types\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestOpInfo', 'test_unsupported_dtypes'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestOpInfo', 'test_unsupported_backward',\n dtypes=(torch.float32, torch.complex64,)),\n )),\n OpInfo('linalg.cholesky',\n aten_name='linalg_cholesky',\n dtypes=floating_and_complex_types(),\n # TODO: RuntimeError: While computing batched gradients,\n # got: vmap: Calling Tensor.as_strided is not supported\n # unless the batch dims being vmapped over are at the front of the tensor (in memory layout).\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_linalg_cholesky,\n gradcheck_wrapper=gradcheck_wrapper_hermitian_input,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # Pre-existing condition; Needs to be fixed\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),\n )),\n OpInfo('linalg.cholesky_ex',\n aten_name='linalg_cholesky_ex',\n dtypes=floating_and_complex_types(),\n 
check_batched_gradgrad=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_linalg_cholesky,\n gradcheck_wrapper=gradcheck_wrapper_hermitian_input,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],\n ),\n OpInfo('linalg.cond',\n aten_name='linalg_cond',\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_linalg_cond,\n check_batched_gradgrad=False,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],\n ),\n OpInfo('linalg.eig',\n aten_name='linalg_eig',\n op=torch.linalg.eig,\n dtypes=floating_and_complex_types(),\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_linalg_eig,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # Disabled due to https://github.com/pytorch/pytorch/issues/67367\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_forward_mode_AD'),),\n ),\n OpInfo('linalg.eigvals',\n aten_name='linalg_eigvals',\n op=torch.linalg.eigvals,\n dtypes=floating_and_complex_types(),\n check_batched_gradgrad=False,\n sample_inputs_func=sample_inputs_linalg_invertible,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # Disabled due to https://github.com/pytorch/pytorch/issues/67367\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_forward_mode_AD'),\n # Pre-existing condition; Needs to be fixed\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),\n )),\n OpInfo('linalg.eigh',\n aten_name='linalg_eigh',\n dtypes=floating_and_complex_types(),\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_linalg_eigh,\n gradcheck_wrapper=gradcheck_wrapper_hermitian_input,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # See: https://github.com/pytorch/pytorch/issues/67367\n # This DecorateInfo should change to `dtypes=complex_dtypes()` after the above\n # has been resolved.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_forward_mode_AD',\n dtypes=floating_and_complex_types()),),\n ),\n OpInfo('linalg.eigvalsh',\n aten_name='linalg_eigvalsh',\n dtypes=floating_and_complex_types(),\n check_batched_gradgrad=False,\n sample_inputs_func=sample_inputs_linalg_eigh,\n gradcheck_wrapper=gradcheck_wrapper_hermitian_input,\n supports_forward_ad=True,\n # See https://github.com/pytorch/pytorch/issues/66357\n check_batched_forward_grad=False,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # Gradcheck for complex is not implemented yet\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_forward_mode_AD', dtypes=complex_types()),\n # Pre-existing condition; Needs to be fixed\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),\n )),\n OpInfo('linalg.householder_product',\n aten_name='linalg_householder_product',\n op=torch.linalg.householder_product,\n aliases=('orgqr', ),\n dtypes=floating_and_complex_types(),\n # TODO: backward uses in-place operations that vmap doesn't like\n check_batched_grad=False,\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n check_batched_forward_grad=False,\n sample_inputs_func=sample_inputs_householder_product,\n decorators=[\n skipCUDAIfNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack,\n DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-3, rtol=1e-3)})),\n ]),\n 
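# [editor's note, not part of the original file] Several linalg entries above (linalg.cholesky,\n # linalg.eigh, linalg.eigvalsh) pass gradcheck_wrapper_hermitian_input because those ops are only\n # defined, and hence only differentiated, on Hermitian inputs; the wrapper keeps gradcheck's\n # finite-difference perturbations inside that domain. A minimal sketch of constructing such an\n # input, assuming standard torch ops (kept as comments so the surrounding OpInfo list stays valid):\n # >>> a = torch.randn(3, 3, dtype=torch.complex128, requires_grad=True)\n # >>> h = a + a.conj().transpose(-2, -1)   # Hermitian by construction\n # >>> torch.linalg.eigvalsh(h)             # safe to differentiate with respect to a\n 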
OpInfo('linalg.lstsq',\n aten_name='linalg_lstsq',\n dtypes=floating_and_complex_types(),\n supports_out=True,\n sample_inputs_func=sample_inputs_linalg_lstsq,\n decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],\n skips=(\n # we skip gradient checks for this suite as they are tested in\n # variant_test_name='grad_oriented'\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients'),\n )),\n OpInfo('linalg.lstsq',\n aten_name='linalg_lstsq',\n variant_test_name='grad_oriented',\n # gradchecks for forward AD fails with multi-Tensor outputs\n op=lambda a, b, driver: torch.linalg.lstsq(a, b, driver=driver)[0],\n supports_out=False,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_linalg_lstsq,\n supports_autograd=True,\n supports_forward_ad=True,\n decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],\n skips=(\n # tests do not work with passing lambda for op\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),\n DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),\n DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),\n )),\n OpInfo('linalg.matrix_power',\n aliases=('matrix_power',),\n aten_name='linalg_matrix_power',\n dtypes=floating_and_complex_types(),\n supports_inplace_autograd=False,\n supports_forward_ad=True,\n check_batched_grad=False,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, skipCUDAIfRocm],\n sample_inputs_func=sample_inputs_linalg_matrix_power,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n skips=(\n # Pre-existing condition; Needs to be fixed\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),\n ),),\n OpInfo('linalg.multi_dot',\n # Need this lambda because gradcheck does not work with TensorList inputs\n aten_name='linalg_multi_dot',\n dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.half, *[torch.bfloat16] if CUDA11OrLater else []),\n supports_inplace_autograd=False,\n # Batched grad checks fail for empty input tensors (see https://github.com/pytorch/pytorch/issues/53407)\n check_batched_grad=False,\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n # https://github.com/pytorch/pytorch/issues/66357\n check_batched_forward_grad=False,\n sample_inputs_func=sample_inputs_linalg_multi_dot,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n skips=(\n # https://github.com/pytorch/pytorch/issues/67470\n DecorateInfo(unittest.skip(\"67470!\"), 'TestCommon', 'test_noncontiguous_samples'),\n # Fails on XLA.\n # AssertionError: False is not true : Tensors failed to compare as equal!\n DecorateInfo(unittest.expectedFailure, 'TestOpInfo', device_type='xla', dtypes=(torch.long,)),\n )),\n OpInfo('linalg.norm',\n op=torch.linalg.norm,\n dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],\n sample_inputs_func=sample_inputs_linalg_norm,\n aten_name='linalg_norm',\n skips=(\n # Pre-existing condition; Needs to be fixed\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),\n # Expected RuntimeError when calling with input.device=cpu and out.device=cuda\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),\n )),\n OpInfo('linalg.matrix_norm',\n aten_name='linalg_matrix_norm',\n dtypes=floating_and_complex_types(),\n decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],\n 
sample_inputs_func=sample_inputs_linalg_matrix_norm,\n skips=(\n # Pre-existing condition; Needs to be fixed\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),\n # Expected RuntimeError when calling with input.device=cpu and out.device=cuda\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),\n )),\n OpInfo('linalg.qr',\n aten_name='linalg_qr',\n op=torch.linalg.qr,\n dtypes=floating_and_complex_types(),\n # batched gradients do not work for empty inputs\n # https://github.com/pytorch/pytorch/issues/50743#issuecomment-767376085\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n # See https://github.com/pytorch/pytorch/issues/66357\n check_batched_forward_grad=False,\n sample_inputs_func=sample_inputs_linalg_qr,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),\n OpInfo('linalg.slogdet',\n aten_name='linalg_slogdet',\n op=torch.linalg.slogdet,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_linalg_slogdet,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'),\n )),\n OpInfo('linalg.vector_norm',\n op=torch.linalg.vector_norm,\n dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],\n sample_inputs_func=sample_inputs_linalg_vector_norm,\n aten_name='linalg_vector_norm'),\n UnaryUfuncInfo('log',\n ref=np.log,\n domain=(0, None),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n safe_casts_outputs=True,\n supports_forward_ad=True,\n decorators=(precisionOverride({torch.bfloat16: 5e-2}),),\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=IS_WINDOWS),\n ),\n # log(z)->-inf for |z|->0\n reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)),\n UnaryUfuncInfo('log10',\n ref=np.log10,\n domain=(0, None),\n decorators=(precisionOverride({torch.bfloat16: 5e-2}),),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n assert_autodiffed=True,\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n safe_casts_outputs=True,\n supports_forward_ad=True,\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=IS_WINDOWS),\n ),\n # log10(z)->-inf for |z|->0\n reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)),\n UnaryUfuncInfo('log1p',\n ref=np.log1p,\n aliases=('special.log1p',),\n domain=(-1, None),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n decorators=(precisionOverride({torch.bfloat16: 1e-1}),),\n skips=(\n DecorateInfo(unittest.skip(\"Skipped! 
sparse backward not supported\"),\n 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),\n ),\n safe_casts_outputs=True,\n supports_forward_ad=True,\n supports_sparse=True,\n supports_sparse_csr=True,\n assert_autodiffed=True),\n UnaryUfuncInfo('log2',\n ref=np.log2,\n domain=(0, None),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n safe_casts_outputs=True,\n supports_forward_ad=True,\n decorators=(precisionOverride({torch.bfloat16: 1e-1}),),\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.cfloat, torch.cdouble]),\n ),\n # log2(z)->-inf for |z|->0\n reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)),\n BinaryUfuncInfo('ldexp',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_forward_ad=True,\n supports_inplace_autograd=False,\n sample_inputs_func=sample_inputs_binary_pwise,\n promotes_int_to_float=True,\n supports_out=True,\n skips=(\n # RuntimeError: mul(): functions with out=... arguments don't support\n # automatic differentiation, but one of the arguments requires grad\n # https://github.com/pytorch/pytorch/issues/68966\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'),\n DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),\n DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),\n # FIXME: ldexp does not accept scalar inputs\n DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),\n ),\n decorators=[\n DecorateInfo(\n toleranceOverride({\n torch.complex64: tol(atol=1e-05, rtol=1e-05)\n }),\n 'TestCommon', device_type='cpu',\n ),\n ], ),\n OpInfo('logaddexp',\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.bfloat16),\n dtypesIfROCM=floating_types_and(torch.bfloat16),\n supports_forward_ad=True,\n sample_inputs_func=lambda op_info, device, dtype, requires_grad=False, **kwargs:\n (SampleInput(make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n args=(make_tensor((S, S), device, dtype, requires_grad=requires_grad),)),)),\n OpInfo('logaddexp2',\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.bfloat16),\n dtypesIfROCM=floating_types_and(torch.bfloat16),\n supports_forward_ad=True,\n sample_inputs_func=lambda op_info, device, dtype, requires_grad=False, **kwargs:\n (SampleInput(make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n args=(make_tensor((S, S), device, dtype, requires_grad=requires_grad),)),)),\n UnaryUfuncInfo('logical_not',\n ref=np.logical_not,\n decorators=(precisionOverride({torch.bfloat16: 7e-1,\n torch.float16: 5e-1}),),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n safe_casts_outputs=True,\n supports_autograd=False,\n skips=(\n # Pre-existing condition; Needs to be fixed\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),\n # The function variant always returns BoolTensor\n # while the inplace variant preserves the input dtype.\n # >>> t = torch.randn(3)\n # >>> torch.logical_not(t)\n # tensor([False, False, False])\n # >>> torch.logical_not(t).dtype\n # torch.bool\n # >>> t.logical_not_().dtype\n # torch.float32\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_variant_consistency',\n 
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_variant_consistency_eager',\n dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)),\n )),\n OpInfo('lt',\n aliases=('less',),\n dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_comparison_ops),\n OpInfo('lu',\n op=torch.lu,\n dtypes=floating_and_complex_types(),\n supports_inplace_autograd=False,\n # we use in-place operations which cannot be avoided.\n # This causes vmap failures, hence we skip batched gradient checks\n check_batched_grad=False,\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n # https://github.com/pytorch/pytorch/issues/66357\n check_batched_forward_grad=False,\n supports_out=False,\n sample_inputs_func=sample_inputs_lu,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # we skip jit tests because `lu` is a torch function\n # RuntimeError:\n # 'Tensor (inferred)' object has no attribute or method 'lu'.:\n # File \"<string>\", line 3\n # def the_method(i0):\n # return i0.lu(True, True)\n # ~~~~~ <--- HERE\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('lu_solve',\n op=torch.lu_solve,\n dtypes=floating_and_complex_types(),\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n # See https://github.com/pytorch/pytorch/issues/66357\n check_batched_forward_grad=False,\n sample_inputs_func=sample_inputs_lu_solve,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # RuntimeError: lu_unpack: LU_pivots is expected to be a contiguous tensor of torch.int32 dtype\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'), # noqa: B950\n )),\n OpInfo('lu_unpack',\n op=torch.lu_unpack,\n dtypes=floating_and_complex_types(),\n supports_inplace_autograd=False,\n # we use in-place operations which cannot be avoided.\n # This causes vmap failures, hence we skip batched gradient checks\n check_batched_grad=False,\n supports_out=True,\n sample_inputs_func=sample_inputs_lu_unpack,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # LU_pivots is expected to be a contiguous tensor\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'), # noqa: B950\n # cuda gradchecks are slow\n # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_fn_gradgrad', device_type='cuda'),\n )),\n OpInfo('masked_fill',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_masked_fill,\n supports_forward_ad=True,\n # https://github.com/pytorch/pytorch/issues/66357\n check_batched_forward_grad=False,\n supports_out=False),\n OpInfo('masked_scatter',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_masked_scatter,\n supports_forward_ad=True,\n # https://github.com/pytorch/pytorch/issues/66357\n check_batched_forward_grad=False,\n supports_out=False),\n OpInfo('masked_select',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_masked_select),\n OpInfo('matrix_exp',\n dtypes=floating_and_complex_types_and(torch.bfloat16),\n 
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n aliases=('linalg.matrix_exp',),\n sample_inputs_func=sample_inputs_matrix_exp,\n # Needs to construct a 2nx2n matrix by copy_ ing into it\n check_batched_grad=False,\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n # https://github.com/pytorch/pytorch/issues/66357\n check_batched_forward_grad=False,\n supports_out=False,\n ),\n OpInfo('matmul',\n aliases=('linalg.matmul',),\n dtypes=all_types_and_complex_and(torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n dtypesIfROCM=floating_types_and(torch.half, torch.bfloat16),\n backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16,\n *[torch.bfloat16] if (SM60OrLater and CUDA11OrLater) else []),\n assert_autodiffed=True,\n assert_jit_shape_analysis=True,\n sample_inputs_func=sample_inputs_matmul,\n skips=(\n # ROCm intermittently fails the test with standard atol/rtol\n DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=0)}),\n 'TestCommon', 'test_noncontiguous_samples',\n active_if=TEST_WITH_ROCM),\n # https://github.com/pytorch/pytorch/issues/67470\n DecorateInfo(unittest.skip(\"67470!\"),\n 'TestCommon', 'test_noncontiguous_samples',\n device_type='cpu', dtypes=(torch.long,)),\n # AssertionError: False is not true : Tensors failed to compare as equal!\n DecorateInfo(unittest.expectedFailure, 'TestOpInfo',\n device_type='xla', dtypes=(torch.long,)),\n )),\n OpInfo('max',\n variant_test_name='reduction_with_dim',\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n sample_inputs_func=sample_inputs_max_min_reduction_with_dim,\n supports_forward_ad=True),\n OpInfo('max',\n variant_test_name='reduction_no_dim',\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n supports_out=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_max_min_reduction_no_dim,),\n OpInfo('median',\n dtypes=all_types_and(torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.float16),\n # TODO: some signatures of median do support out\n supports_out=False,\n sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False)),\n OpInfo('nanmedian',\n dtypes=all_types_and(torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.float16),\n # TODO: some signatures of nanmedian do support out\n supports_out=False,\n sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False)),\n OpInfo('var_mean',\n dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False),\n backward_dtypes=floating_types_and(torch.half),\n backward_dtypesIfCPU=floating_types_and(torch.half, torch.bfloat16),\n backward_dtypesIfCUDA=floating_types_and(torch.half),\n # TODO: some signatures of var_mean do support out\n supports_out=False,\n supports_forward_ad=True,\n skips=(\n # https://github.com/pytorch/pytorch/issues/67539\n DecorateInfo(unittest.skip(\"67539\"), 'TestCommon', 'test_noncontiguous_samples',\n active_if=TEST_WITH_ASAN, device_type='cpu'),\n # TODO: FIXME: complex inputs requiring grad error in forward\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_dtypes'),\n # TODO: review with var_mean tests in test_autograd.py\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n 
DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_fn_grad'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_fn_gradgrad'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_forward_mode_AD'))),\n OpInfo('std_mean',\n dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False),\n backward_dtypes=floating_types_and(torch.half),\n backward_dtypesIfCPU=floating_types_and(torch.half, torch.bfloat16),\n backward_dtypesIfCUDA=floating_types_and(torch.half),\n # TODO: some signatures of std_mean do support out\n supports_out=False,\n supports_forward_ad=True,\n skips=(\n # https://github.com/pytorch/pytorch/issues/67539\n DecorateInfo(unittest.skip(\"67539\"), 'TestCommon', 'test_noncontiguous_samples',\n active_if=TEST_WITH_ASAN, device_type='cpu'),\n # TODO: FIXME: complex inputs requiring grad error in forward\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_dtypes'),\n # TODO: fix along with var_mean autograd tests\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_fn_grad'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_fn_gradgrad'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_forward_mode_AD'))),\n OpInfo('meshgrid',\n variant_test_name='variadic_tensors',\n ref=np.meshgrid,\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool, torch.float16),\n sample_inputs_func=partial(sample_inputs_meshgrid, variant='variadic'),\n skips=[\n # JIT does not support variadic tensors.\n # RuntimeError: input->type()->kind() == TypeKind::OptionalType\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":252,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n # meshgrid is defined in torch.functional to take a\n # variadic list of tensors. Variadic parameters are not\n # compatible with the normalize operator tests.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),\n # Skip operator schema test because this is a functional and not an operator\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),\n ],\n supports_out=False,\n supports_forward_ad=True),\n OpInfo('meshgrid',\n variant_test_name='list_of_tensors',\n # Unlike the variant above, we do not use np.meshgrid as a\n # ref since it does not officially support list of numpy\n # arrays.\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool, torch.float16),\n sample_inputs_func=partial(sample_inputs_meshgrid, variant='list'),\n skips=[\n # meshgrid is defined in torch.functional to take a\n # variadic list of tensors. 
Variadic parameters are not\n # compatible with the normalize operator tests.\n DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),\n ],\n assert_autodiffed=True,\n supports_out=False,\n autodiff_nonfusible_nodes=[],\n supports_forward_ad=True),\n OpInfo('min',\n variant_test_name='reduction_with_dim',\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n sample_inputs_func=sample_inputs_max_min_reduction_with_dim,\n supports_forward_ad=True),\n OpInfo('min',\n variant_test_name='reduction_no_dim',\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n supports_out=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_max_min_reduction_no_dim,),\n OpInfo('quantile',\n dtypes=floating_types(),\n sample_inputs_func=sample_inputs_reduction_quantile,\n skips=(\n # Pre-existing condition; Needs to be fixed\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),\n )),\n OpInfo('nanquantile',\n dtypes=floating_types(),\n sample_inputs_func=sample_inputs_reduction_quantile,\n skips=(\n # Pre-existing condition; Needs to be fixed\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),\n )),\n BinaryUfuncInfo(\n 'max',\n aliases=('maximum',),\n variant_test_name='binary',\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n sample_inputs_func=sample_inputs_max_min_binary,\n supports_forward_ad=True,\n assert_autodiffed=True,\n ref=np.maximum,\n skips=(\n # FIXME: maximum does not accept scalar inputs\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),\n # TODO: FIXME: RuntimeError: \"max_elementwise_cuda\" not implemented for 'ComplexFloat'\n DecorateInfo(unittest.expectedFailure,\n 'TestBinaryUfuncs',\n 'test_type_promotion',\n device_type='cuda'),\n ),\n ),\n BinaryUfuncInfo(\n 'maximum',\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_max_min_binary,\n ref=np.maximum,\n skips=(\n # FIXME: maximum does not accept scalar inputs\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),\n # TODO: FIXME: RuntimeError: \"max_elementwise_cuda\" not implemented for 'ComplexFloat'\n DecorateInfo(unittest.expectedFailure,\n 'TestBinaryUfuncs',\n 'test_type_promotion',\n device_type='cuda'),\n ),\n ),\n BinaryUfuncInfo(\n 'min',\n aliases=('minimum',),\n variant_test_name='binary',\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n sample_inputs_func=sample_inputs_max_min_binary,\n supports_forward_ad=True,\n assert_autodiffed=True,\n ref=np.minimum,\n skips=(\n # FIXME: min does not accept scalar inputs\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),\n # TODO: FIXME: RuntimeError: \"min_elementwise_cuda\" not implemented for 'ComplexFloat'\n DecorateInfo(unittest.expectedFailure,\n 'TestBinaryUfuncs',\n 'test_type_promotion',\n device_type='cuda'),\n ),\n ),\n BinaryUfuncInfo(\n 'minimum',\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_max_min_binary,\n ref=np.minimum,\n skips=(\n # FIXME: minimum does not accept scalar inputs\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),\n # TODO: FIXME: RuntimeError: \"min_elementwise_cuda\" not implemented for 'ComplexFloat'\n 
DecorateInfo(unittest.expectedFailure,\n 'TestBinaryUfuncs',\n 'test_type_promotion',\n device_type='cuda'),\n ),\n ),\n BinaryUfuncInfo('logical_and',\n ref=np.logical_and,\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_binary_pwise,\n supports_autograd=False,\n always_returns_bool=True,\n skips=(\n # Pre-existing condition; Needs to be fixed\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),\n # FIXME: logical_and does not accept scalar inputs\n DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),\n )),\n BinaryUfuncInfo('logical_or',\n ref=np.logical_or,\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_binary_pwise,\n supports_autograd=False,\n always_returns_bool=True,\n skips=(\n # Pre-existing condition; Needs to be fixed\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),\n # FIXME: logical_or does not accept scalar inputs\n DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),\n )),\n BinaryUfuncInfo('logical_xor',\n ref=np.logical_xor,\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_binary_pwise,\n supports_autograd=False,\n always_returns_bool=True,\n skips=(\n # Pre-existing condition; Needs to be fixed\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),\n # FIXME: logical_xor does not accept scalar inputs\n DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),\n )),\n BinaryUfuncInfo('bitwise_or',\n ref=np.bitwise_or,\n dtypes=integral_types_and(torch.bool),\n sample_inputs_func=sample_inputs_binary_pwise,\n supports_autograd=False,\n skips=(\n # TODO: FIXME: RuntimeError: \"bitwise_or_cuda\" not implemented for 'Half'\n DecorateInfo(unittest.expectedFailure,\n 'TestBinaryUfuncs',\n 'test_type_promotion',\n device_type='cuda'),\n )),\n BinaryUfuncInfo('bitwise_xor',\n ref=np.bitwise_xor,\n dtypes=integral_types_and(torch.bool),\n sample_inputs_func=sample_inputs_binary_pwise,\n supports_autograd=False,\n skips=(\n # TODO: FIXME: RuntimeError: \"bitwise_xor_cuda\" not implemented for 'Half'\n DecorateInfo(unittest.expectedFailure,\n 'TestBinaryUfuncs',\n 'test_type_promotion',\n device_type='cuda'),\n )),\n BinaryUfuncInfo('heaviside',\n ref=lambda a, b: (\n # necessary because np.heaviside incorrectly returns float64 when passed args of dtype int64\n np.int64(np.heaviside(a, b)) if a.dtype == np.int64 and b.dtype == np.int64 else np.heaviside(a, b)\n ),\n dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_binary_pwise,\n supports_autograd=False,\n # FIXME: heaviside does not accept scalar inputs\n skips=(\n # NumPy's heaviside promotes bool to float16\n DecorateInfo(unittest.expectedFailure,\n 'TestBinaryUfuncs',\n 'test_reference_numerics_heavisidel',\n dtypes=(torch.bool,)),\n # RuntimeError: heaviside is not yet implemented for tensors with different dtypes.\n DecorateInfo(unittest.expectedFailure,\n 'TestBinaryUfuncs',\n 'test_type_promotion'),\n # PyTorch's heaviside does not appear to propagate NaNs\n DecorateInfo(unittest.skip(\"Skipped!\"),\n 'TestBinaryUfuncs',\n 'test_reference_numerics_extremal_values'),\n DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),\n )),\n 
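# [editor's note, not part of the original file] The 'heaviside' ref lambda above casts the NumPy\n # result back to int64 because np.heaviside only has floating-point loops, so it returns float64\n # for int64 arguments, while torch.heaviside preserves the input dtype. A minimal illustration,\n # assuming standard numpy/torch behaviour (kept as comments to leave the OpInfo list valid):\n # >>> np.heaviside(np.array([-3], dtype=np.int64), np.array([1], dtype=np.int64)).dtype   # float64\n # >>> torch.heaviside(torch.tensor([-3]), torch.tensor([1])).dtype                        # torch.int64\n 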
BinaryUfuncInfo('lcm',\n ref=np.lcm,\n dtypes=integral_types_and(),\n sample_inputs_func=sample_inputs_binary_pwise,\n supports_autograd=False,\n skips=(\n # TODO: FIXME: lcm doesn't support scalars\n DecorateInfo(unittest.expectedFailure,\n 'TestBinaryUfuncs',\n 'test_broadcast_python_scalar'),\n )),\n BinaryUfuncInfo('gcd',\n ref=np.gcd,\n dtypes=integral_types_and(),\n sample_inputs_func=sample_inputs_binary_pwise,\n supports_autograd=False,\n skips=(\n DecorateInfo(unittest.expectedFailure,\n 'TestBinaryUfuncs',\n 'test_reference_numerics_small_values',\n dtypes=(torch.int8,)),\n # TODO: FIXME: jiterator doesn't support non-tensor inputs\n DecorateInfo(unittest.expectedFailure,\n 'TestBinaryUfuncs',\n 'test_broadcast_python_scalar'),\n # TODO: FIXME: jiterator doesn't support casts to unsupported types\n DecorateInfo(unittest.expectedFailure,\n 'TestBinaryUfuncs',\n 'test_type_promotion',\n device_type='cuda'))),\n BinaryUfuncInfo('isclose',\n ref=np.isclose,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_isclose,\n supports_autograd=False,\n supports_out=False,\n skips=(\n # RuntimeError: Short did not match Int\n DecorateInfo(unittest.expectedFailure,\n 'TestBinaryUfuncs',\n 'test_type_promotion'),\n DecorateInfo(unittest.skip(\"Skipped!\"),\n 'TestBinaryUfuncs',\n 'test_reference_numerics_extremal_values'),\n # FIXME: isclose does not accept scalar inputs\n DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),\n )),\n # `softmax` supports different dtypes based on whether `dtype` argument,\n # is passed or not. Hence two OpInfo entries, one with dtype and other without.\n # https://github.com/pytorch/pytorch/issues/68752\n OpInfo('softmax',\n aliases=('special.softmax', 'nn.functional.softmax',),\n aten_name='softmax',\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_softmax_variant,\n assert_jit_shape_analysis=True,\n assert_autodiffed=True,\n supports_out=False),\n OpInfo('softmax',\n aliases=('special.softmax', 'nn.functional.softmax',),\n variant_test_name=\"with_dtype\",\n aten_name='softmax',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True),\n assert_autodiffed=True,\n supports_out=False),\n # `softmin` supports different dtypes based on whether `dtype` argument,\n # is passed or not. 
Hence two OpInfo entries, one with dtype and other without.\n # https://github.com/pytorch/pytorch/issues/68752\n OpInfo('nn.functional.softmin',\n aten_name='softmin',\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_softmax_variant,\n assert_jit_shape_analysis=False,\n assert_autodiffed=False,\n supports_out=False),\n OpInfo('nn.functional.softmin',\n variant_test_name=\"with_dtype\",\n aten_name='softmin',\n dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),\n sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True),\n assert_autodiffed=False,\n supports_out=False),\n OpInfo(\n \"nn.functional.cross_entropy\",\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_cross_entropy,\n supports_out=False,\n decorators=(\n DecorateInfo(\n toleranceOverride({torch.float32: tol(atol=1e-5, rtol=1e-3)}),\n \"TestJit\",\n \"test_variant_consistency_jit\",\n device_type=\"cpu\",\n ),\n ),\n skips=(\n # AssertionError: False is not true : Scalars failed to compare as equal! 0 != 1536\n # test_ops.TestJitCUDA.test_variant_consistency_jit_nn_functional_cross_entropy_cuda_float32 leaked\n # 1536 bytes CUDA memory on device 0\n DecorateInfo(\n unittest.expectedFailure,\n \"TestJit\",\n \"test_variant_consistency_jit\",\n device_type=\"cuda\",\n ),\n )\n ),\n OpInfo('nn.functional.normalize',\n dtypes=floating_and_complex_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_normalize),\n OpInfo('aminmax',\n ref=lambda x, dim=None, keepdim=False: (np.amin(x, axis=dim, keepdims=keepdim), np.amax(x, axis=dim, keepdims=keepdim)),\n dtypes=all_types_and(torch.bool),\n dtypesIfCUDA=all_types_and(torch.bool, torch.float16, torch.bfloat16),\n decorators=(onlyNativeDeviceTypes,),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_aminmax),\n OpInfo('as_strided',\n op=lambda x, size, stride, storage_offset=0:\n torch.as_strided(x, size, stride, storage_offset=storage_offset),\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_as_strided,\n skips=(\n # FIXME: AssertionError: False is not true : Tensors failed to compare as equal!\n # With rtol=1e-07 and atol=1e-07, found 1 element(s) (out of 1) whose difference(s)\n # exceeded the margin of error (including 0 nan comparisons). The greatest difference\n # was 1.0 (1.0 vs. 
-0.0), which occurred at index 0.\n DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),\n # AssertionError: False is not true : Tensors failed to compare as equal!\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'),\n # AssertionError: False is not true : Scalars failed to compare as equal!\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'),)),\n OpInfo('nn.functional.cosine_similarity',\n aten_name=\"cosine_similarity\",\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_cosine_similarity),\n OpInfo('nn.functional.adaptive_avg_pool1d',\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n supports_out=False,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n sample_inputs_func=sample_inputs_adaptive_avg_pool1d),\n OpInfo('nn.functional.adaptive_avg_pool2d',\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n decorators=(\n # RuntimeError:\n # adaptive_avg_pool2d(Tensor input, int[2] output_size) -> (Tensor):\n # Expected a value of type 'List[int]' for argument 'output_size' but\n # instead found type 'Tuple[NoneType, int]'. :\n # File \"<string>\", line 3\n # def the_method(i0):\n # return torch.nn.functional.adaptive_avg_pool2d(i0, (None, 7))\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n sample_inputs_func=sample_inputs_adaptive_avg_pool2d),\n OpInfo('nn.functional.adaptive_avg_pool3d',\n dtypes=floating_types_and(torch.half),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n decorators=(\n # RuntimeError:\n # adaptive_avg_pool3d(Tensor input, int[3] output_size) -> (Tensor):\n # Expected a value of type 'List[int]' for argument 'output_size' but\n # instead found type 'Tuple[NoneType, NoneType, NoneType]'. :\n # File \"<string>\", line 3\n #\n # def the_method(i0):\n # return torch.nn.functional.adaptive_avg_pool3d(i0, (None, None, None))\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE\n #\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n sample_inputs_func=sample_inputs_adaptive_avg_pool3d),\n OpInfo('nn.functional.adaptive_max_pool1d',\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n supports_out=False,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n sample_inputs_func=sample_inputs_adaptive_max_pool1d),\n OpInfo('nn.functional.adaptive_max_pool2d',\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n decorators=(\n # RuntimeError:\n # adaptive_max_pool2d(Tensor input, int[2] output_size) -> (Tensor):\n # Expected a value of type 'List[int]' for argument 'output_size' but\n # instead found type 'Tuple[NoneType, int]'. 
:\n # File \"<string>\", line 3\n # def the_method(i0):\n # return torch.nn.functional.adaptive_max_pool2d(i0, (None, 7))\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n sample_inputs_func=sample_inputs_adaptive_max_pool2d),\n OpInfo('nn.functional.adaptive_max_pool3d',\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n decorators=(\n # RuntimeError:\n # adaptive_max_pool3d(Tensor input, int[3] output_size) -> (Tensor):\n # Expected a value of type 'List[int]' for argument 'output_size' but\n # instead found type 'Tuple[NoneType, NoneType, NoneType]'. :\n # File \"<string>\", line 3\n #\n # def the_method(i0):\n # return torch.nn.functional.adaptive_max_pool3d(i0, (None, None, None))\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE\n #\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n sample_inputs_func=sample_inputs_adaptive_max_pool3d),\n OpInfo('nn.functional.avg_pool1d',\n aten_name='avg_pool1d',\n supports_autograd=True,\n supports_out=False,\n dtypes=floating_types_and(torch.int64),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n sample_inputs_func=sample_inputs_avgpool1d),\n OpInfo('nn.functional.avg_pool3d',\n aten_name='avg_pool3d',\n supports_autograd=True,\n supports_out=False,\n dtypes=floating_types_and(torch.int64),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n sample_inputs_func=sample_inputs_avgpool3d),\n OpInfo('nn.functional.relu',\n aten_name=\"relu\",\n supports_autograd=True,\n dtypes=all_types_and(torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_nn_activation_relu,\n supports_out=False,\n supports_forward_ad=True),\n OpInfo('nn.functional.conv_transpose1d',\n aten_name='conv_transpose1d',\n aliases=('conv_transpose1d',),\n dtypes=floating_types_and(torch.int64),\n dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n sample_inputs_func=sample_inputs_conv_transpose1d,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n decorators=[\n DecorateInfo(\n toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }),\n 'TestCommon', 'test_variant_consistency_eager', device_type='cuda')],\n skips=(\n # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at\n # \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":104, please report a bug to PyTorch.\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False,),\n OpInfo('nn.functional.conv_transpose2d',\n aten_name='conv_transpose2d',\n aliases=('conv_transpose2d',),\n dtypes=floating_types_and(torch.int64),\n dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n sample_inputs_func=sample_inputs_conv_transpose2d,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n decorators=[\n DecorateInfo(\n toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }),\n 'TestCommon', 'test_variant_consistency_eager', device_type='cuda')],\n skips=(\n # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at\n # \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":104, please 
report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False,),\n OpInfo('nn.functional.conv_transpose3d',\n aten_name='conv_transpose3d',\n aliases=('conv_transpose3d',),\n dtypes=floating_types_and(torch.int64),\n dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n sample_inputs_func=sample_inputs_conv_transpose3d,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n decorators=[\n DecorateInfo(\n toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }),\n 'TestCommon', 'test_variant_consistency_eager', device_type='cuda'),\n DecorateInfo(\n toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }),\n 'TestCommon', 'test_noncontiguous_samples', device_type='cuda')],\n skips=(\n # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at\n # \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":104, please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False,),\n OpInfo('nn.functional.conv1d',\n aliases=('conv1d',),\n aten_name='conv1d',\n dtypes=floating_types_and(torch.int64),\n dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n sample_inputs_func=sample_inputs_conv1d,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n skips=(\n # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at\n # \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":103, please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False,),\n OpInfo('nn.functional.conv2d',\n aliases=('conv2d',),\n aten_name='conv2d',\n dtypes=floating_types_and(torch.int64),\n dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n sample_inputs_func=partial(sample_inputs_conv2d),\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n skips=(\n # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at\n # \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":103, please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False,),\n OpInfo('nn.functional.group_norm',\n aten_name='group_norm',\n aliases=('group_norm',),\n ref=reference_group_norm,\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_out=False,\n decorators=[\n # RuntimeError: Cannot insert a Tensor that requires grad as a constant.\n # Consider making it a parameter or input, or detaching the gradient\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,))\n ],\n sample_inputs_func=sample_inputs_group_norm,),\n OpInfo('nn.functional.instance_norm',\n # no ref because instance_norm will often have numerical instability (large numbers or nan)\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_out=False,\n decorators=[\n # RuntimeError: Cannot insert a Tensor that requires grad as a constant.\n # Consider making it a parameter or input, or detaching the gradient\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,))\n ],\n sample_inputs_func=sample_inputs_instance_norm,),\n OpInfo('nn.functional.layer_norm',\n aten_name='layer_norm',\n aliases=('layer_norm',),\n 
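# A minimal standalone sketch (not one of the op entries) of what a group_norm
# reference such as `reference_group_norm` above has to reproduce, assuming the
# default eps=1e-5 and no affine weight/bias:
import torch
import torch.nn.functional as F

x = torch.randn(2, 6, 4)                   # (N, C, L) with C divisible by num_groups
num_groups = 3
out = F.group_norm(x, num_groups)          # eps defaults to 1e-5

xg = x.reshape(2, num_groups, -1)          # fold each group's channels and positions together
mean = xg.mean(dim=-1, keepdim=True)
var = xg.var(dim=-1, unbiased=False, keepdim=True)
manual = ((xg - mean) / (var + 1e-5).sqrt()).reshape_as(x)
assert torch.allclose(out, manual, atol=1e-6)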
ref=reference_layer_norm,\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_out=False,\n decorators=[\n DecorateInfo(\n toleranceOverride({torch.float32: tol(atol=1e-05, rtol=1e-03)}),\n 'TestCommon', 'test_reference_testing'\n )\n ],\n sample_inputs_func=sample_inputs_layer_norm,),\n OpInfo('nn.functional.local_response_norm',\n dtypes=floating_types_and(torch.int64),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_out=False,\n decorators=[\n # RuntimeError: falseINTERNAL ASSERT FAILED at\n # \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":185, please report a bug to PyTorch.\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,))\n ],\n sample_inputs_func=sample_inputs_local_response_norm,),\n OpInfo('nn.functional.pad',\n variant_test_name='constant',\n aten_name='constant_pad_nd',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n sample_inputs_func=partial(sample_inputs_nn_pad, mode='constant'),\n supports_out=False),\n OpInfo('nn.functional.pad',\n variant_test_name='reflect',\n dtypes=floating_and_complex_types(),\n dtypesIfCUDA=floating_and_complex_types_and(torch.half),\n sample_inputs_func=partial(sample_inputs_nn_pad, mode='reflect'),\n skips=(\n # Doesn't have a corresponding aten operator.\n # RuntimeError: falseINTERNAL ASSERT FAILED at\n # \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":185, please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),\n ),\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n supports_out=False),\n OpInfo('nn.functional.pad',\n variant_test_name='replicate',\n dtypes=floating_and_complex_types(),\n dtypesIfCUDA=floating_and_complex_types_and(torch.half),\n sample_inputs_func=partial(sample_inputs_nn_pad, mode='replicate'),\n skips=(\n # Doesn't have a corresponding aten operator.\n # RuntimeError: falseINTERNAL ASSERT FAILED at\n # \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":185, please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),\n ),\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n supports_out=False),\n OpInfo('nn.functional.pad',\n variant_test_name='circular',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n sample_inputs_func=partial(sample_inputs_nn_pad, mode='circular'),\n supports_forward_ad=True,\n check_batched_grad=False,\n # https://github.com/pytorch/pytorch/issues/66357\n check_batched_forward_grad=False,\n skips=(\n # Doesn't have a corresponding aten operator.\n # RuntimeError: falseINTERNAL ASSERT FAILED at\n # \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":185, please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),\n ),\n supports_out=False),\n OpInfo('nn.functional.hardswish',\n aten_name=\"hardswish\",\n supports_autograd=True,\n assert_autodiffed=True,\n sample_inputs_func=sample_inputs_hardswish,\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n supports_gradgrad=False,\n supports_forward_ad=True,\n supports_out=False,\n autodiff_nonfusible_nodes=[\"aten::hardswish\"]),\n OpInfo('nn.functional.unfold',\n aten_name='im2col',\n 
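# The four nn.functional.pad variants above map one-to-one onto the `mode=`
# values of F.pad; a quick standalone illustration of each mode on a
# (N, C, W) input:
import torch
import torch.nn.functional as F

t = torch.arange(4.).reshape(1, 1, 4)              # values [0, 1, 2, 3]
print(F.pad(t, (1, 1), mode='constant', value=0))  # [0, 0, 1, 2, 3, 0]
print(F.pad(t, (1, 1), mode='reflect'))            # [1, 0, 1, 2, 3, 2] - mirrors, edge not repeated
print(F.pad(t, (1, 1), mode='replicate'))          # [0, 0, 1, 2, 3, 3] - repeats the edge value
print(F.pad(t, (1, 1), mode='circular'))           # [3, 0, 1, 2, 3, 0] - wraps around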
dtypes=floating_and_complex_types_and(torch.half),\n dtypesIfCPU=floating_and_complex_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_nn_unfold,\n skips=(\n # RuntimeError: false\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":185,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False),\n OpInfo('nn.functional.interpolate',\n aten_name=\"interpolate\",\n variant_test_name='nearest',\n supports_autograd=True,\n dtypes=floating_types_and(torch.uint8),\n dtypesIfCUDA=floating_types_and(torch.half, torch.uint8),\n sample_inputs_func=partial(sample_inputs_interpolate, 'nearest'),\n skips=(\n # RuntimeError: false\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":185,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False),\n OpInfo('nn.functional.interpolate',\n aten_name=\"interpolate\",\n variant_test_name='linear',\n supports_autograd=True,\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.half),\n sample_inputs_func=partial(sample_inputs_interpolate, 'linear'),\n skips=(\n # RuntimeError: false\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":185,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False),\n OpInfo('nn.functional.interpolate',\n aten_name=\"interpolate\",\n variant_test_name='bilinear',\n supports_autograd=True,\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.half),\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n sample_inputs_func=partial(sample_inputs_interpolate, 'bilinear'),\n skips=(\n # RuntimeError: false\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":185,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False),\n OpInfo('nn.functional.interpolate',\n aten_name=\"interpolate\",\n variant_test_name='bicubic',\n supports_autograd=True,\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.half),\n sample_inputs_func=partial(sample_inputs_interpolate, 'bicubic'),\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n skips=(\n # RuntimeError: false\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":185,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False),\n OpInfo('nn.functional.interpolate',\n aten_name=\"interpolate\",\n variant_test_name='trilinear',\n supports_autograd=True,\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.half),\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n sample_inputs_func=partial(sample_inputs_interpolate, 'trilinear'),\n skips=(\n # RuntimeError: false\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":185,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False),\n OpInfo('nn.functional.interpolate',\n aten_name=\"interpolate\",\n variant_test_name='area',\n supports_autograd=True,\n dtypes=floating_types(),\n 
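# Each nn.functional.interpolate variant above pins down one `mode=` of
# F.interpolate; a standalone shape check, assuming a 4D (N, C, H, W) input:
import torch
import torch.nn.functional as F

img = torch.randn(1, 3, 8, 8)
up_nearest = F.interpolate(img, scale_factor=2, mode='nearest')
up_bilinear = F.interpolate(img, scale_factor=2, mode='bilinear', align_corners=False)
assert up_nearest.shape == up_bilinear.shape == (1, 3, 16, 16)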
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=partial(sample_inputs_interpolate, 'area'),\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n skips=(\n # RuntimeError: false\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":185,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False),\n OpInfo('nn.functional.upsample_bilinear',\n supports_autograd=True,\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.half),\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n sample_inputs_func=partial(sample_inputs_upsample, 'bilinear'),\n skips=(\n # RuntimeError: false\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":185,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False),\n OpInfo('nn.functional.upsample_nearest',\n supports_autograd=True,\n dtypes=floating_types_and(torch.uint8),\n dtypesIfCUDA=floating_types_and(torch.half, torch.uint8),\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n sample_inputs_func=partial(sample_inputs_upsample, 'nearest'),\n skips=(\n # RuntimeError: false\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":185,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False),\n OpInfo('nn.functional.leaky_relu',\n aliases=None,\n aten_name=\"leaky_relu\",\n sample_inputs_func=sample_inputs_leaky_relu,\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_autograd=True,\n assert_autodiffed=True,\n supports_gradgrad=True,\n supports_out=False,\n supports_forward_ad=True,\n autodiff_nonfusible_nodes=[\"aten::leaky_relu\"]),\n OpInfo('nn.functional.avg_pool2d',\n aten_name='avg_pool2d',\n supports_autograd=True,\n supports_out=False,\n dtypes=floating_types_and(torch.int64),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_avgpool2d),\n OpInfo('nn.functional.fractional_max_pool2d',\n supports_autograd=True,\n supports_out=False,\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.float16),\n test_neg_view=False,\n sample_inputs_func=sample_inputs_fractional_max_pool2d,\n decorators=[\n # FIXME: both derivatives are implemented incorrectly\n # https://github.com/pytorch/pytorch/issues/69322\n DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_grad'),\n DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_gradgrad'),\n # FIXME: produces incorrect output on non-contiguous inputs\n # https://github.com/pytorch/pytorch/issues/69325\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'),\n # FIXME: AssertionError: False is not true : Tensors failed to compare as equal!\n DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),\n # RuntimeError: input->type()->kind() == TypeKind::OptionalType\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":270\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ], ),\n OpInfo('nn.functional.fractional_max_pool3d',\n supports_autograd=True,\n supports_out=False,\n dtypes=floating_types(),\n 
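# Unlike max_pool2d, the fractional_max_pool2d tested above draws its pooling
# regions at random; a standalone call, with the 8x8 input and (5, 5) output
# size being illustrative choices:
import torch
import torch.nn.functional as F

x = torch.randn(1, 1, 8, 8)
out, idx = F.fractional_max_pool2d(x, kernel_size=2, output_size=(5, 5),
                                   return_indices=True)
assert out.shape == (1, 1, 5, 5)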
dtypesIfCUDA=floating_types_and(torch.float16),\n test_neg_view=False,\n sample_inputs_func=sample_inputs_fractional_max_pool3d,\n decorators=[\n # FIXME: both derivatives are implemented incorrectly\n # https://github.com/pytorch/pytorch/issues/69322\n DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_grad'),\n DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_gradgrad'),\n # FIXME: produces incorrect output on non-contiguous inputs\n # https://github.com/pytorch/pytorch/issues/69325\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'),\n # FIXME: AssertionError: False is not true : Tensors failed to compare as equal!\n DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),\n # RuntimeError: input->type()->kind() == TypeKind::OptionalType\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":270\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ], ),\n OpInfo('nn.functional.max_pool1d',\n aten_name='max_pool1d',\n supports_autograd=True,\n supports_out=False,\n # TODO: add shape checks\n assert_jit_shape_analysis=False,\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n skips=(\n # Pre-existing condition; Needs to be fixed\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),\n ),\n sample_inputs_func=sample_inputs_max_pool),\n OpInfo('nn.functional.max_pool2d',\n aten_name='max_pool2d',\n supports_autograd=True,\n # Vmap is not happy with non-contiguous (channels_last) inputs\n check_batched_gradgrad=False,\n supports_out=False,\n assert_jit_shape_analysis=True,\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_max_pool),\n OpInfo('nn.functional.max_pool3d',\n aten_name='max_pool3d',\n supports_autograd=True,\n supports_out=False,\n # TODO: add shape checks\n assert_jit_shape_analysis=False,\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n # TODO: investigate nondeterminism\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n sample_inputs_func=sample_inputs_max_pool),\n OpInfo('nn.functional.linear',\n aten_name='linear',\n supports_autograd=True,\n sample_inputs_func=sample_inputs_linear,\n dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),\n dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16,\n *[torch.bfloat16] if CUDA11OrLater else []),\n # linear calls mm under the hood which is nondeterministic on CUDA\n # https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html#torch.use_deterministic_algorithms\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n supports_forward_ad=True,\n # See https://github.com/pytorch/pytorch/issues/66357\n check_batched_forward_grad=False,\n supports_out=False),\n OpInfo('nn.functional.bilinear',\n aten_name='bilinear',\n supports_autograd=True,\n sample_inputs_func=sample_inputs_bilinear,\n dtypes=all_types_and(torch.half, torch.bfloat16),\n dtypesIfROCM=floating_types_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n backward_dtypesIfCUDA=floating_types_and(torch.float16, 
*[torch.bfloat16] if CUDA11OrLater else []),\n skips=(\n # FIXME: bfloat16 backward support likely depends on CUDA11+ and SM53+\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_dtypes'),\n ),\n supports_forward_ad=False,\n supports_out=False),\n OpInfo('nn.functional.glu',\n aten_name='glu',\n supports_autograd=True,\n sample_inputs_func=sample_inputs_glu,\n dtypes=floating_types(),\n dtypesIfROCM=floating_types_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n backward_dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_forward_ad=False,\n supports_out=False),\n UnaryUfuncInfo(\n 'nn.functional.elu',\n ref=lambda x, alpha=1.0, inplace=False:\n np.maximum(0., x) + np.minimum(0., alpha * (np.exp(x) - 1)),\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_forward_ad=False,\n supports_autograd=True,\n assert_autodiffed=False,\n supports_gradgrad=True,\n supports_out=False,\n sample_kwargs=lambda device, dtype, input:\n ({'alpha': 0.8}, {'alpha': 0.8}),\n inplace_variant=lambda x, alpha=1.0:\n torch.nn.functional.elu(x, alpha, inplace=True),\n decorators=[\n DecorateInfo(\n toleranceOverride({\n torch.float16: tol(atol=1e-03, rtol=1.2e-03),\n torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03)\n }),\n 'TestUnaryUfuncs', device_type='cuda',\n ), ],\n ),\n OpInfo(\n 'nn.functional.prelu',\n ref=lambda x, weight:\n np.maximum(0., x) + np.minimum(0., x) *\n (weight if x.ndim == 1 else weight.reshape([weight.size if i == 1 else 1 for i in range(0, x.ndim)])),\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.float16),\n supports_forward_ad=False,\n supports_autograd=True,\n assert_autodiffed=False,\n supports_gradgrad=True,\n supports_out=False,\n sample_inputs_func=sample_inputs_nn_functional_prelu,\n decorators=[\n # FIXME: second derivative is implemented but seems to be incorrect\n # https://github.com/pytorch/pytorch/issues/68760\n DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_gradgrad'),\n # RuntimeError: Cannot insert a Tensor that requires grad as a constant.\n # Consider making it a parameter or input, or detaching the gradient\n # https://github.com/pytorch/pytorch/issues/68752\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'), ],\n ),\n UnaryUfuncInfo(\n 'nn.functional.celu',\n ref=lambda x, alpha=1.0, inplace=False:\n np.maximum(0., x) + np.minimum(0., alpha * (np.exp(x / alpha) - 1)),\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_forward_ad=False,\n supports_autograd=True,\n assert_autodiffed=False,\n supports_gradgrad=True,\n supports_out=False,\n sample_kwargs=lambda device, dtype, input:\n ({'alpha': 0.8}, {'alpha': 0.8}),\n inplace_variant=lambda x, alpha=1.0:\n torch.nn.functional.celu(x, alpha, inplace=True),\n decorators=[\n DecorateInfo(\n toleranceOverride({\n torch.float16: tol(atol=1e-03, rtol=1.2e-03),\n torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03)\n }),\n 'TestUnaryUfuncs', device_type='cuda',\n ), ],\n ),\n UnaryUfuncInfo(\n 'nn.functional.rrelu',\n op=lambda input, *args, **kwargs:\n wrapper_set_seed(torch.nn.functional.rrelu, input, *args, **kwargs),\n ref=_NOTHING,\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n gradcheck_wrapper=wrapper_set_seed,\n supports_forward_ad=False,\n supports_autograd=True,\n assert_autodiffed=False,\n 
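# Standalone cross-check of the elu reference lambda above,
# max(0, x) + min(0, alpha * (exp(x) - 1)), against F.elu; the grid and
# alpha=0.8 are arbitrary:
import numpy as np
import torch
import torch.nn.functional as F

x = torch.linspace(-3., 3., steps=7)
alpha = 0.8
ref = np.maximum(0., x.numpy()) + np.minimum(0., alpha * (np.exp(x.numpy()) - 1))
assert torch.allclose(F.elu(x, alpha=alpha), torch.from_numpy(ref).to(x.dtype))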
supports_gradgrad=True,\n supports_out=False,\n sample_kwargs=lambda device, dtype, input:\n ({'lower': 0., 'upper': 1.}, {'lower': 0., 'upper': 1.}),\n inplace_variant=lambda input, *args, **kwargs:\n wrapper_set_seed(partial(torch.nn.functional.rrelu, inplace=True), input, *args, **kwargs),\n decorators=[\n DecorateInfo(\n toleranceOverride({\n torch.float16: tol(atol=1e-03, rtol=1.2e-03),\n torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03)\n }),\n 'TestUnaryUfuncs', device_type='cuda',\n ),\n # Probably because we have used lambda for the op here\n # AssertionError: JIT Test does not execute any logic\n DecorateInfo(\n unittest.skip(\"Skipped!\"),\n 'TestJit', 'test_variant_consistency_jit'\n ), ],\n ),\n UnaryUfuncInfo(\n 'nn.functional.selu',\n ref=lambda x, inplace=False:\n 1.0507009873554804934193349852946 * (\n np.maximum(0., x) + np.minimum(0., 1.6732632423543772848170429916717 * (np.exp(x) - 1))\n ),\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_forward_ad=False,\n supports_autograd=True,\n assert_autodiffed=False,\n supports_gradgrad=True,\n supports_out=False,\n inplace_variant=lambda x: torch.nn.functional.selu(x, inplace=True),\n decorators=[\n DecorateInfo(\n toleranceOverride({\n torch.float16: tol(atol=1e-2, rtol=1.8e-2),\n torch.bfloat16: tol(atol=1e-2, rtol=1.8e-2)\n }),\n 'TestUnaryUfuncs', device_type='cuda',\n ), ],\n ),\n UnaryUfuncInfo(\n 'nn.functional.silu',\n ref=lambda x, inplace=False:\n x / (1 + np.exp(-x)),\n dtypes=floating_and_complex_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_forward_ad=False,\n supports_autograd=False,\n assert_autodiffed=False,\n supports_out=False,\n inplace_variant=lambda x: torch.nn.functional.silu(x, inplace=True),\n decorators=[\n DecorateInfo(\n toleranceOverride({\n torch.float16: tol(atol=1e-3, rtol=1e-3),\n torch.bfloat16: tol(atol=1e-4, rtol=1e-4)\n }),\n 'TestUnaryUfuncs', device_type='cuda',\n ), ],\n skips=[\n # FIXME: numpy reference diverges: Comparing (nan+nanj) and (-0+0j)\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', dtypes=(torch.complex64,)), ],\n ),\n UnaryUfuncInfo(\n 'nn.functional.hardsigmoid',\n ref=reference_hardsigmoid,\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_autograd=True,\n assert_autodiffed=False,\n supports_gradgrad=False,\n supports_out=False,\n inplace_variant=partial(torch.nn.functional.hardsigmoid, inplace=True),\n decorators=[\n DecorateInfo(\n toleranceOverride({torch.float16: tol(atol=1e-04, rtol=0.001)}), 'TestUnaryUfuncs', device_type='cuda',), ],\n skips=[\n # still want to test that first derivative works though second derivative isn't supported\n DecorateInfo(unittest.expectedFailure, 'TestGradients', \"test_inplace_gradgrad\"),\n # produces 0 instead of nan on ROCM\n DecorateInfo(unittest.expectedFailure,\n 'TestUnaryUfuncs', \"test_reference_numerics_extremal\",\n dtypes=(torch.bfloat16, torch.float16, torch.float32,), device_type='cuda',\n active_if=(TEST_WITH_ROCM)), ]\n ),\n UnaryUfuncInfo(\n 'nn.functional.logsigmoid',\n aten_name=\"log_sigmoid\",\n ref=reference_logsigmoid,\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.float16),\n supports_autograd=True,\n assert_autodiffed=False,\n supports_gradgrad=True,\n supports_out=False,\n ),\n UnaryUfuncInfo(\n 'nn.functional.mish',\n ref=lambda x: x * np.tanh(reference_softplus(x)),\n dtypes=floating_types(),\n 
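# The rrelu entry above routes everything through a seed-setting wrapper
# because rrelu samples its negative slope from U(lower, upper) in training
# mode; the same idea by hand, on CPU:
import torch
import torch.nn.functional as F

x = torch.randn(5)
torch.manual_seed(0)
a = F.rrelu(x, lower=0.1, upper=0.3, training=True)
torch.manual_seed(0)
b = F.rrelu(x, lower=0.1, upper=0.3, training=True)
assert torch.equal(a, b)  # same seed, same random slopes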
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_forward_ad=False,\n supports_autograd=True,\n assert_autodiffed=False,\n supports_gradgrad=True,\n supports_out=False,\n inplace_variant=partial(torch.nn.functional.mish, inplace=True),\n decorators=[\n DecorateInfo(\n toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-03)}), 'TestUnaryUfuncs', device_type='cuda',), ],\n ),\n UnaryUfuncInfo(\n 'nn.functional.softsign',\n ref=lambda x: x / (np.abs(x) + 1),\n dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool),\n supports_forward_ad=True,\n supports_autograd=True,\n assert_autodiffed=False,\n supports_gradgrad=True,\n supports_out=False,\n decorators=[\n DecorateInfo(\n toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1.3e-04)}), 'TestUnaryUfuncs',), ],\n skips=(\n # pytorch computes (0+nanj), numpy computes (-5e-18-1j) for input (-501.-1.0000e+20j)\n DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs',\n \"test_reference_numerics_hard\", dtypes=(torch.complex64,)),),\n ),\n UnaryUfuncInfo(\n 'nn.functional.tanhshrink',\n ref=lambda x: x - np.tanh(x),\n dtypes=all_types_and_complex_and(torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16),\n supports_forward_ad=True,\n supports_autograd=True,\n assert_autodiffed=False,\n supports_gradgrad=True,\n supports_out=False,\n decorators=[\n DecorateInfo(\n toleranceOverride({torch.bfloat16: tol(atol=1e-02, rtol=1.6e-02)}), 'TestUnaryUfuncs',), ],\n skips=(\n # in each case, pytorch will produce a nan while numpy will not\n DecorateInfo(unittest.expectedFailure,\n 'TestUnaryUfuncs', \"test_reference_numerics_normal\",\n dtypes=(torch.complex64,), active_if=(IS_MACOS)),\n DecorateInfo(unittest.expectedFailure,\n 'TestUnaryUfuncs', \"test_reference_numerics_hard\",\n dtypes=(torch.complex64,), active_if=(IS_MACOS)),\n DecorateInfo(unittest.expectedFailure,\n 'TestUnaryUfuncs', \"test_reference_numerics_extremal\",\n dtypes=(torch.complex64,), device_type='cpu',\n active_if=(IS_MACOS or IS_WINDOWS)),)\n ),\n OpInfo(\n 'nn.functional.threshold',\n ref=lambda x, threshold, value: np.where(x > threshold, x, value).astype(x.dtype),\n dtypes=all_types_and(torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),\n supports_forward_ad=False,\n supports_autograd=True,\n assert_autodiffed=False,\n supports_gradgrad=True,\n supports_out=False,\n sample_inputs_func=sample_inputs_threshold,\n ),\n OpInfo('nextafter',\n dtypes=floating_types_and(torch.bfloat16),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_nextafter),\n OpInfo('topk',\n dtypes=all_types_and(torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16),\n sample_inputs_func=sample_inputs_topk),\n # Multiple variants for batch_norm to test with and without cuDNN disabled\n # See https://github.com/pytorch/pytorch/pull/63218#discussion_r688549391 for more details\n OpInfo('nn.functional.batch_norm',\n aten_name='batch_norm',\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_batch_norm),\n # This variant tests batch_norm with cuDNN disabled only on CUDA devices\n OpInfo('nn.functional.batch_norm',\n variant_test_name='without_cudnn',\n aten_name='batch_norm',\n dtypes=empty_types(),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_out=False,\n 
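# The 'without_cudnn' batch_norm variant here exists because on CUDA the
# cuDNN kernel can be toggled globally, which changes the code path under
# test; a standalone sketch of disabling it around a call:
import torch
import torch.nn.functional as F

x = torch.randn(4, 3, 8, 8)
running_mean, running_var = torch.zeros(3), torch.ones(3)
with torch.backends.cudnn.flags(enabled=False):
    y = F.batch_norm(x, running_mean, running_var, training=True)
assert y.shape == x.shape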
decorators=[onlyCUDA, disablecuDNN],\n sample_inputs_func=sample_inputs_batch_norm),\n # We have to add two OpInfo entries for `igamma` and `igammac`. The first is the\n # standard entry; the second runs gradcheck tests on the second argument.\n OpInfo('igamma',\n dtypes=floating_types_and(torch.bfloat16, torch.float16),\n aliases=('torch.special.gammainc',),\n dtypesIfCUDA=floating_types(),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_igamma_igammac),\n OpInfo('igamma',\n variant_test_name='grad_other',\n # Since the autograd formula is implemented only for other and\n # the gradcheck test verifies the formula for the input in SampleInput,\n # we permute the arguments.\n op=lambda self, other, **kwargs: torch.igamma(other, self, **kwargs),\n inplace_variant=None,\n method_variant=None,\n dtypes=floating_types_and(torch.bfloat16, torch.float16),\n backward_dtypesIfCPU=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types(),\n backward_dtypesIfCUDA=floating_types(),\n supports_inplace_autograd=False,\n skips=(\n # test does not work with passing lambda for op\n # AssertionError: False is not true : Tensors failed to compare as equal!\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n # test fails because we permute the arguments for the function variant\n # but not for the inplace or method variants.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_variant_consistency_eager'),\n ),\n sample_inputs_func=sample_inputs_igamma_igammac),\n OpInfo('igammac',\n dtypes=floating_types_and(torch.bfloat16, torch.float16),\n aliases=('torch.special.gammaincc',),\n dtypesIfCUDA=floating_types(),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_igamma_igammac),\n OpInfo('igammac',\n variant_test_name='grad_other',\n # Since the autograd formula is implemented only for other and\n # the gradcheck test verifies the formula for the input in SampleInput,\n # we permute the arguments.\n op=lambda self, other, **kwargs: torch.igammac(other, self, **kwargs),\n inplace_variant=None,\n method_variant=None,\n dtypes=floating_types_and(torch.bfloat16, torch.float16),\n backward_dtypesIfCPU=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types(),\n backward_dtypesIfCUDA=floating_types(),\n supports_inplace_autograd=False,\n skips=(\n # test does not work with passing lambda for op\n # AssertionError: False is not true : Tensors failed to compare as equal!\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n # test fails because we permute the arguments for the function variant\n # but not for the inplace or method variants.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_variant_consistency_eager'),\n ),\n sample_inputs_func=sample_inputs_igamma_igammac),\n OpInfo('nn.functional.softshrink',\n aten_name=\"softshrink\",\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_autograd=True,\n assert_autodiffed=False,\n sample_inputs_func=sample_inputs_softshrink_hardshrink_hardtanh,\n supports_gradgrad=True,\n supports_out=False,\n supports_forward_ad=False,\n ),\n OpInfo('nn.functional.hardshrink',\n aten_name=\"hardshrink\",\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_autograd=True,\n assert_autodiffed=True,\n sample_inputs_func=sample_inputs_softshrink_hardshrink_hardtanh,\n supports_gradgrad=True,\n supports_out=False,\n supports_forward_ad=True,\n autodiff_nonfusible_nodes=[\"aten::hardshrink\"]),\n 
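# Standalone sketch of the 'grad_other' trick used for igamma/igammac above:
# autograd is implemented only for `other`, while the gradcheck tests
# differentiate the SampleInput input, so the entry swaps the operands via a
# lambda. `igamma_swapped` and the 0.5 offsets below are illustrative choices:
import torch

def igamma_swapped(self, other):
    return torch.igamma(other, self)  # the gradient now flows through `self`

a = torch.rand(3, dtype=torch.double) + 0.5               # fixed, non-differentiated operand
x = (torch.rand(3, dtype=torch.double) + 0.5).requires_grad_()
assert torch.autograd.gradcheck(igamma_swapped, (x, a))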
OpInfo('nn.functional.hardtanh',\n aten_name=\"hardtanh\",\n dtypes=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64, torch.bfloat16),\n backward_dtypesIfCPU=all_types(),\n dtypesIfCUDA=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64, torch.float16, torch.bfloat16),\n backward_dtypesIfCUDA=floating_types_and(torch.float16),\n supports_autograd=True,\n assert_autodiffed=True,\n sample_inputs_func=sample_inputs_softshrink_hardshrink_hardtanh,\n supports_gradgrad=True,\n supports_out=False,\n supports_forward_ad=True,\n autodiff_nonfusible_nodes=[\"aten::hardtanh\"],\n ),\n OpInfo('nn.functional.gelu',\n aten_name=\"gelu\",\n supports_autograd=True,\n assert_autodiffed=True,\n sample_inputs_func=sample_inputs_gelu,\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n supports_gradgrad=True,\n supports_out=False,\n autodiff_nonfusible_nodes=[\"aten::gelu\"]),\n OpInfo('nn.functional.relu6',\n aten_name=\"relu6\",\n dtypes=all_types_and(torch.bfloat16),\n backward_dtypesIfCPU=floating_types(),\n dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),\n backward_dtypesIfCUDA=floating_types_and(torch.float16),\n supports_autograd=True,\n assert_autodiffed=True,\n sample_inputs_func=sample_inputs_softshrink_hardshrink_hardtanh,\n supports_gradgrad=True,\n supports_out=False,\n supports_forward_ad=True,\n autodiff_nonfusible_nodes=[\"aten::relu6\"]),\n OpInfo('mm',\n dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n assert_autodiffed=True,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_mm),\n OpInfo('mode',\n op=torch.mode,\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_mode,),\n MvlGammaInfo(variant_test_name='mvlgamma_p_1',\n domain=(1, None),\n skips=skips_mvlgamma(),\n sample_kwargs=lambda device, dtype, input: ({'p': 1}, {'d': 1})),\n MvlGammaInfo(variant_test_name='mvlgamma_p_3',\n domain=(2, None),\n skips=skips_mvlgamma(skip_redundant=True) + (\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=(torch.float16,)),\n ),\n sample_kwargs=lambda device, dtype, input: ({'p': 3}, {'d': 3})),\n MvlGammaInfo(variant_test_name='mvlgamma_p_5',\n domain=(3, None),\n skips=skips_mvlgamma(skip_redundant=True) + (\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=(torch.float16,)),\n ),\n sample_kwargs=lambda device, dtype, input: ({'p': 5}, {'d': 5})),\n OpInfo('ne',\n aliases=('not_equal',),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_comparison_ops),\n OpInfo('narrow',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_out=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_narrow),\n UnaryUfuncInfo('neg',\n aliases=('negative', ),\n ref=np.negative,\n dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),\n error_inputs_func=error_inputs_neg,\n supports_forward_ad=True,\n supports_sparse=True,\n supports_sparse_csr=True,\n assert_autodiffed=True,),\n OpInfo('dist',\n op=torch.dist,\n dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_dist),\n 
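# Aside on the dist entry just above: torch.dist(a, b, p) is the p-norm of
# (a - b), which makes a convenient oracle when spot-checking samples:
import torch

a, b = torch.randn(4, dtype=torch.double), torch.randn(4, dtype=torch.double)
for p in (1., 2., 3.5):
    assert torch.allclose(torch.dist(a, b, p), (a - b).norm(p))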
OpInfo('outer',\n op=torch.outer,\n aliases=('ger', ),\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_outer,),\n OpInfo('ormqr',\n op=torch.ormqr,\n dtypes=floating_and_complex_types(),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_ormqr,\n decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack]),\n OpInfo('permute',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n assert_autodiffed=True,\n autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused\n autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused\n assert_jit_shape_analysis=True,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_permute),\n OpInfo('pow',\n dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),\n # Due to AVX2 currently not being fully supported for Float16, log_vml_cpu can't be enabled\n # for Float16, causing this test to fail. pow's autograd for Float16 is thus currently\n # unsupported on CPU.\n backward_dtypes=floating_and_complex_types_and(torch.bfloat16),\n backward_dtypesIfCUDA=floating_and_complex_types_and(torch.bfloat16, torch.half),\n sample_inputs_func=sample_inputs_pow,\n supports_inplace_autograd=False,\n supports_forward_ad=True,\n assert_autodiffed=True,\n ),\n OpInfo('float_power',\n dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),\n sample_inputs_func=sample_inputs_pow,\n supports_forward_ad=True,\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestMathBits', 'test_conj_view', device_type='cuda'),),),\n OpInfo('qr',\n op=torch.qr,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_linalg_qr,\n # batched gradients do not work for empty inputs\n # https://github.com/pytorch/pytorch/issues/50743#issuecomment-767376085\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n # See https://github.com/pytorch/pytorch/issues/66357\n check_batched_forward_grad=False,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),\n UnaryUfuncInfo('rad2deg',\n ref=np.degrees,\n decorators=(precisionOverride({torch.bfloat16: 7e-1,\n torch.float16: 7e-1}),),\n dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),\n skips=(\n # Reference: https://github.com/pytorch/pytorch/pull/51283#issuecomment-770614273\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',\n dtypes=[torch.bfloat16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=[torch.bfloat16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.bfloat16]),\n ),\n safe_casts_outputs=True),\n UnaryUfuncInfo('real',\n ref=np.real,\n dtypes=complex_types(),\n supports_out=False,\n supports_forward_ad=True,\n # See https://github.com/pytorch/pytorch/issues/66357\n check_batched_forward_grad=False,\n skips=(\n # Skip since real and imag don't have out variants.\n DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'),\n )),\n OpInfo('roll',\n ref=np.roll,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n supports_out=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_roll),\n OpInfo('rot90',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n 
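# Standalone check of the outer entry above and its 'ger' alias:
# outer(u, v)[i, j] == u[i] * v[j].
import torch

u, v = torch.arange(3.), torch.arange(4.)
assert torch.equal(torch.outer(u, v), u.unsqueeze(1) * v.unsqueeze(0))
assert torch.equal(torch.ger(u, v), torch.outer(u, v))  # legacy BLAS-style alias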
supports_out=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_rot90),\n UnaryUfuncInfo('round',\n ref=np.round,\n aliases=('special.round',),\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n supports_forward_ad=True,\n supports_sparse=True,\n supports_sparse_csr=True,\n assert_autodiffed=True,),\n UnaryUfuncInfo('sin',\n ref=np.sin,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n handles_large_floats=False,\n handles_complex_extremals=False,\n safe_casts_outputs=True,\n supports_sparse=True,\n supports_sparse_csr=True,\n supports_forward_ad=True,\n skips=(\n DecorateInfo(unittest.skip(\"Skipped! sparse backward not supported\"),\n 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),\n ),\n decorators=(precisionOverride({torch.bfloat16: 1e-2}),)),\n UnaryUfuncInfo('sinc',\n ref=np_sinc_with_fp16_as_fp32,\n aliases=('special.sinc',),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n handles_large_floats=False,\n handles_complex_extremals=False,\n safe_casts_outputs=True,\n supports_forward_ad=True,\n decorators=(precisionOverride({torch.bfloat16: 1e-2,\n torch.float16: 1e-2}),),\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/49133\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',\n dtypes=[torch.cfloat]),\n )),\n UnaryUfuncInfo('sinh',\n ref=np_unary_ufunc_integer_promotion_wrapper(np.sinh),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n safe_casts_outputs=True,\n assert_autodiffed=True,\n supports_forward_ad=True,\n supports_sparse=True,\n supports_sparse_csr=True,\n decorators=(precisionOverride({torch.float16: 1e-2}),),\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=(IS_MACOS or IS_WINDOWS)),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=(IS_MACOS or IS_WINDOWS)),\n # Reference: https://github.com/pytorch/pytorch/issues/48641\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.int8]),\n DecorateInfo(unittest.skip(\"Skipped! 
sparse backward not supported\"),\n 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),\n )),\n UnaryUfuncInfo('sign',\n ref=reference_sign,\n dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half),\n dtypesIfCUDA=all_types_and(torch.bool, torch.bfloat16, torch.half),\n supports_forward_ad=True,\n supports_sparse=True,\n supports_sparse_csr=True,\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/41245\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]),\n )),\n UnaryUfuncInfo('sgn',\n ref=reference_sgn,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n supports_forward_ad=True,\n supports_sparse=True,\n supports_sparse_csr=True,\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/41245\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]),\n # Reference: https://github.com/pytorch/pytorch/issues/53958\n # Test fails in comparison on Nan as the `equal_nan` is True for\n # comparing the CPU tensors.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.complex64, torch.complex128]),\n # Reference: https://github.com/pytorch/pytorch/issues/48486\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.complex64]),\n # The complex formula might be wrong\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_forward_mode_AD',\n dtypes=complex_types()),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_inplace_forward_mode_AD',\n dtypes=complex_types()),\n DecorateInfo(unittest.skip(\"Skipped! 
sparse backward not supported\"),\n 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),\n )),\n OpInfo('split',\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),\n sample_inputs_func=partial(sample_inputs_split, list_args=False),\n supports_forward_ad=True,\n supports_out=False,\n autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused\n autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused\n assert_autodiffed=True),\n OpInfo('split',\n variant_test_name='list_args',\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),\n sample_inputs_func=partial(sample_inputs_split, list_args=True),\n supports_forward_ad=True,\n supports_out=False),\n OpInfo('split_with_sizes',\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),\n sample_inputs_func=sample_inputs_split_with_sizes,\n autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused\n autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused\n supports_out=False,\n supports_forward_ad=True,\n assert_autodiffed=True),\n OpInfo('__radd__',\n op=torch.Tensor.__radd__,\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),\n sample_inputs_func=sample_inputs_rbinops,\n supports_out=False,\n skips=(\n # RuntimeError:\n # object has no attribute __radd__:\n # File \"<string>\", line 3\n # def the_method(i0):\n # return torch.__radd__(i0, 3.14j)\n # ~~~~~~~~~~~~~~ <--- HERE\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit',),\n ),\n assert_autodiffed=True,\n supports_forward_ad=True,\n autodiff_nonfusible_nodes=['aten::add'],),\n OpInfo('__rdiv__',\n op=torch.Tensor.__rdiv__,\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),\n sample_inputs_func=sample_inputs_rbinops,\n supports_out=False,\n skips=(\n # RuntimeError:\n # object has no attribute __rdiv__:\n # File \"<string>\", line 3\n # def the_method(i0):\n # return torch.__rdiv__(i0, 3.14j)\n # ~~~~~~~~~~~~~~ <--- HERE\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit',),\n ),\n supports_forward_ad=True,\n assert_autodiffed=True,\n autodiff_nonfusible_nodes=['aten::mul', 'aten::reciprocal'],),\n OpInfo('__rmul__',\n op=torch.Tensor.__rmul__,\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),\n sample_inputs_func=sample_inputs_rbinops,\n supports_out=False,\n skips=(\n # RuntimeError:\n # object has no attribute __rmul__:\n # File \"<string>\", line 3\n # def the_method(i0):\n # return torch.__rmul__(i0, 3.14j)\n # ~~~~~~~~~~~~~~ <--- HERE\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit',),\n ),\n assert_autodiffed=True,\n supports_forward_ad=True,\n autodiff_nonfusible_nodes=['aten::mul'],),\n OpInfo('__rand__',\n op=torch.Tensor.__rand__,\n dtypes=integral_types_and(torch.bool),\n sample_inputs_func=sample_inputs_rbinops,\n supports_out=False,\n supports_autograd=False,\n supports_forward_ad=True,),\n OpInfo('__ror__',\n op=torch.Tensor.__ror__,\n dtypes=integral_types_and(torch.bool),\n sample_inputs_func=sample_inputs_rbinops,\n supports_out=False,\n supports_autograd=False,\n supports_forward_ad=True,),\n OpInfo('__rxor__',\n op=torch.Tensor.__rxor__,\n dtypes=integral_types_and(torch.bool),\n sample_inputs_func=sample_inputs_rbinops,\n supports_out=False,\n supports_autograd=False,\n supports_forward_ad=True,),\n OpInfo('__rmatmul__',\n op=torch.Tensor.__rmatmul__,\n 
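# What the __r*__ entries above exercise, in standalone form: when the left
# operand's own dunder returns NotImplemented, Python falls back to the
# tensor's reflected method:
import torch

t = torch.ones(3)
assert torch.equal(2.0 + t, t.__radd__(2.0))  # float.__add__ defers to Tensor.__radd__
assert torch.equal(2.0 * t, t.__rmul__(2.0))
assert torch.equal(2.0 - t, t.__rsub__(2.0))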
dtypes=all_types_and_complex_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else [],\n torch.complex64, torch.complex128),\n backward_dtypesIfCUDA=floating_types_and(torch.float16,\n *[torch.bfloat16] if (SM60OrLater and CUDA11OrLater) else [],\n torch.complex64, torch.complex128),\n assert_autodiffed=True,\n sample_inputs_func=sample_inputs_matmul,\n supports_out=False,\n decorators=(\n # https://github.com/pytorch/pytorch/issues/67470\n DecorateInfo(unittest.skip(\"67470!\"),\n 'TestCommon', 'test_noncontiguous_samples',\n device_type='cpu', dtypes=(torch.long,)),\n DecorateInfo(\n toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}),\n 'TestMathBits', 'test_conj_view'),\n # Fails on XLA.\n # AssertionError: False is not true : Tensors failed to compare as equal\n DecorateInfo(unittest.expectedFailure, 'TestOpInfo', device_type='xla', dtypes=(torch.long,)),\n ),\n skips=(\n # RuntimeError:\n # object has no attribute __rmatmul__:\n # File \"<string>\", line 3\n # def the_method(i0, i1):\n # return torch.__rmatmul__(i0, i1)\n # ~~~~~~~~~~~~~~ <--- HERE\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit',),\n )),\n OpInfo('__rmod__',\n op=torch.Tensor.__rmod__,\n dtypes=floating_types_and(torch.bfloat16, torch.half,),\n dtypesIfCUDA=all_types_and(torch.bfloat16, torch.half, torch.bool),\n sample_inputs_func=sample_inputs_rbinops,\n supports_out=False,\n skips=(\n # RuntimeError:\n # object has no attribute __rmod__:\n # File \"<string>\", line 3\n # def the_method(i0):\n # return torch.__rmod__(i0, 3.14)\n # ~~~~~~~~~~~~~~ <--- HERE\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit',),\n ),\n # Support autograd after torch.remainder(Tensor, Tensor) supports\n # autograd of the second argument.\n # https://github.com/pytorch/pytorch/pull/58476/files#r637167630\n supports_autograd=False,\n assert_autodiffed=True,\n autodiff_nonfusible_nodes=['aten::remainder'],),\n OpInfo('__rpow__',\n op=torch.Tensor.__rpow__,\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),\n # Reference: https://github.com/pytorch/pytorch/issues/54774\n # \"log2\" \"_vml_cpu\" not implemented for Half\n backward_dtypesIfCPU=all_types_and_complex_and(torch.bfloat16, torch.bool),\n sample_inputs_func=sample_inputs_rbinops,\n supports_out=False,\n supports_forward_ad=True,\n skips=(\n # RuntimeError:\n # object has no attribute __rpow__:\n # File \"<string>\", line 3\n # def the_method(i0):\n # return torch.__rpow__(i0, 3.14j)\n # ~~~~~~~~~~~~~~ <--- HERE\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit',),\n ),\n assert_autodiffed=True,\n autodiff_nonfusible_nodes=['aten::pow'],),\n OpInfo('__rsub__',\n op=torch.Tensor.__rsub__,\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),\n sample_inputs_func=sample_inputs_rbinops,\n supports_out=False,\n skips=(\n # RuntimeError:\n # object has no attribute __rsub__:\n # File \"<string>\", line 3\n # def the_method(i0):\n # return torch.__rsub__(i0, 3.14j)\n # ~~~~~~~~~~~~~~ <--- HERE\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit',),\n ),\n assert_autodiffed=True,\n autodiff_nonfusible_nodes=['aten::rsub'],),\n OpInfo('rsub',\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),\n variant_test_name='rsub_tensor',\n supports_out=False,\n supports_inplace_autograd=False,\n skips=(\n # Reference: 
https://github.com/pytorch/pytorch/issues/53797\n # JIT doesn't understand complex literals\n # RuntimeError: false\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":52,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.cfloat, torch.cdouble]), # noqa: B950\n ),\n sample_inputs_func=partial(sample_inputs_rsub, variant='tensor'),),\n OpInfo('rsub',\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),\n variant_test_name='rsub_scalar',\n supports_out=False,\n supports_inplace_autograd=False,\n sample_inputs_func=partial(sample_inputs_rsub, variant='scalar'),\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/53797\n # JIT doesn't understand complex literals\n # RuntimeError: false\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":52,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.cfloat, torch.cdouble]), # noqa: B950\n ),\n assert_autodiffed=True,),\n OpInfo('select',\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),\n sample_inputs_func=sample_inputs_select,\n assert_jit_shape_analysis=True,\n supports_forward_ad=True,\n supports_out=False),\n OpInfo('select_scatter',\n dtypes=all_types_and(torch.bfloat16, torch.half, torch.bool),\n sample_inputs_func=sample_inputs_select_scatter,\n supports_forward_ad=True,\n supports_out=False),\n OpInfo('slice_scatter',\n dtypes=all_types_and(torch.bfloat16, torch.half, torch.bool),\n sample_inputs_func=sample_inputs_slice_scatter,\n supports_forward_ad=True,\n supports_out=False),\n UnaryUfuncInfo('signbit',\n ref=np.signbit,\n dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half),\n supports_sparse=True,\n supports_sparse_csr=True,\n supports_autograd=False,),\n OpInfo('solve',\n op=torch.solve,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_legacy_solve,\n check_batched_gradgrad=False,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),\n UnaryUfuncInfo('tan',\n ref=np.tan,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n safe_casts_outputs=True,\n supports_forward_ad=True,\n supports_sparse=True,\n supports_sparse_csr=True,\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.bfloat16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.bfloat16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',\n device_type='cpu', dtypes=[torch.bfloat16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=(IS_MACOS or IS_WINDOWS)),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=(IS_MACOS or IS_WINDOWS)),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cuda', dtypes=[torch.float64],\n active_if=TEST_WITH_ROCM),\n DecorateInfo(unittest.skip(\"Skipped! 
sparse backward not supported\"),\n 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),\n ),\n # tan(pi/2 * odd_number) is nan\n reference_numerics_filter=NumericsFilter(\n condition=lambda x: close_to_int(x / (math.pi * 0.5)), safe_val=math.pi)),\n UnaryUfuncInfo('tanh',\n ref=np.tanh,\n aliases=('nn.functional.tanh',),\n decorators=(precisionOverride({torch.bfloat16: 1e-2}),),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n # \"tanh_backward_cpu\" not implemented for 'BFloat16'\n backward_dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),\n assert_autodiffed=True,\n safe_casts_outputs=True,\n assert_jit_shape_analysis=True,\n supports_forward_ad=True,\n supports_sparse=True,\n supports_sparse_csr=True,\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=(IS_MACOS or IS_WINDOWS)),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=(IS_MACOS or IS_WINDOWS)),\n # The alias nn.functional.tanh will produce (because of the saved warning string):\n # \"RuntimeError: Expected to not find \"tanh\" but found it\"\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'),\n DecorateInfo(unittest.skip(\"Skipped! sparse backward not supported\"),\n 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),\n ),\n # tanh(j * pi/2 * odd_number) is nan, since tanh(z) = -i * tan(i * z)\n reference_numerics_filter=NumericsFilter(\n condition=lambda x: (close_to_int(x / (math.pi * 0.5j))\n if x.is_complex() else x.new_tensor(False, dtype=torch.bool)),\n safe_val=0)),\n OpInfo('tensor_split',\n ref=np.array_split,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_out=False,\n supports_forward_ad=True,\n skips=(\n # Pre-existing condition; Needs to be fixed\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),\n ),\n sample_inputs_func=sample_inputs_tensor_split,),\n OpInfo('hsplit',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_out=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_hsplit,),\n OpInfo('vsplit',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_out=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_vsplit,),\n OpInfo('dsplit',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_out=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_dsplit,),\n OpInfo('triangular_solve',\n op=torch.triangular_solve,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_legacy_solve,\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n gradcheck_wrapper=lambda *args, **kwargs: gradcheck_wrapper_triangular_input(*args, idx=1, **kwargs),\n decorators=[skipCUDAIfNoMagma]),\n UnaryUfuncInfo('trunc',\n aliases=('fix', ),\n ref=np.trunc,\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_forward_ad=True,\n supports_sparse=True,\n supports_sparse_csr=True,\n assert_autodiffed=True),\n UnaryUfuncInfo('exp2',\n aliases=('special.exp2', ),\n 
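# NOTE: np_unary_ufunc_integer_promotion_wrapper (roughly) adapts the numpy reference so integer\n # inputs are compared in the promoted floating dtype, matching torch's int-to-float promotion, e.g.:\n # >>> torch.exp2(torch.tensor([0, 1, 2]))\n # tensor([1., 2., 4.])\n 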
ref=np_unary_ufunc_integer_promotion_wrapper(np.exp2),\n dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n supports_forward_ad=True,\n safe_casts_outputs=True),\n UnaryUfuncInfo('expm1',\n aliases=('special.expm1', ),\n ref=np_unary_ufunc_integer_promotion_wrapper(np.expm1),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n supports_forward_ad=True,\n supports_sparse=True,\n supports_sparse_csr=True,\n safe_casts_outputs=True,\n assert_autodiffed=True,\n skips=(\n # Reference: https://github.com/pytorch/pytorch/pull/48926#issuecomment-739734774\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.bfloat16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.bfloat16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',\n device_type='cpu', dtypes=[torch.bfloat16]),\n DecorateInfo(unittest.skip(\"Skipped! sparse backward not supported\"),\n 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),\n )),\n UnaryUfuncInfo('nan_to_num',\n ref=np.nan_to_num,\n dtypes=all_types_and(torch.half, torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.half, torch.bool, torch.bfloat16),\n supports_forward_ad=True,\n supports_sparse=True,\n skips=(\n DecorateInfo(unittest.skip(\"Skipped! sparse backward not supported\"),\n 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),\n ),\n # Passing numpy_kwargs via sample_kwargs, as numpy does comparison\n # with BFloat16 in float, since it currently doesn't support BFloat16.\n # Ref: https://github.com/pytorch/pytorch/issues/57982#issuecomment-839150556\n sample_kwargs=lambda device, dtype, input: ({},\n {'posinf': torch.finfo(torch.bfloat16).max,\n 'neginf': torch.finfo(torch.bfloat16).min})\n if dtype is torch.bfloat16 else ({}, {})),\n UnaryUfuncInfo('reciprocal',\n ref=np_unary_ufunc_integer_promotion_wrapper(np.reciprocal),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n supports_forward_ad=True,\n safe_casts_outputs=True,\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/45690\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.cfloat, torch.cdouble]),\n # Reference: https://github.com/pytorch/pytorch/pull/49102#issuecomment-744604601\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.bfloat16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=[torch.bfloat16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',\n dtypes=[torch.bfloat16]),\n )),\n UnaryUfuncInfo('rsqrt',\n ref=lambda x: np.reciprocal(np.sqrt(x)),\n domain=(0, None),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n decorators=(precisionOverride({torch.half: 5e-2}),),\n safe_casts_outputs=True,\n assert_autodiffed=True,\n supports_forward_ad=True,\n handles_complex_extremals=False),\n UnaryUfuncInfo('sqrt',\n ref=np.sqrt,\n supports_sparse=True,\n domain=(0, None),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n 
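# NOTE: domain=(0, None) keeps the numpy comparison to nonnegative reals; for a negative real\n # input torch.sqrt returns nan rather than switching to a complex result, e.g.:\n # >>> torch.sqrt(torch.tensor(-1.0))\n # tensor(nan)\n 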
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n supports_forward_ad=True,\n supports_sparse_csr=True,\n decorators=(precisionOverride({torch.bfloat16: 7e-2}),),\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/47358\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=IS_MACOS),\n # Reference: https://github.com/pytorch/pytorch/pull/47293#issuecomment-721774436\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=[torch.bfloat16]),\n DecorateInfo(unittest.skip(\"Skipped! sparse backward not supported\"),\n 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),\n ),\n safe_casts_outputs=True,\n handles_complex_extremals=False),\n UnaryUfuncInfo('square',\n ref=np.square,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n decorators=(precisionOverride({torch.complex64: 3e-4, torch.bfloat16: 3e-1}),),\n supports_forward_ad=True,\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/52549\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=[torch.cfloat, torch.cdouble]),\n # >>> t = torch.tensor(complex(-0.01, float(\"inf\")))\n # >>> np.square(t.numpy())\n # (-inf-infj)\n # >>> t.square()\n # tensor(-inf-infj)\n # >>> t.cuda().square()\n # tensor(inf+nanj, device='cuda:0')\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]),\n # Reference: https://github.com/pytorch/pytorch/pull/52551#issuecomment-782596181\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=[torch.bfloat16]),\n ),),\n OpInfo('lerp',\n dtypes=floating_and_complex_types(),\n dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),\n dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_lerp,\n supports_forward_ad=True,\n assert_autodiffed=True),\n OpInfo('linalg.inv',\n aten_name='linalg_inv',\n op=torch.linalg.inv,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_linalg_invertible,\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # Pre-existing condition; Needs to be fixed\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),\n ),\n ),\n OpInfo('linalg.inv_ex',\n aten_name='linalg_inv_ex',\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_linalg_invertible,\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],\n ),\n UnaryUfuncInfo('angle',\n ref=np.angle,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool),\n decorators=(precisionOverride({torch.float16: 1e-2,\n torch.bfloat16: 1e-2}),),\n safe_casts_outputs=True,\n supports_forward_ad=True,\n supports_sparse_csr=True,\n supports_complex_to_float=True,\n skips=(\n # The complex formula might be wrong\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_forward_mode_AD',\n 
dtypes=complex_types()),),),\n UnaryUfuncInfo('isfinite',\n ref=np.isfinite,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_out=False,\n supports_autograd=False,\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/66402\n DecorateInfo(unittest.expectedFailure, \"TestUnaryUfuncs\", \"test_reference_numerics_hard\",\n device_type='cpu', dtypes=(torch.complex64,), active_if=not (IS_MACOS or IS_WINDOWS)),\n )),\n UnaryUfuncInfo('isinf',\n ref=np.isinf,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_out=False,\n supports_sparse=True,\n supports_sparse_csr=True,\n supports_autograd=False),\n UnaryUfuncInfo('isposinf',\n ref=np.isposinf,\n dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),\n supports_sparse=True,\n supports_sparse_csr=True,\n supports_autograd=False),\n UnaryUfuncInfo('isneginf',\n ref=np.isneginf,\n dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),\n supports_sparse=True,\n supports_sparse_csr=True,\n supports_autograd=False),\n UnaryUfuncInfo('isreal',\n ref=np.isreal,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_out=False,\n supports_autograd=False),\n UnaryUfuncInfo('isnan',\n ref=np.isnan,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_out=False,\n supports_sparse=True,\n supports_sparse_csr=True,\n supports_autograd=False),\n OpInfo('linalg.solve',\n aten_name='linalg_solve',\n op=torch.linalg.solve,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_linalg_solve,\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),\n OpInfo('linalg.solve_triangular',\n aten_name='linalg_solve_triangular',\n op=torch.linalg.solve_triangular,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_linalg_solve_triangular,\n # linalg.solve_triangular cannot be batched over because of a call to out.copy_(result);\n supports_forward_ad=True),\n OpInfo('linalg.matrix_rank',\n aten_name='linalg_matrix_rank',\n dtypes=floating_and_complex_types(),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_linalg_invertible,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # Pre-existing condition; Needs to be fixed\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),\n ),\n ),\n OpInfo('linalg.matrix_rank',\n aten_name='linalg_matrix_rank',\n variant_test_name='hermitian',\n dtypes=floating_and_complex_types(),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_linalg_pinv_hermitian,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # Pre-existing condition; Needs to be fixed\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),\n ),\n ),\n OpInfo('linalg.pinv',\n aten_name='linalg_pinv',\n op=torch.linalg.pinv,\n dtypes=floating_and_complex_types(),\n check_batched_grad=False,\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_linalg_pinv,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # errors with \"leaked XXXX bytes CUDA memory on device 0\"\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit', device_type='cuda'),\n )),\n OpInfo('linalg.pinv',\n aten_name='linalg_pinv',\n variant_test_name='singular',\n 
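# A sketch of the rank argument spelled out below: for generic full-rank a of shape (*, m, k)\n # and b of shape (*, n, k), the product a @ b.mT has rank exactly k, and the small perturbations\n # applied by gradcheck's finite differences generically leave that rank unchanged.\n 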
# pinv is Frechet-differentiable in a rank-preserving neighborhood,\n # so we feed inputs that are the products of two full-rank factors,\n # to avoid any rank changes caused by the perturbations in the gradcheck\n op=lambda a, b: torch.linalg.pinv(a @ b.mT),\n dtypes=floating_and_complex_types(),\n supports_out=False,\n check_batched_grad=False,\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_linalg_pinv_singular,\n # Only large tensors show issues with implicit backward used prior to\n # explicit backward implementation.\n decorators=[slowTest, skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # test does not work with passing lambda for op\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('linalg.pinv',\n aten_name='linalg_pinv',\n variant_test_name='hermitian',\n dtypes=floating_and_complex_types(),\n check_batched_grad=False,\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_linalg_pinv_hermitian,\n gradcheck_wrapper=gradcheck_wrapper_hermitian_input,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],\n ),\n OpInfo('eig',\n op=torch.eig,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_eig,\n decorators=[\n skipCUDAIfNoMagma,\n skipCPUIfNoLapack,\n skipCUDAIfRocm\n ],),\n OpInfo('einsum',\n # we need this lambda because SampleInput expects tensor input as the first argument\n # TODO(@heitorschueroff) update SampleInput to handle such cases\n op=lambda tensors, equation: torch.einsum(equation, tensors),\n dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.half, *[torch.bfloat16] if CUDA11OrLater else []),\n backward_dtypesIfCUDA=floating_and_complex_types_and(torch.half,\n *[torch.bfloat16] if (SM60OrLater and CUDA11OrLater) else []),\n supports_out=False,\n supports_forward_ad=True,\n check_batched_forward_grad=False,\n # See https://github.com/pytorch/pytorch/issues/66357\n sample_inputs_func=sample_inputs_einsum,\n skips=(\n # test does not work with passing lambda for op\n # there's a test `test_einsum` in `test_jit.py` to handle this case\n # AssertionError: JIT Test does not execute any logic\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('svd',\n op=torch.svd,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_svd,\n decorators=[\n skipCUDAIfNoMagmaAndNoCusolver,\n skipCUDAIfRocm,\n skipCPUIfNoLapack,\n ]),\n OpInfo('linalg.svd',\n op=torch.linalg.svd,\n aten_name='linalg_svd',\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_linalg_svd,\n decorators=[\n skipCUDAIfNoMagmaAndNoCusolver,\n skipCUDAIfRocm,\n skipCPUIfNoLapack,\n ]),\n OpInfo('linalg.svdvals',\n op=torch.linalg.svdvals,\n aten_name='linalg_svdvals',\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_linalg_svdvals,\n check_batched_gradgrad=False,\n decorators=[\n skipCUDAIfNoMagmaAndNoCusolver,\n skipCPUIfNoLapack]),\n OpInfo('polar',\n dtypes=floating_types(),\n sample_inputs_func=sample_inputs_polar),\n # TODO(@kshitij12345): Refactor similar to `mvlgamma` entries.\n # To test reference numerics against multiple values of argument `n`,\n # we make multiple OpInfo entries with each entry corresponding to different value of n (currently 0 to 4).\n # We run the op tests from test_ops.py only for `n=0` to avoid 
redundancy in testing.\n UnaryUfuncInfo('polygamma',\n op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),\n variant_test_name='polygamma_n_0',\n ref=reference_polygamma if TEST_SCIPY else _NOTHING,\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n safe_casts_outputs=True,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_polygamma,\n skips=(\n # AssertionError: JIT Test does not execute any logic\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n sample_kwargs=lambda device, dtype, input: ({'n': 0}, {'n': 0})),\n # A separate OpInfo entry for special.polygamma is needed to reorder the arguments\n # for the alias. See the discussion here: https://github.com/pytorch/pytorch/pull/59691#discussion_r650261939\n UnaryUfuncInfo('special.polygamma',\n op=lambda x, n, **kwargs: torch.special.polygamma(n, x, **kwargs),\n variant_test_name='special_polygamma_n_0',\n ref=reference_polygamma if TEST_SCIPY else _NOTHING,\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n safe_casts_outputs=True,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_polygamma,\n skips=(\n # AssertionError: JIT Test does not execute any logic\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n sample_kwargs=lambda device, dtype, input: ({'n': 0}, {'n': 0}),\n # polygamma functions have multiple singularities at x <= 0\n reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),\n UnaryUfuncInfo('polygamma',\n op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),\n variant_test_name='polygamma_n_1',\n ref=reference_polygamma if TEST_SCIPY else _NOTHING,\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n safe_casts_outputs=True,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_polygamma,\n skips=(\n # Redundant tests\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon'),\n # Mismatch: https://github.com/pytorch/pytorch/issues/55357\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard'),\n ),\n sample_kwargs=lambda device, dtype, input: ({'n': 1}, {'n': 1}),\n # polygamma functions have multiple singularities at x <= 0\n reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),\n UnaryUfuncInfo('polygamma',\n op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),\n variant_test_name='polygamma_n_2',\n ref=reference_polygamma if TEST_SCIPY else _NOTHING,\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n safe_casts_outputs=True,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_polygamma,\n skips=(\n # Redundant tests\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon'),\n # Mismatch: https://github.com/pytorch/pytorch/issues/55357\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 
'test_reference_numerics_hard',\n active_if=TEST_WITH_ROCM),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',\n active_if=TEST_WITH_ROCM),),\n sample_kwargs=lambda device, dtype, input: ({'n': 2}, {'n': 2}),\n # polygamma functions have multiple singularities at x <= 0\n reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),\n UnaryUfuncInfo('polygamma',\n op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),\n variant_test_name='polygamma_n_3',\n ref=reference_polygamma if TEST_SCIPY else _NOTHING,\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n safe_casts_outputs=True,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_polygamma,\n skips=(\n # Redundant tests\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon'),\n # Mismatch: https://github.com/pytorch/pytorch/issues/55357\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n active_if=TEST_WITH_ROCM),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',\n active_if=TEST_WITH_ROCM),),\n sample_kwargs=lambda device, dtype, input: ({'n': 3}, {'n': 3}),\n # polygamma functions have multiple singularities at x <= 0\n reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),\n UnaryUfuncInfo('polygamma',\n op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),\n variant_test_name='polygamma_n_4',\n ref=reference_polygamma if TEST_SCIPY else _NOTHING,\n decorators=(precisionOverride({torch.float16: 5e-4, torch.float32: 5e-4}),),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n safe_casts_outputs=True,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_polygamma,\n skips=(\n # Redundant tests\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon'),\n # Mismatch: https://github.com/pytorch/pytorch/issues/55357\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n active_if=TEST_WITH_ROCM),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',\n active_if=TEST_WITH_ROCM),),\n sample_kwargs=lambda device, dtype, input: ({'n': 4}, {'n': 4}),\n # polygamma functions have multiple singularities at x <= 0\n reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),\n OpInfo('ravel',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_ravel,\n ),\n OpInfo('reshape',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_view_reshape,\n supports_out=False,\n supports_forward_ad=True,\n ),\n OpInfo('reshape_as',\n op=lambda x, other: x.reshape_as(other),\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_view_as_reshape_as,\n 
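# NOTE: x.reshape_as(other) behaves like x.reshape(other.shape) and may return either a view\n # or a copy depending on the input's strides, e.g.:\n # >>> torch.arange(6).reshape_as(torch.empty(2, 3))\n # tensor([[0, 1, 2],\n # [3, 4, 5]])\n 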
supports_out=False,\n supports_forward_ad=True,\n ),\n OpInfo('view',\n op=lambda x, shape: x.view(shape),\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n assert_jit_shape_analysis=True,\n sample_inputs_func=sample_inputs_view_reshape,\n ),\n OpInfo('view_as',\n op=lambda x, other: x.view_as(other),\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_view_as_reshape_as,\n ),\n OpInfo('atleast_1d',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_atleast1d2d3d,\n skips=(\n # JIT does not support variadic tensors.\n # RuntimeError: input->type()->kind() == TypeKind::OptionalType\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":252,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]),\n ),\n ),\n OpInfo('atleast_2d',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n skips=(\n # JIT does not support variadic tensors.\n # RuntimeError: input->type()->kind() == TypeKind::OptionalType\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":252,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]),\n ),\n sample_inputs_func=sample_inputs_atleast1d2d3d,\n ),\n OpInfo('atleast_3d',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n skips=(\n # JIT does not support variadic tensors.\n # RuntimeError: input->type()->kind() == TypeKind::OptionalType\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":252,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]),\n ),\n sample_inputs_func=sample_inputs_atleast1d2d3d,\n ),\n OpInfo('pinverse',\n op=torch.pinverse,\n dtypes=floating_and_complex_types(),\n check_batched_grad=False,\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n supports_out=False,\n sample_inputs_func=sample_inputs_linalg_invertible,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack]),\n OpInfo('gather',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_gather,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n supports_forward_ad=True,\n ),\n OpInfo('index_fill',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_inplace_autograd=False,\n supports_out=False,\n supports_forward_ad=True,\n # https://github.com/pytorch/pytorch/issues/66357\n check_batched_forward_grad=False,\n sample_inputs_func=sample_inputs_index_fill),\n OpInfo('index_copy',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_inplace_autograd=False,\n supports_out=False,\n supports_forward_ad=True,\n # https://github.com/pytorch/pytorch/issues/66357\n 
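# (the issue above tracks batched forward-grad failures, hence the flag just below)\n # As a usage sketch: index_copy writes entries of `source` into `self` at the positions\n # given by `index` along `dim`, e.g.:\n # >>> torch.zeros(3).index_copy(0, torch.tensor([2]), torch.tensor([1.]))\n # tensor([0., 0., 1.])\n 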
check_batched_forward_grad=False,\n sample_inputs_func=sample_inputs_index_copy,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),\n OpInfo('index_select',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_index_select,\n supports_forward_ad=True,\n assert_jit_shape_analysis=True,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),\n OpInfo('index_add',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n # https://github.com/pytorch/pytorch/issues/66357\n check_batched_forward_grad=False,\n sample_inputs_func=sample_inputs_index_add,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),\n OpInfo('__getitem__',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_inplace_autograd=False,\n supports_scripting=False,\n op=torch.Tensor.__getitem__,\n skips=(\n # AssertionError: False is not true : Scalars failed to compare as equal! 0 != 104448\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit', device_type='cuda'),\n ),\n assert_jit_shape_analysis=False, # TODO: support index.Tensor()\n sample_inputs_func=sample_inputs_getitem,),\n OpInfo('index_put',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_inplace_autograd=True,\n supports_forward_ad=True,\n # https://github.com/pytorch/pytorch/issues/66357\n check_batched_forward_grad=False,\n test_neg_view=False,\n sample_inputs_func=sample_inputs_index_put,\n skips=(\n # RuntimeError: The following operation failed in the TorchScript interpreter.\n # Traceback of TorchScript (most recent call last):\n # File \"<string>\", line 3, in forward\n # def the_method(i0, i1: List[torch.Tensor], i2):\n # return torch.index_put(i0, i1, i2, accumulate=False)\n # ~~~~~~~~~~~~~~~ <--- HERE\n # RuntimeError: a leaf Variable that requires grad is being used in an in-place operation.\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('sort',\n dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),\n dtypesIfROCM=all_types_and(torch.float16),\n sample_inputs_func=sample_inputs_sort,\n skips=(\n # sort does not correctly warn when resizing out= inputs\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),\n # RuntimeError not raised\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cpu'),\n )),\n OpInfo('unique',\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.float16),\n sample_inputs_func=sample_inputs_unique,\n supports_out=False,\n supports_autograd=False,\n skips=(\n # RuntimeError:\n # 'Tensor (inferred)' object has no attribute or method 'unique'.:\n # File \"<string>\", line 3\n #\n # def the_method(i0):\n # return i0.unique(sorted=False, return_inverse=False, return_counts=False, dim=None)\n # ~~~~~~~~~ <--- HERE\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('unique_consecutive',\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.float16),\n sample_inputs_func=sample_inputs_unique_consecutive,\n supports_out=False,\n supports_autograd=False,\n skips=(\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('put',\n 
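# NOTE: put indexes into a flattened view of `self`; as a sketch:\n # >>> torch.zeros(2, 2).put(torch.tensor([1, 3]), torch.tensor([9., 9.]))\n # tensor([[0., 9.],\n # [0., 9.]])\n 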
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n check_batched_gradgrad=False, # vmap complains of the sizes\n sample_inputs_func=sample_inputs_put),\n OpInfo('take',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n check_batched_grad=False, # vmap complains of the sizes\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_take),\n OpInfo('scatter',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_scatter,),\n OpInfo('bfloat16',\n op=lambda x, *args, **kwargs: x.bfloat16(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_conversion,\n # The autograd test runner cannot handle functions that change dtype\n supports_autograd=False,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('bfloat16',\n op=lambda x, *args, **kwargs: x.bfloat16(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n variant_test_name='channels_last',\n sample_inputs_func=sample_inputs_conversion_channels_last,\n # The autograd test runner cannot handle functions that change dtype\n supports_autograd=False,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('bool',\n op=lambda x, *args, **kwargs: x.bool(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_conversion,\n supports_autograd=False,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('bool',\n op=lambda x, *args, **kwargs: x.bool(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n variant_test_name='channels_last',\n sample_inputs_func=sample_inputs_conversion_channels_last,\n supports_autograd=False,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('byte',\n op=lambda x, *args, **kwargs: x.byte(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_conversion,\n # The autograd test runner cannot handle functions that change dtype\n supports_autograd=False,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('byte',\n op=lambda x, *args, **kwargs: x.byte(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n variant_test_name='channels_last',\n sample_inputs_func=sample_inputs_conversion_channels_last,\n # The autograd test runner cannot handle functions that change dtype\n supports_autograd=False,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('char',\n op=lambda x, *args, **kwargs: x.char(*args, 
**kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_conversion,\n # The autograd test runner cannot handle functions that change dtype\n supports_autograd=False,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('char',\n op=lambda x, *args, **kwargs: x.char(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n variant_test_name='channels_last',\n sample_inputs_func=sample_inputs_conversion_channels_last,\n # The autograd test runner cannot handle functions that change dtype\n supports_autograd=False,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('double',\n op=lambda x, *args, **kwargs: x.double(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_conversion,\n supports_forward_ad=True,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('double',\n op=lambda x, *args, **kwargs: x.double(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n variant_test_name='channels_last',\n sample_inputs_func=sample_inputs_conversion_channels_last,\n supports_forward_ad=True,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('float',\n op=lambda x, *args, **kwargs: x.float(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_conversion,\n # The autograd test runner cannot handle functions that change dtype\n supports_autograd=False,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('float',\n op=lambda x, *args, **kwargs: x.float(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n variant_test_name='channels_last',\n sample_inputs_func=sample_inputs_conversion_channels_last,\n # The autograd test runner cannot handle functions that change dtype\n supports_autograd=False,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('half',\n op=lambda x, *args, **kwargs: x.half(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_conversion,\n # The autograd test runner cannot handle functions that change dtype\n supports_autograd=False,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('half',\n op=lambda x, *args, **kwargs: x.half(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n variant_test_name='channels_last',\n 
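# NOTE: the channels_last variants exercise these conversions on NHWC-strided inputs; since\n # dtype conversions default to memory_format=torch.preserve_format, the output should keep\n # the channels_last layout, e.g. (a sketch):\n # >>> x = torch.randn(2, 3, 4, 5).contiguous(memory_format=torch.channels_last)\n # >>> x.half().is_contiguous(memory_format=torch.channels_last)\n # True\n 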
sample_inputs_func=sample_inputs_conversion_channels_last,\n # The autograd test runner cannot handle functions that change dtype\n supports_autograd=False,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('int',\n op=lambda x, *args, **kwargs: x.int(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_conversion,\n supports_autograd=False,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('int',\n op=lambda x, *args, **kwargs: x.int(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n variant_test_name='channels_last',\n sample_inputs_func=sample_inputs_conversion_channels_last,\n supports_autograd=False,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('long',\n op=lambda x, *args, **kwargs: x.long(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_conversion,\n supports_autograd=False,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('long',\n op=lambda x, *args, **kwargs: x.long(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n variant_test_name='channels_last',\n sample_inputs_func=sample_inputs_conversion_channels_last,\n supports_autograd=False,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('short',\n op=lambda x, *args, **kwargs: x.short(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_conversion,\n supports_autograd=False,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('short',\n op=lambda x, *args, **kwargs: x.short(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n variant_test_name='channels_last',\n sample_inputs_func=sample_inputs_conversion_channels_last,\n supports_autograd=False,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('empty_like',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_like_fns,\n supports_autograd=False,\n skips=(\n # Empty tensor data is garbage so it's hard to make comparisons with it.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_noncontiguous_samples'),\n # Empty tensor data is garbage so it's hard to make comparisons with it.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n # Empty tensor data is garbage so it's hard to make comparisons with it.\n 
DecorateInfo(unittest.skip(\"Skipped!\"), 'TestMathBits', 'test_conj_view'),\n # Empty tensor data is garbage so it's hard to make comparisons with it.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestMathBits', 'test_neg_view'),\n # Can't find schemas for this operator for some reason\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),\n )),\n OpInfo('zeros_like',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_like_fns,\n supports_autograd=False,\n skips=(\n # Can't find schemas for this operator for some reason\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),\n )),\n OpInfo('ones_like',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_like_fns,\n supports_autograd=False,\n skips=(\n # Can't find schemas for this operator for some reason\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),\n )),\n OpInfo('randn_like',\n dtypes=floating_types_and(torch.half, torch.bfloat16, torch.complex64, torch.complex128),\n op=lambda inp, *args, **kwargs:\n wrapper_set_seed(torch.randn_like, inp, *args, **kwargs),\n supports_out=False,\n sample_inputs_func=sample_inputs_like_fns,\n supports_autograd=False,\n skips=(\n # AssertionError: JIT Test does not execute any logic\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('rand_like',\n dtypes=floating_types_and(torch.half, torch.bfloat16, torch.complex64, torch.complex128),\n op=lambda inp, *args, **kwargs:\n wrapper_set_seed(torch.randn_like, inp, *args, **kwargs),\n supports_out=False,\n sample_inputs_func=sample_inputs_like_fns,\n supports_autograd=False,\n skips=(\n # AssertionError: JIT Test does not execute any logic\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n # Can't find schemas for this operator for some reason\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),\n )),\n OpInfo('randint_like',\n dtypes=all_types_and(torch.half, torch.bfloat16),\n op=lambda inp, *args, **kwargs:\n wrapper_set_seed(torch.randint_like, inp, *args, **kwargs),\n supports_out=False,\n sample_inputs_func=sample_inputs_randint_like,\n supports_autograd=False,\n skips=(\n # AssertionError: JIT Test does not execute any logic\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n # Can't find schemas for this operator for some reason\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),\n )),\n OpInfo('full_like',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_full_like,\n supports_autograd=False,\n skips=(\n # Can't find schemas for this operator for some reason\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),\n )),\n OpInfo('new_zeros',\n op=lambda x, *args, **kwargs: x.new_zeros(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_new_fns,\n skips=(\n # Can't find schemas for this operator for some reason\n 
DecorateInfo(unittest.skip(\"Skipped!\"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),\n ),\n supports_autograd=False),\n OpInfo('new_ones',\n op=lambda x, *args, **kwargs: x.new_ones(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_new_fns,\n skips=(\n # Can't find schemas for this operator for some reason\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),\n ),\n supports_autograd=False),\n OpInfo('new_empty',\n op=lambda x, *args, **kwargs: x.new_empty(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_new_fns,\n skips=(\n # Empty tensor data is garbage so it's hard to make comparisons with it.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n # Empty tensor data is garbage so it's hard to make comparisons with it.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_variant_consistency_eager'),\n # Empty tensor data is garbage so it's hard to make comparisons with it.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_noncontiguous_samples'),\n # Empty tensor data is garbage so it's hard to make comparisons with it.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestMathBits', 'test_conj_view'),\n # Empty tensor data is garbage so it's hard to make comparisons with it.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestMathBits', 'test_neg_view'),\n # Can't find schemas for this operator for some reason\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),\n ),\n supports_autograd=False),\n OpInfo('new_full',\n op=lambda x, *args, **kwargs: x.new_full(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_new_full,\n skips=(\n # Can't find schemas for this operator for some reason\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),\n ),\n supports_autograd=False),\n OpInfo('scatter_add',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_scatter_add,\n supports_out=False\n ),\n OpInfo('stack',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_stack,\n assert_autodiffed=True,\n supports_forward_ad=True,\n skips=(\n # TODO: see https://github.com/pytorch/pytorch/issues/64709\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),\n )),\n OpInfo('hstack',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_hstack_dstack_vstack,\n supports_forward_ad=True,\n skips=(\n # TODO: see https://github.com/pytorch/pytorch/issues/64709\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),\n )),\n OpInfo('hypot',\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_hypot,\n ),\n OpInfo('histogram',\n dtypes=floating_types(),\n dtypesIfCUDA=_dispatch_dtypes(), # histogram is only implemented on CPU\n sample_inputs_func=sample_inputs_histogram,\n supports_autograd=False,\n skips=(\n # JIT tests don't 
work with Tensor keyword arguments\n # https://github.com/pytorch/pytorch/issues/58507\n # RuntimeError:\n # undefined value tensor:\n # File \"<string>\", line 3\n # def the_method(i0):\n # return torch.histogram(i0, 1, weight=tensor(-0.5735, dtype=torch.float32), density=False)\n # ~~~~~~ <--- HERE\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),\n # Not Implemented on XLA.\n DecorateInfo(unittest.expectedFailure, 'TestOpInfo', device_type='xla'),\n )),\n OpInfo('histogramdd',\n dtypes=floating_types(),\n dtypesIfCUDA=_dispatch_dtypes(), # histogramdd is only implemented on CPU\n sample_inputs_func=sample_inputs_histogramdd,\n supports_autograd=False,\n skips=(\n # JIT tests don't work with Tensor keyword arguments\n # https://github.com/pytorch/pytorch/issues/58507\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('histc',\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64),\n sample_inputs_func=sample_inputs_histc,\n supports_out=True,\n supports_autograd=False,\n skips=(\n # CUDA histc returns a float tensor but does not correctly warn when passed an integral out tensor\n # \"AssertionError: RuntimeError not raised : Expected RuntimeError when doing an unsafe cast\n # from a result of dtype torch.float32 into an out= with dtype torch.long\"\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cuda'),\n )),\n OpInfo('bincount',\n dtypes=integral_types_and(),\n sample_inputs_func=sample_inputs_bincount,\n supports_out=False,\n supports_autograd=False,\n skips=(\n # JIT tests don't work with Tensor keyword arguments\n # https://github.com/pytorch/pytorch/issues/58507\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('bucketize',\n dtypes=all_types_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.float16),\n sample_inputs_func=sample_inputs_bucketize,\n supports_autograd=False,\n skips=(\n # JIT tests don't work with Tensor keyword arguments\n DecorateInfo(unittest.skip(\"Expected failure!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('searchsorted',\n dtypes=all_types(),\n dtypesIfCPU=all_types_and(torch.bfloat16, torch.float16),\n dtypesIfCUDA=all_types_and(torch.float16),\n sample_inputs_func=sample_inputs_searchsorted,\n supports_autograd=False,\n ref=reference_searchsorted,\n skips=(\n # JIT tests don't work with Tensor keyword arguments\n # https://github.com/pytorch/pytorch/issues/58507\n DecorateInfo(unittest.skip(\"Expected failure!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('cat',\n ref=lambda input_seq, dim=0, **kwargs: np.concatenate(input_seq, axis=dim, **kwargs),\n aliases=('concat',),\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_cat_concat,\n supports_forward_ad=True,\n assert_autodiffed=True,\n skips=(\n # TODO: see https://github.com/pytorch/pytorch/issues/64709\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),\n # RuntimeError: Arguments for call not valid.\n # Expected a value of type 'List[Tensor]' for argument\n # 'tensors' but instead found type 'Tensor (inferred)'.\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'),)),\n OpInfo('vstack',\n aliases=('row_stack',),\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n 
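# NOTE: vstack is equivalent to applying torch.atleast_2d to each input and concatenating\n # along dim 0, e.g.:\n # >>> torch.vstack((torch.tensor([1, 2]), torch.tensor([3, 4])))\n # tensor([[1, 2],\n # [3, 4]])\n 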
sample_inputs_func=sample_inputs_hstack_dstack_vstack,\n supports_forward_ad=True,\n skips=(\n # TODO: see https://github.com/pytorch/pytorch/issues/64709\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),\n # RuntimeError: _fn() Expected a value of type\n # 'Tensor (inferred)' for argument 't0' but instead found type 'tuple'.\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'),)),\n OpInfo('dstack',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_hstack_dstack_vstack,\n supports_forward_ad=True,\n skips=(\n # TODO: see https://github.com/pytorch/pytorch/issues/64709\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),\n )),\n OpInfo('unfold',\n op=lambda x, *args: x.unfold(*args),\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n check_batched_gradgrad=False,\n # See https://github.com/pytorch/pytorch/issues/66357\n check_batched_forward_grad=False,\n skips=(\n # Skip operator schema test because this is a functional and not an operator\n DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),\n ),\n sample_inputs_func=sample_inputs_unfold),\n OpInfo('msort',\n dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),\n dtypesIfROCM=all_types_and(torch.float16),\n check_batched_gradgrad=False,\n skips=(\n # msort does not correctly warn when resizing out= inputs.\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),\n # Expected RuntimeError when doing an unsafe cast from a result of dtype\n # torch.float32 into an out= with dtype torch.long\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cpu'),\n ),\n sample_inputs_func=sample_inputs_msort),\n OpInfo('movedim',\n aliases=('moveaxis',),\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_movedim_moveaxis),\n OpInfo('renorm',\n dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_renorm),\n ShapeFuncInfo('repeat',\n op=lambda x, dims: x.repeat(dims),\n ref=np.tile,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_repeat_tile),\n OpInfo('squeeze',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n assert_autodiffed=True,\n autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused\n autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused\n assert_jit_shape_analysis=True,\n supports_forward_ad=True,\n # https://github.com/pytorch/pytorch/issues/66357\n check_batched_forward_grad=False,\n sample_inputs_func=sample_inputs_squeeze),\n OpInfo('fill_',\n op=lambda x, scalar: torch.fill_(x.clone(), scalar),\n method_variant=None,\n inplace_variant=torch.Tensor.fill_,\n supports_forward_ad=True,\n # https://github.com/pytorch/pytorch/issues/66357\n check_batched_forward_grad=False,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n skips=(\n # JIT has issue when op is passed as lambda\n # AssertionError: JIT Test does not execute any logic\n DecorateInfo(unittest.expectedFailure, 
'TestJit', 'test_variant_consistency_jit'),\n ),\n sample_inputs_func=sample_inputs_fill_),\n OpInfo('resize_',\n op=lambda x, shape: x.clone().resize_(shape),\n method_variant=None,\n inplace_variant=torch.Tensor.resize_,\n # the test fails because resize_ doesn't work with imag views as expected by the test\n # https://github.com/pytorch/pytorch/issues/65945\n test_neg_view=False,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_autograd=False,\n skips=(\n # resize_ is raising an error on input that requires grad on purpose\n DecorateInfo(\n unittest.skip('Skipped! Resizing of variables that require grad is not supported.'),\n 'TestGradients',\n 'test_nondifferentiable',\n ),\n DecorateInfo(unittest.skip(\"Allowed exception\"), 'TestCommon', 'test_composite_compliance'),\n ),\n sample_inputs_func=sample_inputs_resize_ops),\n OpInfo('resize_as_',\n op=lambda x, other: torch.resize_as_(x.clone(), other),\n method_variant=None,\n inplace_variant=torch.Tensor.resize_as_,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_autograd=False,\n skips=(\n # resize_ is raising an error on input that requires grad on purpose\n DecorateInfo(\n unittest.skip('Skipped! Resizing of variables that require grad is not supported.'),\n 'TestGradients',\n 'test_nondifferentiable',\n ),\n ),\n sample_inputs_func=sample_inputs_resize_ops),\n OpInfo('take_along_dim',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_inplace_autograd=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_take_along_dim,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),\n ShapeFuncInfo('tile',\n ref=np.tile,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_repeat_tile),\n OpInfo('trapz', # TODO: in the future, 'trapz' should be made a proper alias of 'trapezoid'\n dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_trapezoid),\n OpInfo('trapezoid',\n dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_trapezoid),\n OpInfo('cumulative_trapezoid',\n dtypes=all_types_and_complex_and(),\n dtypesIfCUDA=all_types_and_complex_and(torch.bfloat16, torch.float16),\n supports_forward_ad=True,\n supports_out=False,\n sample_inputs_func=sample_cumulative_trapezoid),\n OpInfo('unsqueeze',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n # https://github.com/pytorch/pytorch/issues/66357\n check_batched_forward_grad=False,\n assert_jit_shape_analysis=True,\n assert_autodiffed=True,\n autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused\n autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused\n sample_inputs_func=sample_unsqueeze),\n OpInfo('xlogy',\n aliases=('special.xlogy',),\n dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),\n supports_forward_ad=True,\n safe_casts_outputs=True,\n sample_inputs_func=sample_inputs_xlogy),\n OpInfo('zero_',\n op=lambda x: torch.zero_(x.clone()),\n method_variant=None,\n inplace_variant=torch.Tensor.zero_,\n dtypes=all_types_and_complex_and(torch.bool, 
torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n skips=(\n # JIT has issue when op is passed as lambda\n # AssertionError: JIT Test does not execute any logic\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n sample_inputs_func=sample_inputs_zero_),\n OpInfo('special.xlog1py',\n aten_name='special_xlog1py',\n dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),\n backward_dtypesIfCPU=all_types_and(torch.bool, torch.bfloat16),\n safe_casts_outputs=True,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_xlog1py),\n OpInfo('special.zeta',\n aten_name='special_zeta',\n dtypes=all_types_and(torch.bool),\n supports_autograd=False,\n safe_casts_outputs=True,\n sample_inputs_func=sample_inputs_binary_pwise),\n # OpInfo entry to verify the gradient formula of `other`/`q`\n OpInfo('special.zeta',\n op=lambda q, x, **kwargs: torch.special.zeta(x, q, **kwargs),\n aten_name='special_zeta',\n variant_test_name='grad',\n dtypes=all_types_and(torch.bool),\n supports_autograd=True,\n safe_casts_outputs=True,\n skips=(\n # Lambda doesn't work in JIT test\n # AssertionError: JIT Test does not execute any logic\n DecorateInfo(unittest.skip(\"Skipped!\"), \"TestJit\", \"test_variant_consistency_jit\"),\n ),\n sample_inputs_func=sample_inputs_zeta),\n OpInfo('logsumexp',\n aliases=('special.logsumexp',),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.bfloat16, torch.half),\n assert_autodiffed=True,\n sample_inputs_func=sample_inputs_logsumexp),\n OpInfo('trace',\n dtypes=all_types_and_complex(),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_inplace_autograd=False,\n supports_out=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_trace),\n OpInfo('transpose',\n aliases=('swapdims', 'swapaxes'),\n assert_jit_shape_analysis=True,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n supports_out=False,\n supports_forward_ad=True,\n # https://github.com/pytorch/pytorch/issues/66357\n check_batched_forward_grad=False,\n sample_inputs_func=sample_inputs_transpose_swapdims),\n OpInfo('T',\n op=lambda x: x.T,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n supports_out=False,\n supports_forward_ad=True,\n skips=( # Lambda doesn't work in JIT test\n DecorateInfo(unittest.skip(\"Skipped!\"), \"TestJit\", \"test_variant_consistency_jit\"),),\n sample_inputs_func=sample_inputs_T),\n OpInfo('H',\n op=lambda x: x.H,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n supports_out=False,\n supports_forward_ad=True,\n skips=( # Lambda doesn't work in JIT test\n DecorateInfo(unittest.skip(\"Skipped!\"), \"TestJit\", \"test_variant_consistency_jit\"),),\n sample_inputs_func=sample_inputs_T),\n OpInfo('mT',\n op=lambda x: x.mT,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n supports_out=False,\n supports_forward_ad=True,\n skips=( # Lambda doesn't work in JIT test\n DecorateInfo(unittest.skip(\"Skipped!\"), \"TestJit\", \"test_variant_consistency_jit\"),),\n 
sample_inputs_func=sample_inputs_adjoint),\n OpInfo('mH',\n op=lambda x: x.mH,\n aliases=('adjoint',),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n supports_out=False,\n supports_forward_ad=True,\n skips=( # Lambda doesn't work in JIT test\n DecorateInfo(unittest.skip(\"Skipped!\"), \"TestJit\", \"test_variant_consistency_jit\"),),\n sample_inputs_func=sample_inputs_adjoint),\n OpInfo('tril',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half),\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_tril_triu),\n OpInfo('triu',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half),\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_tril_triu),\n OpInfo('kron',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_inplace_autograd=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_kron),\n OpInfo('inner',\n dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16),\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_inner,\n ),\n OpInfo('tensordot',\n dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16),\n safe_casts_outputs=True,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_tensordot,\n skips=(\n # Skip operator schema test because this is a functional and not an operator.\n # Reference: https://github.com/pytorch/pytorch/issues/54574\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),\n )\n ),\n OpInfo('to_sparse',\n op=lambda x, *args: x.to_sparse(*args),\n sample_inputs_func=sample_inputs_to_sparse,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n backward_dtypes=floating_types(),\n backward_dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_out=False,\n check_batched_grad=False,\n check_batched_gradgrad=False,\n skips=(\n # NotImplementedError: Could not run 'aten::normal_' with arguments from the 'SparseCPU' backend\n DecorateInfo(unittest.skip(\"\"), 'TestCommon', 'test_noncontiguous_samples'),\n # TODO: FIXME: complex inputs requiring grad error in forward\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_dtypes'),\n # JIT has issue when op is passed as lambda\n # NotImplementedError: Cannot access storage of SparseTensorImpl\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n # Allowed exception: sparse tensors don't have strides\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),\n )\n ),\n OpInfo('logcumsumexp',\n dtypes=floating_types_and(),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n backward_dtypesIfCUDA=floating_types_and(),\n skips=(\n # AssertionError: UserWarning not triggered : Resized a non-empty tensor but did not warn 
about it.\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type='cuda'),\n ),\n sample_inputs_func=sample_inputs_logcumsumexp),\n UnaryUfuncInfo('sigmoid',\n aliases=('special.expit', 'nn.functional.sigmoid'),\n ref=reference_sigmoid if TEST_SCIPY else _NOTHING,\n decorators=(precisionOverride({torch.float16: 1e-2,\n torch.complex64: 1e-1,\n torch.bfloat16: 1e-2}),),\n skips=(\n # TODO: FIXME: sigmoid fails on complex inputs that require grad\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_dtypes'),\n # Reference: https://github.com/pytorch/pytorch/issues/56012\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cuda', dtypes=[torch.complex64]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cuda', dtypes=[torch.complex64]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n # alias, nn.functional.sigmoid, will produce (because of warning string saved):\n # \"RuntimeError: Expected to not find \"sigmoid\" but found it\"\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping')),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n safe_casts_outputs=True,\n supports_forward_ad=True,\n assert_autodiffed=True,\n # sigmoid(z) = 1 / (1 + exp(-z)), at z = j * pi * odd_number, the denominator is zero\n reference_numerics_filter=NumericsFilter(\n condition=lambda x: (close_to_int(x / (math.pi * 1j))\n if x.is_complex() else x.new_tensor(False, dtype=torch.bool)),\n safe_val=0)),\n UnaryUfuncInfo('digamma',\n ref=scipy.special.digamma if TEST_SCIPY else _NOTHING,\n aliases=('special.psi', 'special.digamma',),\n decorators=(precisionOverride({torch.float16: 5e-1}),),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n supports_forward_ad=True,\n safe_casts_outputs=True),\n UnaryUfuncInfo('special.entr',\n ref=scipy.special.entr if TEST_SCIPY else _NOTHING,\n aten_name='special_entr',\n supports_forward_ad=True,\n decorators=(precisionOverride({torch.float16: 1e-1,\n torch.bfloat16: 1e-1}),),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=[torch.bfloat16, torch.float16]),\n ),\n supports_inplace_autograd=False,\n safe_casts_outputs=True,\n sample_inputs_func=sample_inputs_entr),\n UnaryUfuncInfo('special.ndtri',\n ref=scipy.special.ndtri if TEST_SCIPY else _NOTHING,\n domain=(0, 1),\n aten_name='special_ndtri',\n dtypes=all_types_and(torch.bool),\n safe_casts_outputs=True),\n UnaryUfuncInfo('erf',\n ref=scipy.special.erf if TEST_SCIPY else _NOTHING,\n aliases=('special.erf', ),\n decorators=(precisionOverride({torch.float16: 1e-2,\n torch.bfloat16: 1e-2}),),\n skips=(\n DecorateInfo(unittest.skip(\"Skipped! 
sparse backward not supported\"),\n 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),\n\n ),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n assert_jit_shape_analysis=True,\n supports_sparse=True,\n supports_sparse_csr=True,\n safe_casts_outputs=True),\n UnaryUfuncInfo('erfc',\n ref=scipy.special.erfc if TEST_SCIPY else _NOTHING,\n aliases=('special.erfc', ),\n decorators=(precisionOverride({torch.float16: 1e-2,\n torch.bfloat16: 1e-2}),),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n safe_casts_outputs=True),\n UnaryUfuncInfo('erfinv',\n ref=scipy.special.erfinv if TEST_SCIPY else _NOTHING,\n aliases=('special.erfinv', ),\n decorators=(precisionOverride({torch.float16: 1e-2,\n torch.bfloat16: 1e-2,\n torch.float32: 1e-4}),),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n safe_casts_outputs=True,\n supports_sparse_csr=True,\n domain=(-1, 1),\n skips=(\n # Reference: https://github.com/pytorch/pytorch/pull/49155#issuecomment-742664611\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n active_if=TEST_SCIPY and distutils.version.LooseVersion(scipy.__version__) < \"1.4.0\"),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n active_if=TEST_SCIPY and distutils.version.LooseVersion(scipy.__version__) < \"1.4.0\"),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',\n active_if=TEST_SCIPY and distutils.version.LooseVersion(scipy.__version__) < \"1.4.0\"),\n )),\n UnaryUfuncInfo('lgamma',\n ref=reference_lgamma if TEST_SCIPY else _NOTHING,\n aliases=('special.gammaln', ),\n decorators=(precisionOverride({torch.float16: 7e-1}),),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n supports_forward_ad=True,\n skips=(\n # Reference: https://github.com/pytorch/pytorch/pull/50140#discussion_r552615345\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.bfloat16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.bfloat16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',\n device_type='cpu', dtypes=[torch.bfloat16]),\n # Reference: https://github.com/pytorch/pytorch/pull/50140#issuecomment-756150214\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS),\n ),\n safe_casts_outputs=True,\n # lgamma has multiple singularities at x <= 0\n reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),\n OpInfo(\n 'logdet',\n dtypes=floating_types(),\n supports_out=False,\n sample_inputs_func=sample_inputs_logdet,\n decorators=(skipCPUIfNoLapack, skipCUDAIfNoMagma, skipCUDAIfRocm)),\n # `log_softmax` supports different dtypes based on whether the `dtype` argument\n # is passed or not. 
Hence two OpInfo entries, one with dtype and the other without.\n OpInfo(\n 'log_softmax',\n aliases=('special.log_softmax', 'nn.functional.log_softmax'),\n supports_out=False,\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_softmax_variant,\n assert_autodiffed=True),\n OpInfo(\n 'log_softmax',\n variant_test_name='dtype',\n aliases=('special.log_softmax', 'nn.functional.log_softmax'),\n supports_out=False,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True),\n assert_autodiffed=True),\n UnaryUfuncInfo('logit',\n ref=scipy.special.logit if TEST_SCIPY else _NOTHING,\n domain=(0, 1),\n aliases=('special.logit', ),\n supports_forward_ad=True,\n decorators=(precisionOverride({torch.bfloat16: 5e-1,\n torch.float16: 5e-1}),),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_logit,\n safe_casts_outputs=True),\n OpInfo('where',\n # Currently only the `input` is tested in gradcheck.\n # If we pass `condition` first, none of the inputs that support\n # autograd will be tested. Hence the following lambda.\n op=lambda self, condition, other: torch.where(condition, self, other),\n sample_inputs_func=sample_inputs_where,\n supports_out=False,\n skips=(\n # test does not work with passing lambda for op\n # AssertionError: False is not true :\n # Failure in testing nodes' autodifferentiation.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16)),\n OpInfo('nonzero',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n sample_inputs_func=sample_inputs_nonzero,\n supports_autograd=False,\n skips=(\n # https://github.com/pytorch/pytorch/issues/67458\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),\n # nonzero does not raise a warning when the out tensor is resized\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),\n # Can't find schemas for this operator for some reason\n DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),\n )),\n # `torch.norm` has multiple code paths depending on the value of `p`.\n # These paths have different dtype support. Also JIT supports\n # most variants, but not all of them. 
So we split the OpInfo entries\n # for `norm` based on the code paths and JIT support.\n OpInfo('norm',\n sample_inputs_func=sample_inputs_norm,\n dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16)),\n OpInfo('norm',\n variant_test_name='nuc',\n sample_inputs_func=sample_inputs_norm_nuc,\n decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],\n dtypes=floating_and_complex_types(),\n dtypesIfCUDA=floating_and_complex_types(),\n skips=(\n # Pre-existing condition; Needs to be fixed\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),\n # RuntimeError not raised :\n # Expected RuntimeError when calling with input.device=cpu and out.device=cuda\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),\n # RuntimeError:\n # Arguments for call are not valid.\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.complex64, torch.float32,)), # noqa: B950\n )\n ),\n OpInfo('norm',\n variant_test_name='fro',\n sample_inputs_func=sample_inputs_norm_fro,\n dtypes=floating_and_complex_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n skips=(\n # Pre-existing condition; Needs to be fixed\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),\n # Expected RuntimeError when calling with input.device=cpu and out.device=cuda\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),\n # Arguments for call are not valid.\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.complex64, torch.float32,)), # noqa: B950\n )),\n OpInfo('norm',\n variant_test_name='inf',\n sample_inputs_func=sample_inputs_norm_inf,\n dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n backward_dtypesIfCPU=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n skips=(\n # https://github.com/pytorch/pytorch/issues/67517\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_noncontiguous_samples'),\n # the following 2 tests fail intermittently\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_fn_grad', device_type='cpu', dtypes=(torch.complex128,)), # noqa: B950\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_fn_gradgrad', device_type='cpu', dtypes=(torch.complex128,)), # noqa: B950\n )\n ),\n OpInfo('t',\n sample_inputs_func=sample_inputs_t,\n supports_out=False,\n supports_forward_ad=True,\n autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused\n autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused\n # https://github.com/pytorch/pytorch/issues/66357\n check_batched_forward_grad=False,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n assert_autodiffed=True,),\n UnaryUfuncInfo('special.erfcx',\n ref=scipy.special.erfcx if TEST_SCIPY else _NOTHING,\n aten_name='special_erfcx',\n decorators=(toleranceOverride({torch.float32: tol(atol=0, rtol=4e-6), }),),\n dtypes=all_types_and(torch.bool),\n safe_casts_outputs=True),\n OpInfo(\n \"nn.functional.dropout\",\n op=lambda input, *args, **kwargs:\n wrapper_set_seed(torch.nn.functional.dropout, input, *args, **kwargs),\n ref=_NOTHING,\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n skips=(\n # Probably because we have used lambda for the op here\n # AssertionError: JIT Test does not execute any logic\n DecorateInfo(unittest.expectedFailure, 'TestJit', 
'test_variant_consistency_jit'),\n # inplace variant dispatches to dropout kernel, while on CUDA\n # the op dispatches to _fused_dropout (with a few more conditions)\n # hence, different values and this skip here\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestMathBits', 'test_neg_view', device_type='cuda'),\n # On CUDA, the op is dispatched (and a few more conditions) to\n # _fused_dropout, which doesn't support forward AD\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_forward_mode_AD', device_type='cuda'),),\n gradcheck_wrapper=wrapper_set_seed,\n supports_forward_ad=True,\n # https://github.com/pytorch/pytorch/issues/66357\n check_batched_forward_grad=False,\n supports_out=False,\n sample_inputs_func=sample_inputs_dropout,\n inplace_variant=lambda input, *args, **kwargs:\n wrapper_set_seed(torch.nn.functional.dropout, input, *args, **kwargs, inplace=True)),\n OpInfo(\n \"nn.functional.feature_alpha_dropout\",\n op=lambda input, *args, **kwargs:\n wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs),\n ref=_NOTHING,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n skips=(\n # Probably because we have used lambda for the op here\n # AssertionError: JIT Test does not execute any logic\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),),\n gradcheck_wrapper=wrapper_set_seed,\n supports_forward_ad=True,\n supports_out=False,\n sample_inputs_func=sample_inputs_dropout,\n inplace_variant=lambda input, *args, **kwargs:\n wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs, inplace=True)),\n OpInfo(\n \"nn.functional.one_hot\",\n ref=reference_one_hot,\n supports_out=False,\n dtypes=_dispatch_dtypes((torch.int64,)),\n sample_inputs_func=sample_inputs_one_hot,\n ),\n OpInfo(\n \"nn.functional.embedding\",\n # We use lambda to reshuffle the positional arguments.\n # This is because currently only the `input` field of SampleInput\n # is tested in gradient tests.\n op=lambda weight, idx, **kwargs: torch.nn.functional.embedding(idx, weight, **kwargs),\n dtypes=floating_types_and(torch.bfloat16, torch.float16),\n sample_inputs_func=sample_inputs_embedding,\n skips=(\n # Does not work with lambda\n # Raises : JIT Test does not execute any logic\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),\n # Reference: https://github.com/pytorch/pytorch/issues/67084\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestMathBits', 'test_neg_view', device_type='cuda'),\n ),\n supports_out=False,\n ),\n OpInfo(\n \"nn.functional.embedding_bag\",\n # We use lambda to reshuffle the positional arguments.\n # This is because currently only the `input` field of SampleInput\n # is tested in gradient tests.\n op=lambda weight, idx, **kwargs: torch.nn.functional.embedding_bag(idx, weight, **kwargs),\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),\n # backward is not supported for mode `max` and dtype `bfloat16`\n backward_dtypesIfCUDA=floating_types_and(torch.float16),\n sample_inputs_func=sample_inputs_embedding_bag,\n skips=(\n # Does not work with lambda\n # Raises : JIT Test does not execute any logic\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),\n ),\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n supports_out=False,\n supports_gradgrad=False,\n ),\n OpInfo(\n \"nn.functional.softplus\",\n ref=reference_softplus,\n sample_inputs_func=sample_inputs_softplus,\n 
dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),\n supports_out=False,\n ),\n OpInfo(\n \"linalg.tensorinv\",\n ref=np.linalg.tensorinv,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_tensorinv,\n supports_forward_ad=True,\n decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver],\n ),\n OpInfo(\n \"linalg.tensorsolve\",\n ref=lambda a, b, dims=None: np.linalg.tensorsolve(a, b, axes=dims),\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_tensorsolve,\n supports_forward_ad=True,\n decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver],\n ),\n OpInfo(\n \"nn.functional.mse_loss\",\n ref=reference_mse_loss,\n sample_inputs_func=sample_inputs_mse_loss,\n supports_out=False,\n dtypes=floating_types_and(torch.float16),\n backward_dtypesIfCPU=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),\n skips=(\n # RuntimeError: input->type()->kind() == TypeKind::OptionalType\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":252,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), \"TestJit\", \"test_variant_consistency_jit\", dtypes=(torch.float32,),),\n ),\n ),\n OpInfo(\n \"nn.functional.grid_sample\",\n ref=_NOTHING,\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.float16),\n supports_out=False,\n sample_inputs_func=sample_inputs_grid_sample,\n supports_gradgrad=False,\n gradcheck_nondet_tol=1e-15),\n OpInfo(\n \"argwhere\",\n ref=np.argwhere,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_autograd=False,\n sample_inputs_func=sample_inputs_argwhere,\n ),\n ReductionOpInfo(\n 'all',\n identity=True,\n supports_multiple_dims=False,\n supports_out=False,\n supports_autograd=False,\n result_dtype=torch.bool,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n ref=reference_reduction_numpy(np.all),\n skips=(\n # FIXME: does not support passing keepdim without dim\n DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_default_keepdim'),\n # FIXME: does not support dim=None\n DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_none'),\n DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_none_keepdim'),\n # FIXME: uint8 input returns uint8 instead of bool\n DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_result_dtype', dtypes=[torch.uint8]),\n ),\n ),\n ReductionOpInfo(\n 'any',\n identity=False,\n supports_multiple_dims=False,\n supports_out=False,\n supports_autograd=False,\n result_dtype=torch.bool,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n ref=reference_reduction_numpy(np.any),\n skips=(\n # FIXME: does not support passing keepdim without dim\n DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_default_keepdim'),\n # FIXME: does not support dim=None\n DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_none'),\n DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_none_keepdim'),\n # FIXME: uint8 input returns uint8 instead of bool\n DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_result_dtype', dtypes=[torch.uint8]),\n ),\n ),\n ReductionOpInfo(\n 'amax',\n nan_policy='propagate',\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n ref=reference_reduction_numpy(np.amax),\n skips=(\n # FIXME: sum reduces all 
dimensions when dim=[]\n DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'),\n DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'),\n ),\n ),\n ReductionOpInfo(\n 'amin',\n nan_policy='propagate',\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n ref=reference_reduction_numpy(np.amin),\n skips=(\n # FIXME: sum reduces all dimensions when dim=[]\n DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'),\n DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'),\n ),\n ),\n ReductionOpInfo(\n 'argmax',\n supports_multiple_dims=False,\n supports_autograd=False,\n result_dtype=torch.int64,\n dtypes=all_types_and(torch.float16, torch.bfloat16),\n ref=reference_reduction_numpy(np.argmax, supports_keepdims=False),\n skips=(\n # FIXME: keepdim parameter is ignored when dim=None\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_default_keepdim'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_none_keepdim'),\n ),\n ),\n ReductionOpInfo(\n 'argmin',\n supports_multiple_dims=False,\n supports_autograd=False,\n result_dtype=torch.int64,\n dtypes=all_types_and(torch.float16, torch.bfloat16),\n ref=reference_reduction_numpy(np.argmin, supports_keepdims=False),\n skips=(\n # FIXME: keepdim parameter is ignored when dim=None\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_default_keepdim'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_none_keepdim'),\n ),\n ),\n ReductionOpInfo(\n 'count_nonzero',\n identity=0,\n supports_out=False,\n supports_autograd=False,\n result_dtype=torch.int64,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_reduction_count_nonzero,\n ref=reference_reduction_numpy(np.count_nonzero),\n skips=(\n # FIXME: count_nonzero does not accept keepdim kwarg\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_default_keepdim'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_none_keepdim'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_single_keepdim'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty_keepdim'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_multi_keepdim'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_multi_unsorted_keepdim'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_offbounds_keepdim'),\n # FIXME: dim=[] reduces all dimensions\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty'),\n ),\n ),\n ReductionOpInfo(\n 'mean',\n nan_policy='propagate',\n supports_out=False,\n supports_forward_ad=True,\n assert_autodiffed=True,\n assert_jit_shape_analysis=True,\n promotes_int_to_float=True,\n dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n ref=reference_reduction_numpy(np.mean),\n skips=(\n # FIXME: mean does not support passing keepdim without passing dim\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_default_keepdim'),\n # FIXME: mean reduces all dimensions when dim=[]\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty_keepdim'),\n # FIXME: mean does not support passing None to dim\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 
'test_dim_none'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_none_keepdim'),\n # FIXME: improve precision\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_noncontiguous_all',\n dtypes=[torch.float16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_ref_small_input',\n dtypes=[torch.float16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_ref_extremal_values',\n device_type='cuda', dtypes=[torch.complex64]),\n ),\n ),\n ReductionOpInfo(\n 'nanmean',\n nan_policy='omit',\n assert_autodiffed=True,\n promotes_int_to_float=True,\n dtypes=floating_types_and(torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_nan_reduction(supports_multiple_dims=True),\n ref=reference_reduction_numpy(np.nanmean),\n skips=(\n # AssertionError: False is not true :\n # Failure in testing nodes' autodifferentiation.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n # FIXME: prod reduces all dimensions when dim=[]\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty_keepdim'),\n # FIXME: improve precision\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_noncontiguous_all',\n dtypes=[torch.float16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_ref_small_input',\n dtypes=[torch.float16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_ref_duplicate_values',\n device_type='cuda', dtypes=[torch.float16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_ref_extremal_values',\n device_type='cuda', dtypes=[torch.complex64]),\n ),\n ),\n ReductionOpInfo(\n 'std',\n nan_policy='propagate',\n supports_out=False,\n assert_autodiffed=True,\n promotes_int_to_float=True,\n dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_std_var,\n ref=reference_std_var(np.std),\n generate_args_kwargs=generate_std_var_kwargs,\n skips=(\n # FIXME: cannot specify keepdim without dim\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_default_keepdim'),\n # FIXME: dim=None not supported\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_none'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_none_keepdim'),\n # FIXME: dim=[] reduces all dimensions\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty_keepdim'),\n # TODO(@heitorschueroff) std return float for complex types\n # need to find a better way to model result dtype\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_result_dtype'),\n # FIXME: improve precision\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_ref_small_input'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_ref_duplicate_values'),\n # NumPy is giving NaN for this\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_ref_large_input'),\n ),\n ),\n ReductionOpInfo(\n 'var',\n nan_policy='propagate',\n supports_out=False,\n assert_autodiffed=True,\n promotes_int_to_float=True,\n dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),\n 
sample_inputs_func=sample_inputs_std_var,\n ref=reference_std_var(np.var),\n generate_args_kwargs=generate_std_var_kwargs,\n skips=(\n # FIXME: cannot specify keepdim without dim\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_default_keepdim'),\n # FIXME: dim=None not supported\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_none'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_none_keepdim'),\n # FIXME: dim=[] reduces all dimensions\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty_keepdim'),\n # TODO(@heitorschueroff) std return float for complex types\n # need to find a better way to model result dtype\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_result_dtype'),\n # FIXME: improve precision\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_ref_small_input'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_ref_duplicate_values'),\n # NumPy is giving NaN for this\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_ref_large_input'),\n ),\n ),\n ReductionOpInfo(\n 'prod',\n identity=1,\n nan_policy='propagate',\n supports_multiple_dims=False,\n supports_out=False,\n promotes_int_to_int64=True,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n dtypes=all_types_and_complex_and(torch.bool),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_prod,\n ref=reference_reduction_numpy(np.prod),\n skips=(\n # FIXME: prod does not support passing keepdim without passing dim\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_default_keepdim'),\n # FIXME: prod reduces all dimensions when dim=[]\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty_keepdim'),\n # FIXME: prod does not support passing None to dim\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_none'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_none_keepdim'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_ref_small_input',\n dtypes=[torch.float16, torch.complex64]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_ref_duplicate_values',\n dtypes=[torch.uint8, torch.float16, torch.complex64]),\n ),\n ),\n ReductionOpInfo(\n 'sum',\n identity=0,\n nan_policy='propagate',\n supports_out=False,\n supports_forward_ad=True,\n promotes_int_to_int64=True,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n ref=reference_reduction_numpy(np.sum),\n skips=(\n # FIXME: sum does not support passing keepdim without passing dim\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_default_keepdim'),\n # FIXME: sum reduces all dimensions when dim=[]\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty_keepdim'),\n # FIXME: sum does not support passing None to dim\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_none'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_none_keepdim'),\n # FIXME: improve precision\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_noncontiguous_all',\n 
dtypes=[torch.float16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_ref_small_input',\n dtypes=[torch.float16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_ref_duplicate_values',\n dtypes=[torch.float16]),\n ),\n ),\n ReductionOpInfo(\n 'nansum',\n identity=0,\n nan_policy='omit',\n supports_out=False,\n promotes_int_to_int64=True,\n dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),\n ref=reference_reduction_numpy(np.nansum),\n skips=(\n # FIXME: nansum does not support passing keepdim without passing dim\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_default_keepdim'),\n # FIXME: nansum reduces all dimensions when dim=[]\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty_keepdim'),\n # FIXME: nansum does not support passing None to dim\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_none'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_none_keepdim'),\n # FIXME: improve precision\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_noncontiguous_all',\n dtypes=[torch.float16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_ref_small_input',\n dtypes=[torch.float16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_ref_duplicate_values',\n dtypes=[torch.float16]),\n ),\n ),\n ReductionOpInfo(\n '_masked.sum',\n ref=reference_reduction_numpy(np.sum),\n method_variant=None,\n identity=0,\n nan_policy='propagate',\n supports_out=False,\n promotes_int_to_int64=False,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n skips=(\n # FIXME: sum reduces all dimensions when dim=[]\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty_keepdim'),\n # RuntimeError: undefined value tensor\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n decorators=[\n DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-03, rtol=1e-03)}),\n 'TestReductions', 'test_reference_masked'),\n DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}),\n 'TestReductions', 'test_reference_masked'),\n DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-03)}),\n 'TestReductions', 'test_ref_small_input'),\n ],\n sample_inputs_func=sample_inputs_masked_reduction\n ),\n ReductionOpInfo(\n '_masked.prod',\n ref=reference_reduction_numpy(np.prod),\n method_variant=None,\n identity=1,\n nan_policy='propagate',\n supports_out=False,\n promotes_int_to_int64=True,\n # FIXME: \"prod_cpu\" not implemented for 'BFloat16'\n # FIXME: \"prod_cpu\" not implemented for 'Half'\n dtypes=all_types_and_complex_and(torch.bool),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n skips=(\n # NotSupportedError: Compiled functions can't ... 
use keyword-only arguments with defaults\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n decorators=[\n DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-02)}),\n 'TestReductions', 'test_reference_masked'),\n DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}),\n 'TestReductions', 'test_ref_duplicate_values'),\n ],\n sample_inputs_func=sample_inputs_masked_reduction\n ),\n ReductionOpInfo(\n '_masked.amax',\n nan_policy='propagate',\n supports_out=False,\n dtypes=all_types_and(torch.float16, torch.bfloat16),\n ref=reference_reduction_numpy(np.amax),\n skips=(\n # FIXME: amax reduces all dimensions when dim=[]\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty_keepdim'),\n # RuntimeError: Unknown builtin op: aten::iinfo\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n sample_inputs_func=sample_inputs_masked_reduction,\n gradcheck_wrapper=gradcheck_wrapper_masked_operation\n ),\n ReductionOpInfo(\n '_masked.amin',\n nan_policy='propagate',\n supports_out=False,\n dtypes=all_types_and(torch.float16, torch.bfloat16),\n ref=reference_reduction_numpy(np.amin),\n skips=(\n # FIXME: amin reduces all dimensions when dim=[]\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty_keepdim'),\n # RuntimeError: Unknown builtin op: aten::iinfo\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n sample_inputs_func=sample_inputs_masked_reduction,\n gradcheck_wrapper=gradcheck_wrapper_masked_operation\n ),\n ReductionOpInfo(\n '_masked.mean',\n ref=reference_reduction_numpy(np.mean) if np.lib.NumpyVersion(np.__version__) >= '1.20.2' else None,\n method_variant=None,\n nan_policy='propagate',\n supports_out=False,\n promotes_int_to_float=True,\n dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool),\n skips=(\n # FIXME: sum reduces all dimensions when dim=[]\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty_keepdim'),\n # RuntimeError: undefined value tensor\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n decorators=[\n DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}),\n 'TestReductions', 'test_reference_masked'),\n ],\n sample_inputs_func=sample_inputs_masked_reduction,\n gradcheck_wrapper=gradcheck_wrapper_masked_operation\n ),\n ReductionOpInfo(\n '_masked.norm',\n identity=0,\n method_variant=None,\n nan_policy='propagate',\n supports_out=False,\n promotes_int_to_float=True,\n dtypes=floating_types_and(torch.float16, torch.bfloat16),\n skips=(\n # FIXME: sum reduces all dimensions when dim=[]\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty_keepdim'),\n # torch.jit.frontend.NotSupportedError: Compiled functions\n # can't take variable number of arguments or use\n # keyword-only arguments with defaults\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n sample_inputs_func=sample_inputs_masked_norm,\n gradcheck_wrapper=gradcheck_wrapper_masked_operation\n ),\n 
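# Several of the `_masked.*` entries above set `method_variant=None`: these reductions\n # are free functions under the `torch._masked` namespace with no `Tensor` method form.\n # They accept an optional boolean `mask` kwarg, and masked-out elements behave as the\n # reduction identity (e.g. 0 for `_masked.sum`, 1 for `_masked.prod`, per the `identity`\n # fields above). A minimal usage sketch, assuming the `torch._masked` Python API of\n # this PyTorch version (see torch/_masked/__init__.py for the exact signatures):\n #\n #   x = torch.tensor([[1., 2., 3.]])\n #   mask = torch.tensor([[True, False, True]])\n #   torch._masked.sum(x, 1, mask=mask)  # tensor([4.]) -- the masked-out 2. acts as 0\n #\n # `gradcheck_wrapper=gradcheck_wrapper_masked_operation` is used so that gradcheck\n # stays well-defined on the masked-out elements.\n 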
ReductionOpInfo(\n '_masked.var',\n ref=reference_reduction_numpy(np.var) if np.lib.NumpyVersion(np.__version__) >= '1.20.2' else None,\n method_variant=None,\n nan_policy='propagate',\n supports_out=False,\n promotes_int_to_float=True,\n dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),\n skips=(\n # FIXME: sum reduces all dimensions when dim=[]\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty_keepdim'),\n # RuntimeError: undefined value tensor\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n decorators=[\n DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),\n 'TestReductions', 'test_reference_masked'),\n DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),\n 'TestReductions', 'test_ref_small_input'),\n DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),\n 'TestMasked', 'test_reference_masked'),\n ],\n sample_inputs_func=sample_inputs_masked_var,\n gradcheck_wrapper=gradcheck_wrapper_masked_operation\n ),\n OpInfo(\n '_masked.softmax',\n method_variant=None,\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_masked_softmax,\n skips=(\n # torch.jit.frontend.NotSupportedError: Compiled\n # functions can't take variable number of arguments or\n # use keyword-only arguments with defaults\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n gradcheck_wrapper=gradcheck_wrapper_masked_operation,\n supports_out=False),\n OpInfo(\n '_masked.log_softmax',\n method_variant=None,\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_masked_softmax,\n skips=(\n # torch.jit.frontend.NotSupportedError: Compiled\n # functions can't take variable number of arguments or\n # use keyword-only arguments with defaults\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n decorators=[\n DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-02, rtol=1e-02)}),\n 'TestMasked', 'test_reference_masked'),\n ],\n gradcheck_wrapper=gradcheck_wrapper_masked_operation,\n supports_out=False),\n OpInfo(\n '_masked.softmin',\n method_variant=None,\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_masked_softmax,\n skips=(\n # torch.jit.frontend.NotSupportedError: Compiled\n # functions can't take variable number of arguments or\n # use keyword-only arguments with defaults\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n gradcheck_wrapper=gradcheck_wrapper_masked_operation,\n supports_out=False),\n OpInfo(\n '_masked.normalize',\n method_variant=None,\n dtypes=floating_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_masked_normalize,\n skips=(\n # torch.jit.frontend.NotSupportedError: Compiled\n # functions can't take variable number of arguments or\n # use keyword-only arguments with defaults\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n # RuntimeError: \"clamp_min_cpu\" not implemented for 'Half'\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestMasked', 'test_reference_masked',\n device_type='cpu', 
dtypes=[torch.half]),\n ),\n gradcheck_wrapper=gradcheck_wrapper_masked_operation,\n supports_out=False),\n OpInfo(\n \"nn.functional.ctc_loss\",\n ref=_NOTHING,\n dtypes=floating_types(),\n supports_out=False,\n sample_inputs_func=sample_inputs_ctc_loss,\n skips=(\n # https://github.com/pytorch/pytorch/issues/67462\n # torch.autograd.gradcheck.GradcheckError: Jacobian mismatch for output 0 with respect to input 0\n DecorateInfo(\n unittest.expectedFailure,\n \"TestGradients\",\n \"test_fn_grad\",\n dtypes=(torch.float64,),\n ),\n # RuntimeError: derivative for aten::_ctc_loss_backward is not implemented\n DecorateInfo(\n unittest.expectedFailure,\n \"TestGradients\",\n \"test_fn_gradgrad\",\n dtypes=(torch.float64,),\n ),\n # RuntimeError: derivative for aten::_ctc_loss_backward is not implemented\n DecorateInfo(\n unittest.skip(\"Skipped!\"),\n \"TestJit\",\n \"test_variant_consistency_jit\",\n dtypes=(torch.float32,),\n ),\n # Operation calls data_ptr() somewhere; needs to be fixed\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),\n ),\n ),\n OpInfo(\n \"nn.functional.cosine_embedding_loss\",\n ref=_NOTHING,\n dtypes=all_types_and(torch.bfloat16, torch.bool),\n dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n supports_out=False,\n sample_inputs_func=sample_inputs_cosine_embedding_loss,\n ),\n OpInfo(\n \"nn.functional.nll_loss\",\n ref=_NOTHING,\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_nll_loss,\n skips=(\n # RuntimeError:\n # undefined value tensor:\n # File \"<string>\", line 3\n # def the_method(i0, i1):\n # return torch.nn.functional.nll_loss(i0, i1, weight=tensor([8.4784, 1.7658, 4.3228], dtype=torch.float32))\n # ~~~~~~ <--- HERE\n DecorateInfo(unittest.skip(\"Skipped!\"), \"TestJit\", \"test_variant_consistency_jit\", dtypes=(torch.float32,),),\n ),\n ),\n OpInfo(\n \"nn.functional.gaussian_nll_loss\",\n ref=_NOTHING,\n dtypes=all_types_and(torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_gaussian_nll_loss,\n skips=(\n # JIT does not support variadic tensors.\n # RuntimeError: input->type()->kind() == TypeKind::OptionalType\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":270,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), \"TestJit\", \"test_variant_consistency_jit\", dtypes=(torch.float32,),),\n ),\n ),\n OpInfo(\n \"nn.functional.hinge_embedding_loss\",\n ref=_NOTHING,\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_hinge_embedding_loss,\n ),\n OpInfo(\n \"nn.functional.huber_loss\",\n ref=_NOTHING,\n dtypes=floating_types_and(torch.float16, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_huber_loss,\n skips=(\n # JIT does not support variadic tensors.\n # RuntimeError: input->type()->kind() == TypeKind::OptionalType\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":270,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), \"TestJit\", \"test_variant_consistency_jit\", dtypes=(torch.float32,),),\n )\n ),\n OpInfo(\n \"nn.functional.poisson_nll_loss\",\n ref=_NOTHING,\n 
dtypes=all_types_and(torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_poisson_nll_loss,\n skips=(\n # https://github.com/pytorch/pytorch/issues/67461\n # torch.autograd.gradcheck.GradcheckError: Jacobian mismatch for output 0 with respect to input 0\n DecorateInfo(\n unittest.expectedFailure,\n \"TestGradients\",\n \"test_fn_grad\",\n dtypes=(torch.float64,),\n ),\n DecorateInfo(\n unittest.expectedFailure,\n \"TestGradients\",\n \"test_fn_gradgrad\",\n dtypes=(torch.float64,),\n ),\n DecorateInfo(\n unittest.expectedFailure,\n \"TestGradients\",\n \"test_forward_mode_AD\",\n dtypes=(torch.float64,),\n ),\n ),\n ),\n OpInfo(\n \"argsort\",\n dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_argsort,\n supports_out=False,\n supports_autograd=False,\n skips=(\n DecorateInfo(\n unittest.skip(\"Skipped!\"),\n \"TestJit\",\n \"test_variant_consistency_jit\",\n dtypes=(torch.float32,),\n ),\n ),\n ),\n OpInfo(\n \"repeat_interleave\",\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_repeat_interleave,\n supports_out=False,\n supports_forward_ad=True,\n skips=(\n DecorateInfo(\n unittest.skip(\"Skipped!\"),\n \"TestJit\",\n \"test_variant_consistency_jit\",\n dtypes=(torch.float32, torch.complex64),\n ),\n ),\n ),\n OpInfo(\n \"nn.functional.pairwise_distance\",\n ref=lambda a, b, p=2.0, eps=1e-6, keepdim=False: (\n np.sum(np.abs(a - b + eps) ** p, axis=-1, keepdims=keepdim) ** (1 / p)\n ),\n sample_inputs_func=sample_inputs_pairwise_distance,\n dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),\n supports_out=False,\n skips=(\n DecorateInfo(\n unittest.skip(\"Skipped!\"),\n \"TestJit\",\n \"test_variant_consistency_jit\",\n dtypes=(torch.float32, torch.complex64),\n ),\n ),\n ),\n OpInfo(\n \"nn.functional.pixel_shuffle\",\n sample_inputs_func=sample_inputs_pixel_shuffle,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n skips=(\n DecorateInfo(\n unittest.skip(\"Skipped!\"),\n \"TestJit\",\n \"test_variant_consistency_jit\",\n dtypes=(torch.float32, torch.complex64),\n ),\n ),\n ),\n OpInfo(\n \"nn.functional.pixel_unshuffle\",\n sample_inputs_func=sample_inputs_pixel_unshuffle,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n skips=(\n DecorateInfo(\n unittest.skip(\"Skipped!\"),\n \"TestJit\",\n \"test_variant_consistency_jit\",\n dtypes=(torch.float32, torch.complex64),\n ),\n ),\n )\n]\n\n# Common operator groupings\nunary_ufuncs = [op for op in op_db if isinstance(op, UnaryUfuncInfo)]\nbinary_ufuncs = [op for op in op_db if isinstance(op, BinaryUfuncInfo)]\nspectral_funcs = [op for op in op_db if isinstance(op, SpectralFuncInfo)]\nsparse_unary_ufuncs = [op for op in op_db if isinstance(op, UnaryUfuncInfo) and op.supports_sparse]\nsparse_csr_unary_ufuncs = [op for op in op_db if isinstance(op, UnaryUfuncInfo) and op.supports_sparse_csr]\nshape_funcs = [op for op in op_db if isinstance(op, ShapeFuncInfo)]\nreduction_ops = [op for op in op_db if isinstance(op, ReductionOpInfo)]\nreference_filtered_ops = [op for op in reduction_ops if op.ref not in (_NOTHING, None)]\nreference_masked_ops = [op for op in reference_filtered_ops if 
op.name.startswith('_masked.')]\n\n# TODO: review porting these to make_tensor\ndef index_variable(shape, max_indices, device=torch.device('cpu')):\n # random integer indices drawn uniformly from [0, max_indices)\n if not isinstance(shape, tuple):\n shape = (shape,)\n index = torch.rand(*shape, dtype=torch.double, device=device).mul_(max_indices).floor_().long()\n return index\n\ndef gather_variable(shape, index_dim, max_indices, duplicate=False, device=torch.device('cpu')):\n # 2D index tensor for gather: each slice along `index_dim` holds distinct values\n # from [0, max_indices); `duplicate` copies one slice along the other dim so that\n # a repeated index is guaranteed\n assert len(shape) == 2\n assert index_dim < 2\n batch_dim = 1 - index_dim\n index = torch.zeros(*shape, dtype=torch.long, device=device)\n for i in range(shape[index_dim]):\n index.select(index_dim, i).copy_(\n torch.randperm(max_indices, device=device)[:shape[batch_dim]])\n if duplicate:\n index.select(batch_dim, 0).copy_(index.select(batch_dim, 1))\n return index\n\ndef bernoulli_scalar():\n return torch.tensor(0, dtype=torch.bool).bernoulli_()\n\ndef mask_not_all_zeros(shape):\n assert len(shape) > 0\n while True:\n result = torch.randn(shape).gt(0)\n if result.sum() > 0:\n return result\n\n\n# TODO: move all tri/tril/triu testing to tensor creation op test suite and remove\n# these from here\ndef _compare_trilu_indices(\n self, row, col, offset=0, dtype=torch.long, device='cpu'):\n if row == 0 or col == 0:\n # have to handle this separately as tril and triu do not take\n # an empty matrix as input\n self.assertEqual(\n torch.empty(0, 2, dtype=dtype, device=device).transpose(0, 1),\n torch.tril_indices(row, col, offset, dtype=dtype, device=device))\n\n self.assertEqual(\n torch.empty(0, 2, dtype=dtype, device=device).transpose(0, 1),\n torch.triu_indices(row, col, offset, dtype=dtype, device=device))\n\n else:\n # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095\n self.assertEqualIgnoreType(\n torch.ones(row, col, device='cpu')\n .tril(offset).nonzero().to(dtype).transpose(0, 1),\n torch.tril_indices(row, col, offset, dtype=dtype, device=device))\n\n # TODO(#38095): Replace assertEqualIgnoreType. 
See issue #38095\n self.assertEqualIgnoreType(\n torch.ones(row, col, device='cpu')\n .triu(offset).nonzero().to(dtype).transpose(0, 1),\n torch.triu_indices(row, col, offset, dtype=dtype, device=device))\n\n\ndef _compare_large_trilu_indices(\n self, row, col, offset=0, dtype=torch.long, device='cpu'):\n l = torch.ones(row, col, dtype=dtype, device='cpu').tril(offset) \\\n .nonzero()[-100:-1, :].transpose(0, 1).to(device)\n torch.cuda.empty_cache()\n\n r = torch.tril_indices(\n row, col, offset, dtype=dtype, device=device)[:, -100:-1]\n self.assertEqual(l, r)\n torch.cuda.empty_cache()\n\n l = torch.ones(row, col, dtype=dtype, device='cpu').triu(offset) \\\n .nonzero()[-100:-1, :].transpose(0, 1).to(device)\n torch.cuda.empty_cache()\n\n r = torch.triu_indices(\n row, col, offset, dtype=dtype, device=device)[:, -100:-1]\n self.assertEqual(l, r)\n torch.cuda.empty_cache()\n\n# (\n# row\n# col\n# offset (optional)\n# dtype (optional)\n# )\ntri_tests_args = [\n (1, 1),\n (3, 3),\n (3, 3, 1),\n (3, 3, 2),\n (3, 3, 200),\n (3, 3, -1),\n (3, 3, -2),\n (3, 3, -200),\n (0, 3, 0),\n (0, 3, 1),\n (0, 3, -1),\n (3, 0, 0),\n (3, 0, 1),\n (3, 0, -1),\n (0, 0, 0),\n (0, 0, 1),\n (0, 0, -1),\n (3, 6, 0),\n (3, 6, 1),\n (3, 6, 3),\n (3, 6, 9),\n (3, 6, -1),\n (3, 6, -3),\n (3, 6, -9),\n (6, 3, 0),\n (6, 3, 1),\n (6, 3, 3),\n (6, 3, 9),\n (6, 3, -1),\n (6, 3, -3),\n (6, 3, -9),\n (258, 253, 1, torch.float32),\n (257, 258, 1, torch.float64),\n (258, 258, 1, torch.short),\n (3, 513, 1, torch.long),\n (513, 3, 1, torch.int),\n (513, 0, 1, torch.double),\n (1024, 1024),\n (1024, 1024, 500, torch.float32),\n (1024, 1024, 1023),\n (1024, 1024, -500),\n (1023, 1025),\n (1025, 1023, 1022),\n (1024, 1024, -500),\n (3, 2028),\n (3, 2028, 1),\n (3, 2028, -1),\n (2028, 3),\n (2028, 1),\n (2028, 1, -1)\n]\n\ntri_large_tests_args: List[Tuple[int, ...]] = [\n # Large test cases below are deliberately commented out to speed up CI\n # tests and to avoid OOM error. 
When modifying implementations of\n # tril_indices and triu_indices, please enable these tests and make sure\n # they pass.\n #\n # (1, 268435455),\n # (5000, 5000),\n # (10000, 10000),\n # (268435455, 1),\n # (134217727, 2, 1),\n # (2, 134217727, 1),\n # (536870901, 1),\n # (1, 536870901),\n # (268435455, 2, 1),\n # (2, 268435455, 1)\n]\n\n\ndef run_additional_tri_tests(self, device):\n x = torch.ones(\n 3, 3, dtype=torch.long, device=device, layout=torch.strided)\n l = x.tril(0).nonzero().transpose(0, 1)\n u = x.triu(0).nonzero().transpose(0, 1)\n self.assertEqual(l, torch.tril_indices(3, 3, device=device))\n self.assertEqual(\n l, torch.tril_indices(3, 3, device=device, layout=torch.strided))\n\n self.assertEqual(u, torch.triu_indices(3, 3, device=device))\n self.assertEqual(\n u, torch.triu_indices(3, 3, device=device, layout=torch.strided))\n\n self.assertRaises(\n RuntimeError,\n lambda: torch.triu_indices(\n 1, 1, device=device, layout=torch.sparse_coo))\n\n self.assertRaises(\n RuntimeError,\n lambda: torch.tril_indices(\n 1, 1, device=device, layout=torch.sparse_coo))\n\n# TODO: move into common_utils.py or the test suite(s) that use this\ndef unpack_variables(args):\n if isinstance(args, tuple):\n return tuple(unpack_variables(elem) for elem in args)\n else:\n return args\n\n\nclass dont_convert(tuple):\n pass\n\n\nnon_differentiable = collections.namedtuple('non_differentiable', ['tensor'])\n\n\n# TODO: move into common_utils.py or the test suite(s) that use this\ndef create_input(call_args, requires_grad=True, non_contiguous=False, call_kwargs=None, dtype=torch.double, device=None):\n if not isinstance(call_args, tuple):\n call_args = (call_args,)\n\n def map_arg(arg):\n def maybe_non_contig(tensor):\n return tensor if not non_contiguous else make_non_contiguous(tensor)\n\n def conjugate(tensor):\n return tensor.conj()\n\n if isinstance(arg, torch.Size) or isinstance(arg, dont_convert):\n return arg\n elif isinstance(arg, tuple) and len(arg) == 0:\n var = conjugate(torch.randn((), dtype=dtype, device=device))\n var.requires_grad = requires_grad\n return var\n elif isinstance(arg, tuple) and not isinstance(arg[0], torch.Tensor):\n return conjugate(maybe_non_contig(torch.randn(*arg, dtype=dtype, device=device))).requires_grad_(requires_grad)\n # double check casting\n elif isinstance(arg, non_differentiable):\n if isinstance(arg.tensor, torch.Tensor):\n if arg.tensor.dtype == torch.float:\n return maybe_non_contig(arg.tensor.to(dtype=torch.double, device=device))\n if arg.tensor.dtype == torch.cfloat:\n return conjugate(maybe_non_contig(arg.tensor.to(dtype=torch.cdouble, device=device)))\n return conjugate(maybe_non_contig(arg.tensor.to(device=device)))\n return conjugate(maybe_non_contig(arg.tensor.to(device=device)))\n elif isinstance(arg, torch.Tensor):\n if arg.dtype == torch.float:\n arg = arg.double()\n if arg.dtype == torch.cfloat:\n arg = arg.to(torch.cdouble)\n if arg.is_complex() != dtype.is_complex:\n raise RuntimeError(\"User provided tensor is real for a test that runs with complex dtype, \",\n \"which is not supported for now\")\n # NOTE: We do clone() after detach() here because we need to be able to change size/storage of v afterwards\n v = conjugate(maybe_non_contig(arg)).detach().to(device=device).clone()\n v.requires_grad = requires_grad and (v.is_floating_point() or v.is_complex())\n return v\n elif callable(arg):\n return map_arg(arg(dtype=dtype, device=device))\n else:\n return arg\n args_out = tuple(map_arg(arg) for arg in call_args)\n kwargs_out = {k: 
map_arg(v) for k, v in call_kwargs.items()} if call_kwargs else {}\n return args_out, kwargs_out\n"
]
| [
[
"numpy.product",
"numpy.random.choice",
"torch.triu_indices",
"numpy.sign",
"torch.linalg.cholesky",
"torch.testing._internal.common_utils.is_iterable_of_tensors",
"torch.lu_unpack",
"numpy.prod",
"torch.tensor",
"torch.testing._internal.common_utils.make_fullrank_matrices_with_distinct_singular_values",
"numpy.cross",
"numpy.expand_dims",
"torch.nn.functional.selu",
"torch.as_strided",
"torch.testing._internal.common_dtype.all_types",
"numpy.diff",
"torch.cuda.empty_cache",
"torch.testing._internal.common_device_type.precisionOverride",
"torch.testing._internal.common_dtype.floating_types",
"torch.testing.make_non_contiguous",
"numpy.linalg.tensorsolve",
"torch.testing._internal.common_dtype.floating_types_and",
"torch.testing._internal.common_utils.random_well_conditioned_matrix",
"numpy.lib.NumpyVersion",
"torch.einsum",
"torch.testing._internal.common_dtype.floating_and_complex_types_and",
"torch.nn.functional.elu",
"numpy.mean",
"numpy.sinc",
"numpy.multiply",
"torch.nn.functional.celu",
"torch.testing._internal.common_dtype._dispatch_dtypes",
"torch.topk",
"torch.Size",
"torch.testing._internal.common_device_type.tol",
"torch.testing._internal.common_utils.random_symmetric_pd_matrix",
"torch.abs",
"numpy.tanh",
"torch.randint",
"numpy.arange",
"torch.zeros",
"torch.device",
"numpy.matmul",
"numpy.zeros",
"torch.testing._internal.common_dtype.integral_types_and",
"numpy.modf",
"torch.testing._internal.common_utils.random_hermitian_pd_matrix",
"torch.rand",
"torch.testing._internal.common_dtype.empty_types",
"torch.all",
"torch.frac",
"torch.view_as_real",
"torch.testing._internal.common_dtype.all_types_and",
"torch.testing.make_tensor",
"numpy.where",
"torch.igammac",
"torch.where",
"torch.special.zeta",
"torch.nn.functional.embedding_bag",
"torch.tril_indices",
"torch.manual_seed",
"torch.testing._internal.common_utils.random_symmetric_matrix",
"torch._masked._input_mask",
"torch.testing._internal.common_dtype.all_types_and_complex_and",
"torch.testing._internal.common_utils.random_square_matrix_of_rank",
"torch.igamma",
"torch.testing._internal.common_utils.random_fullrank_matrix_distinct_singular_value",
"numpy.heaviside",
"torch.nn.functional.embedding",
"torch.linalg.pinv",
"numpy.amax",
"numpy.put",
"numpy.searchsorted",
"torch.testing._internal.common_dtype.floating_and_complex_types",
"torch.get_default_dtype",
"numpy.asarray",
"torch.special.polygamma",
"numpy.sum",
"torch.no_grad",
"numpy.abs",
"numpy.maximum",
"torch.polygamma",
"numpy.minimum",
"torch.linalg.svd",
"torch.finfo",
"torch.randperm",
"numpy.exp",
"torch.ones",
"torch.cuda.is_available",
"torch.LongTensor",
"torch.linalg.matrix_norm",
"torch.testing._internal.common_utils.noncontiguous_like",
"numpy.concatenate",
"torch.testing._internal.common_utils.random_symmetric_psd_matrix",
"torch._masked._output_mask",
"numpy.sqrt",
"torch.empty",
"torch.testing._internal.common_dtype.double_types",
"torch.testing._internal.common_dtype.all_types_and_complex",
"torch.full",
"numpy.stack",
"numpy.clip",
"numpy.amin",
"torch.sort",
"numpy.add",
"torch.arange",
"torch.testing._internal.common_device_type.has_cusolver",
"torch.testing._internal.common_dtype.complex_types",
"torch.nn.functional.silu",
"torch.linalg.lstsq",
"torch.broadcast_shapes",
"torch.randn"
]
]
|
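The _compare_trilu_indices helper in the row above validates torch.tril_indices/torch.triu_indices against indices recovered from a dense mask. A minimal runnable sketch of that equivalence, assuming an illustrative 4x5 shape and offset (not taken from the test suite):

import torch

# ones().tril(offset).nonzero() lists the (row, col) pairs of the lower
# triangle in row-major order; transpose(0, 1) gives the same 2 x N layout
# that torch.tril_indices returns directly.
row, col, offset = 4, 5, 1
from_mask = torch.ones(row, col).tril(offset).nonzero().t()
direct = torch.tril_indices(row, col, offset)
assert torch.equal(from_mask, direct)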
TruthK/Informer2020 | [
"085debb00c12418f6f51bae5609e24ee5ed5b882"
]
| [
"utils/imputer.py"
]
| [
"## 图像显示中文的问题\nimport matplotlib\nfrom sklearn.linear_model import BayesianRidge\nfrom sklearn.preprocessing import StandardScaler\n\nmatplotlib.rcParams['axes.unicode_minus'] = False\nimport seaborn as sns\n\nsns.set(font=\"Kaiti\", style=\"ticks\", font_scale=1.4)\n\n## 导入本小节会使用到的包\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.experimental import enable_iterative_imputer\nfrom sklearn.impute import IterativeImputer\nfrom sklearn.impute import KNNImputer\nfrom sklearn import metrics\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.impute import IterativeImputer\nfrom sklearn.linear_model import BayesianRidge\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.ensemble import ExtraTreesRegressor\nfrom sklearn.neighbors import KNeighborsRegressor\n\n\n\ndef Multivariate_Interpolation(v):\n imp = IterativeImputer(estimator=ExtraTreesRegressor(), max_iter=30, random_state=1997)\n imp.fit(v)\n IteraList = imp.transform(v)\n IteraNp = np.array(IteraList)\n return IteraNp\n\n\n## 读取用于演示的数据集\ndf = pd.read_csv(\"../data/ETT/ETTh1_miss.csv\", header=None)\ndf_orgin = pd.read_csv(\"../data/ETT/ETTh1.csv\", header=None)\n\n\nss = StandardScaler()\n\ndf = df.iloc[1:, 1:]\ndata = df.values\nres = Multivariate_Interpolation(data)\n\norgin_data = df_orgin.iloc[1:, 1:].values\npd.DataFrame(res).to_csv(\"ex_ett.csv\", index=False)\nprint('------------------mse--------------------------')\nprint(metrics.mean_squared_error(ss.fit_transform(orgin_data), ss.fit_transform(res)))\nprint('-------------------mae------------------------------')\nprint(metrics.mean_absolute_error(ss.fit_transform(orgin_data), ss.fit_transform(res)))\n"
]
| [
[
"numpy.array",
"sklearn.preprocessing.StandardScaler",
"sklearn.ensemble.ExtraTreesRegressor",
"pandas.DataFrame",
"pandas.read_csv"
]
]
|
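utils/imputer.py above fills missing values with sklearn's IterativeImputer driven by an ExtraTreesRegressor. A minimal self-contained sketch of the same pattern, assuming a toy array with NaNs (data and hyperparameters here are illustrative, not from the repo):

import numpy as np
from sklearn.experimental import enable_iterative_imputer  # noqa: F401, activates the estimator
from sklearn.impute import IterativeImputer
from sklearn.ensemble import ExtraTreesRegressor

X = np.array([[1.0, 2.0],
              [3.0, np.nan],
              [5.0, 6.0],
              [np.nan, 8.0]])
imp = IterativeImputer(estimator=ExtraTreesRegressor(n_estimators=10),
                       max_iter=10, random_state=0)
X_filled = imp.fit_transform(X)  # NaNs replaced by model-based estimates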
BerryAI/Acai | [
"a3dc7d29c3ca4df00817e7ee94440c9e947ebcb4"
]
| [
"OpenMRS/cf/cf_hidden_feature.py"
]
| [
"\"\"\"\r\n cf_hidden_feature.py\r\n ~~~\r\n This module contains collaborative filtering algorithm, based on matrix\r\n factorization to find hidden features in user rating. It includes SVD and\r\n Gradient Descent methods.\r\n\r\n :auther: Alexander Z Wang\r\n\"\"\"\r\n\r\nimport numpy\r\nimport json\r\n\r\n\r\ndef full_rating_matrix_with_index(user_rate_dict):\r\n \"\"\"Get full rating matrix with song index at each row\r\n\r\n :param user_rate_dict: user rate score dictionary (sparse)\r\n :return rating_matrix: full matrix of rating scores\r\n :rtype: dictionary\r\n \"\"\"\r\n\r\n user_index = dict()\r\n song_index = dict()\r\n\r\n user_count = 0\r\n song_count = 0\r\n for user in user_rate_dict:\r\n if user not in user_index:\r\n user_index[user] = user_count\r\n user_count += 1\r\n for track_key in user_rate_dict[user]:\r\n if track_key not in song_index:\r\n song_index[track_key] = song_count\r\n song_count += 1\r\n\r\n rating_matrix = [None]*len(user_index)\r\n\r\n for user in user_rate_dict:\r\n rating_vector = [0.0] * len(song_index)\r\n for track_key in user_rate_dict[user]:\r\n rating_vector[song_index[track_key]] = user_rate_dict[user][\r\n track_key]\r\n rating_matrix[user_index[user]] = rating_vector\r\n\r\n rating_matrix = numpy.array(rating_matrix)\r\n matrix_update_by_song_mean_rate(rating_matrix)\r\n\r\n return user_index, song_index, rating_matrix\r\n\r\n\r\ndef matrix_update_by_song_mean_rate(rating_matrix):\r\n \"\"\"Update rating score with average score\r\n\r\n :param rating_matrix: full matrix of rating scores\r\n \"\"\"\r\n\r\n for i in range(0, len(rating_matrix[0])):\r\n index = rating_matrix[:, i] > 0\r\n ave_score = float(numpy.sum(rating_matrix[:, i])) / float(\r\n numpy.sum(index))\r\n for j in range(0, len(rating_matrix)):\r\n if rating_matrix[j][i] == 0.0:\r\n rating_matrix[j][i] = ave_score\r\n\r\n\r\ndef get_hidden_feature_matrix_SVD(user_rate_dict, k):\r\n \"\"\"Get hidden feature matrix by SVD method\r\n\r\n :param user_rate_dict: each user's rating score\r\n :param k: number of hidden features\r\n :return data: hidden feature dataset\r\n :rtype: ndarray\r\n \"\"\"\r\n\r\n user_index, song_index, rating_matrix = full_rating_matrix_with_index(\r\n user_rate_dict)\r\n\r\n U, s, V = numpy.linalg.svd(rating_matrix, full_matrices=True)\r\n\r\n V_bar = V[0:k]\r\n for i in range(0, k):\r\n V_bar[i] = s[i] * V_bar[i]\r\n hidden_feature = V_bar\r\n user_weight = U[:, 0:k]\r\n\r\n return user_weight, hidden_feature\r\n\r\n\r\ndef update_residue(rating_matrix, rate_bar):\r\n \"\"\"update residue matrix for each iteration in Gradient descent method\r\n\r\n :param rating_matrix: users' rating matrix\r\n :param rate_bar: rating matrix generate by approximation in each GD step\r\n :return residue: residue matrix, rating_matrix - rate_bar\r\n :rtype: ndarray\r\n \"\"\"\r\n\r\n residue = rating_matrix - rate_bar\r\n index = (rating_matrix == 0)\r\n residue[index] = 0\r\n\r\n return residue\r\n\r\n\r\ndef stochastic_GD(rating_matrix, lean_rate, lambda_rate, k, max_iter):\r\n \"\"\"Stochastic Gradient Descent method\r\n\r\n :param rating_matrix: filename of unique MSD tracks\r\n :param lean_rate: learner rate\r\n :param lambda_rate: lambda rate\r\n :param k: number of hidden features\r\n :param max_iter: maximum iteration steps in gradient descent method\r\n :return user_weight: user weight matrix\r\n :return hidden_feature: hidden feature matrix\r\n :rtype: ndarray\r\n \"\"\"\r\n\r\n m = len(rating_matrix)\r\n n = len(rating_matrix[0])\r\n\r\n user_weight = 
numpy.random.rand(m, k)\r\n hidden_feature = numpy.random.rand(n, k)\r\n\r\n rate_bar = user_weight.dot(hidden_feature.T)\r\n residue = update_residue(rating_matrix, rate_bar)\r\n\r\n res_norm = numpy.linalg.norm(residue)\r\n res_norm_list = [res_norm]\r\n\r\n for h in range(0, max_iter):\r\n\r\n user_weight = lean_rate*residue.dot(hidden_feature) + (\r\n 1 - lean_rate*lambda_rate)*user_weight\r\n\r\n rate_bar = user_weight.dot(hidden_feature.T)\r\n residue = update_residue(rating_matrix, rate_bar)\r\n\r\n hidden_feature = lean_rate*residue.T.dot(user_weight) + (\r\n 1 - lean_rate*lambda_rate)*hidden_feature\r\n\r\n rate_bar = user_weight.dot(hidden_feature.T)\r\n residue = update_residue(rating_matrix, rate_bar)\r\n\r\n res_norm = numpy.linalg.norm(residue)\r\n res_norm_list.append(res_norm)\r\n\r\n if res_norm < 0.01:\r\n break\r\n\r\n return user_weight, hidden_feature, res_norm_list\r\n\r\n\r\ndef stochastic_GD_with_ini(rating_matrix, user_weight, lean_rate,\r\n hidden_feature, lambda_rate, max_iter):\r\n \"\"\"Stochastic Gradient Descent method with given initail guess\r\n\r\n :param rating_matrix: filename of unique MSD tracks\r\n :param user_weight: user weight matrix\r\n :param hidden_feature: hidden feature matrix\r\n :param lean_rate: learner rate\r\n :param lambda_rate: lambda rate\r\n :param k: number of hidden features\r\n :return user_weight: user weight matrix\r\n :return hidden_feature: hidden feature matrix\r\n :return full_iteration: flag of iteration status\r\n :return res_norm_list: list of error norm of each iteration\r\n :rtype: ndarray\r\n \"\"\"\r\n\r\n rate_bar = user_weight.dot(hidden_feature.T)\r\n residue = update_residue(rating_matrix, rate_bar)\r\n\r\n res_norm = numpy.linalg.norm(residue)\r\n res_norm_old = res_norm\r\n res_norm_list = []\r\n\r\n full_iteration = 1\r\n\r\n for h in range(0, max_iter):\r\n\r\n user_weight = lean_rate*residue.dot(hidden_feature) + (\r\n 1 - lean_rate*lambda_rate)*user_weight\r\n\r\n rate_bar = user_weight.dot(hidden_feature.T)\r\n residue = update_residue(rating_matrix, rate_bar)\r\n\r\n hidden_feature = lean_rate*residue.T.dot(user_weight) + (\r\n 1 - lean_rate*lambda_rate)*hidden_feature\r\n\r\n rate_bar = user_weight.dot(hidden_feature.T)\r\n residue = update_residue(rating_matrix, rate_bar)\r\n\r\n res_norm = numpy.linalg.norm(residue)\r\n res_norm_list.append(res_norm)\r\n\r\n if res_norm > res_norm_old:\r\n full_iteration = 0\r\n break\r\n if res_norm < 0.01:\r\n full_iteration = 2\r\n break\r\n res_norm_old = res_norm\r\n\r\n return user_weight, hidden_feature, res_norm_list, full_iteration\r\n\r\n\r\ndef stochastic_GD_r(rating_matrix, lean_rate, lambda_rate, k,\r\n max_iter_inloop, max_iter_outloop):\r\n \"\"\"Stochastic Gradient Descent method with flexible learner rate\r\n\r\n :param rating_matrix: filename of unique MSD tracks\r\n :param lean_rate: learner rate\r\n :param lambda_rate: lambda rate\r\n :param k: number of hidden features\r\n :return user_weight: user weight matrix\r\n :return hidden_feature: hidden feature matrix\r\n :rtype: ndarray\r\n \"\"\"\r\n\r\n user_weight, hidden_feature, res_norm_list = stochastic_GD(\r\n rating_matrix, lean_rate, lambda_rate, k, max_iter_inloop)\r\n\r\n full_success = 1\r\n\r\n for i in range(0, max_iter_outloop):\r\n\r\n if full_success == 2:\r\n break\r\n if full_success == 1:\r\n lean_rate = 2*lean_rate\r\n if full_success == 0:\r\n lean_rate = lean_rate/2\r\n\r\n (user_weight, hidden_feature,\r\n res_norm_list_tmp, full_success) = stochastic_GD_with_ini(\r\n 
rating_matrix, user_weight, lean_rate,\r\n hidden_feature, lambda_rate, max_iter_outloop)\r\n\r\n res_norm_list = res_norm_list + res_norm_list_tmp\r\n\r\n return user_weight, hidden_feature, res_norm_list\r\n\r\n\r\ndef batch_GD(rating_matrix, lean_rate, lambda_rate, k, max_iter):\r\n \"\"\"Batch Gradient Descent method\r\n\r\n :param rating_matrix: users' rating matrix\r\n :param lean_rate: learner rate\r\n :param lambda_rate: lambda rate\r\n :param k: number of hidden features\r\n :return user_weight: user weight matrix\r\n :return hidden_feature: hidden feature matrix\r\n :rtype: ndarray\r\n \"\"\"\r\n\r\n residue = numpy.copy(rating_matrix)\r\n\r\n res_norm_old = numpy.linalg.norm(residue)\r\n res_norm_new = res_norm_old\r\n res_norm_list = [res_norm_old]\r\n\r\n m = len(rating_matrix)\r\n n = len(rating_matrix[0])\r\n\r\n user_weight = numpy.random.rand(m, k)\r\n hidden_feature = numpy.random.rand(n, k)\r\n\r\n columns = (residue != 0).sum(0)\r\n rows = (residue != 0).sum(1)\r\n diag_n = numpy.diag(1 - lean_rate*lambda_rate*columns)\r\n diag_m = numpy.diag(1 - lean_rate*lambda_rate*rows)\r\n\r\n for h in range(0, max_iter):\r\n user_weight = diag_m.dot(user_weight)\r\n user_weight += lean_rate * numpy.dot(residue, hidden_feature)\r\n hidden_feature = diag_n.dot(hidden_feature)\r\n hidden_feature += lean_rate * residue.T.dot(user_weight)\r\n rate_bar = user_weight.dot(hidden_feature.T)\r\n residue = update_residue(rating_matrix, rate_bar)\r\n res_norm_new = numpy.linalg.norm(residue)\r\n res_norm_list.append(res_norm_new)\r\n if res_norm_old < 1.0:\r\n break\r\n res_norm_old = res_norm_new\r\n\r\n return user_weight, hidden_feature\r\n\r\n\r\ndef get_hidden_feature_matrix_GD(\r\n user_rate_dict, k, lean_rate, lambda_rate, max_iter, GD_method):\r\n \"\"\"Get hidden feature matrix by stochastic gradient descent method\r\n\r\n :param user_rate_dict: user rating matrix\r\n :param k: number of hidden features\r\n :param lean_rate: learner rate\r\n :param lambda_rate: lambda rate\r\n :param max_iter: max iteration steps in GD\r\n :param GD_method: number of the method\r\n :return user_weight: user weight matrix\r\n :return hidden_feature: hidden feature matrix\r\n :rtype: ndarray\r\n \"\"\"\r\n\r\n user_index, song_index, rating_matrix = full_rating_matrix_with_index(\r\n user_rate_dict)\r\n if GD_method == 1:\r\n user_weight, hidden_feature, res_norm = stochastic_GD(\r\n rating_matrix, lean_rate, lambda_rate, k, max_iter)\r\n if GD_method == 2:\r\n user_weight, hidden_feature, res_norm = stochastic_GD_r(\r\n rating_matrix, lean_rate, lambda_rate, k, max_iter)\r\n if GD_method == 3:\r\n user_weight, hidden_feature, res_norm = batch_GD(\r\n rating_matrix, lean_rate, lambda_rate, k, max_iter)\r\n\r\n return user_weight, hidden_feature, res_norm, user_index, song_index\r\n\r\n\r\ndef write_hidden_feature_to_file(hf_filename, hidden_feature, song_index):\r\n \"\"\"Write hidden features to a Json file\r\n\r\n :param hf_filename: filename for hidden feature matrix\r\n :param hidden_feature: hidden feature matrix\r\n :param song_index: index of song in hidden feature matrix\r\n \"\"\"\r\n\r\n inv_song_index = dict((v, k) for k, v in song_index.items())\r\n data = dict()\r\n\r\n for key in inv_song_index:\r\n data[inv_song_index[key]] = hidden_feature[key].tolist()\r\n\r\n with open(hf_filename, 'w') as outfile:\r\n json.dump(data, outfile)\r\n\r\n\r\ndef get_user_profile(\r\n user_rate_dict, k, lean_rate, lambda_rate, max_iter, GD_method):\r\n \"\"\"Get user profile of hidden feature 
weight by gradient descent method\r\n\r\n :param user_rate_dict: user rating matrix\r\n :param k: number of hidden features\r\n :param lean_rate: learner rate\r\n :param lambda_rate: lambda rate\r\n :param max_iter: max iteration steps in GD\r\n :param method: number of the method\r\n :return user_profile: user weight profile\r\n :rtype: dictionary\r\n \"\"\"\r\n\r\n user_profile = dict()\r\n\r\n user_index, song_index, rating_matrix = full_rating_matrix_with_index(\r\n user_rate_dict)\r\n if GD_method == 1:\r\n user_weight, hidden_feature, res_norm = stochastic_GD(\r\n rating_matrix, lean_rate, lambda_rate, k, max_iter)\r\n if GD_method == 2:\r\n user_weight, hidden_feature, res_norm = stochastic_GD_r(\r\n rating_matrix, lean_rate, lambda_rate, k, max_iter)\r\n if GD_method == 3:\r\n user_weight, hidden_feature, res_norm = batch_GD(\r\n rating_matrix, lean_rate, lambda_rate, k, max_iter)\r\n\r\n for user in user_index:\r\n line_number = user_index[user]\r\n weight_tmp = user_weight[line_number, :].tolist()\r\n user_profile[user] = weight_tmp\r\n\r\n return user_profile\r\n"
]
| [
[
"numpy.array",
"numpy.linalg.norm",
"numpy.random.rand",
"numpy.dot",
"numpy.sum",
"numpy.copy",
"numpy.linalg.svd",
"numpy.diag"
]
]
|
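get_hidden_feature_matrix_SVD above keeps the top-k singular triplets and folds the singular values into the feature matrix. A minimal numpy sketch of that rank-k factorization, assuming a random toy rating matrix:

import numpy as np

R = np.random.rand(6, 4)                # toy user-by-song rating matrix
k = 2
U, s, Vt = np.linalg.svd(R, full_matrices=True)
user_weight = U[:, :k]                  # one k-vector of weights per user
hidden_feature = s[:k, None] * Vt[:k]   # scale each feature row by its singular value
approx = user_weight @ hidden_feature   # best rank-k approximation of R (Eckart-Young)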
ParikhKadam/fairseq | [
"f862ff5137cf03ec8769b6e8f19c9dcdfe08ff91"
]
| [
"fairseq/data/data_utils.py"
]
| [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\ntry:\n from collections.abc import Iterable\nexcept ImportError:\n from collections import Iterable\nimport contextlib\nimport itertools\nimport logging\nimport re\nimport warnings\nfrom typing import Optional, Tuple\n\nimport numpy as np\nimport torch\n\nfrom fairseq.file_io import PathManager\nfrom fairseq import utils\nimport os\n\nlogger = logging.getLogger(__name__)\n\n\ndef infer_language_pair(path):\n \"\"\"Infer language pair from filename: <split>.<lang1>-<lang2>.(...).idx\"\"\"\n src, dst = None, None\n for filename in PathManager.ls(path):\n parts = filename.split(\".\")\n if len(parts) >= 3 and len(parts[1].split(\"-\")) == 2:\n return parts[1].split(\"-\")\n return src, dst\n\n\ndef collate_tokens(\n values,\n pad_idx,\n eos_idx=None,\n left_pad=False,\n move_eos_to_beginning=False,\n pad_to_length=None,\n pad_to_multiple=1,\n pad_to_bsz=None,\n):\n \"\"\"Convert a list of 1d tensors into a padded 2d tensor.\"\"\"\n size = max(v.size(0) for v in values)\n size = size if pad_to_length is None else max(size, pad_to_length)\n if pad_to_multiple != 1 and size % pad_to_multiple != 0:\n size = int(((size - 0.1) // pad_to_multiple + 1) * pad_to_multiple)\n\n batch_size = len(values) if pad_to_bsz is None else max(len(values), pad_to_bsz)\n res = values[0].new(batch_size, size).fill_(pad_idx)\n\n def copy_tensor(src, dst):\n assert dst.numel() == src.numel()\n if move_eos_to_beginning:\n if eos_idx is None:\n # if no eos_idx is specified, then use the last token in src\n dst[0] = src[-1]\n else:\n dst[0] = eos_idx\n dst[1:] = src[:-1]\n else:\n dst.copy_(src)\n\n for i, v in enumerate(values):\n copy_tensor(v, res[i][size - len(v) :] if left_pad else res[i][: len(v)])\n return res\n\n\ndef load_indexed_dataset(\n path, dictionary=None, dataset_impl=None, combine=False, default=\"cached\"\n):\n \"\"\"A helper function for loading indexed datasets.\n\n Args:\n path (str): path to indexed dataset (e.g., 'data-bin/train')\n dictionary (~fairseq.data.Dictionary): data dictionary\n dataset_impl (str, optional): which dataset implementation to use. If\n not provided, it will be inferred automatically. For legacy indexed\n data we use the 'cached' implementation by default.\n combine (bool, optional): automatically load and combine multiple\n datasets. For example, if *path* is 'data-bin/train', then we will\n combine 'data-bin/train', 'data-bin/train1', ... 
and return a\n single ConcatDataset instance.\n \"\"\"\n import fairseq.data.indexed_dataset as indexed_dataset\n from fairseq.data.concat_dataset import ConcatDataset\n\n datasets = []\n for k in itertools.count():\n path_k = path + (str(k) if k > 0 else \"\")\n try:\n path_k = indexed_dataset.get_indexed_dataset_to_local(path_k)\n except Exception as e:\n if \"StorageException: [404] Path not found\" in str(e):\n logger.warning(f\"path_k: {e} not found\")\n else:\n raise e\n\n dataset_impl_k = dataset_impl\n if dataset_impl_k is None:\n dataset_impl_k = indexed_dataset.infer_dataset_impl(path_k)\n dataset = indexed_dataset.make_dataset(\n path_k,\n impl=dataset_impl_k or default,\n fix_lua_indexing=True,\n dictionary=dictionary,\n )\n if dataset is None:\n break\n logger.info(\"loaded {:,} examples from: {}\".format(len(dataset), path_k))\n datasets.append(dataset)\n if not combine:\n break\n if len(datasets) == 0:\n return None\n elif len(datasets) == 1:\n return datasets[0]\n else:\n return ConcatDataset(datasets)\n\n\[email protected]\ndef numpy_seed(seed, *addl_seeds):\n \"\"\"Context manager which seeds the NumPy PRNG with the specified seed and\n restores the state afterward\"\"\"\n if seed is None:\n yield\n return\n if len(addl_seeds) > 0:\n seed = int(hash((seed, *addl_seeds)) % 1e6)\n state = np.random.get_state()\n np.random.seed(seed)\n try:\n yield\n finally:\n np.random.set_state(state)\n\n\ndef collect_filtered(function, iterable, filtered):\n \"\"\"\n Similar to :func:`filter` but collects filtered elements in ``filtered``.\n\n Args:\n function (callable): function that returns ``False`` for elements that\n should be filtered\n iterable (iterable): iterable to filter\n filtered (list): list to store filtered elements\n \"\"\"\n for el in iterable:\n if function(el):\n yield el\n else:\n filtered.append(el)\n\n\ndef _filter_by_size_dynamic(indices, size_fn, max_positions, raise_exception=False):\n def compare_leq(a, b):\n return a <= b if not isinstance(a, tuple) else max(a) <= b\n\n def check_size(idx):\n if isinstance(max_positions, float) or isinstance(max_positions, int):\n return size_fn(idx) <= max_positions\n elif isinstance(max_positions, dict):\n idx_size = size_fn(idx)\n assert isinstance(idx_size, dict)\n intersect_keys = set(max_positions.keys()) & set(idx_size.keys())\n return all(\n all(\n a is None or b is None or a <= b\n for a, b in zip(idx_size[key], max_positions[key])\n )\n for key in intersect_keys\n )\n else:\n # For MultiCorpusSampledDataset, will generalize it later\n if not isinstance(size_fn(idx), Iterable):\n return all(size_fn(idx) <= b for b in max_positions)\n return all(\n a is None or b is None or a <= b\n for a, b in zip(size_fn(idx), max_positions)\n )\n\n ignored = []\n itr = collect_filtered(check_size, indices, ignored)\n indices = np.fromiter(itr, dtype=np.int64, count=-1)\n return indices, ignored\n\n\ndef filter_by_size(indices, dataset, max_positions, raise_exception=False):\n \"\"\"\n [deprecated] Filter indices based on their size.\n Use `FairseqDataset::filter_indices_by_size` instead.\n\n Args:\n indices (List[int]): ordered list of dataset indices\n dataset (FairseqDataset): fairseq dataset instance\n max_positions (tuple): filter elements larger than this size.\n Comparisons are done component-wise.\n raise_exception (bool, optional): if ``True``, raise an exception if\n any elements are filtered (default: False).\n \"\"\"\n warnings.warn(\n \"data_utils.filter_by_size is deprecated. 
\"\n \"Use `FairseqDataset::filter_indices_by_size` instead.\",\n stacklevel=2,\n )\n if isinstance(max_positions, float) or isinstance(max_positions, int):\n if hasattr(dataset, \"sizes\") and isinstance(dataset.sizes, np.ndarray):\n ignored = indices[dataset.sizes[indices] > max_positions].tolist()\n indices = indices[dataset.sizes[indices] <= max_positions]\n elif (\n hasattr(dataset, \"sizes\")\n and isinstance(dataset.sizes, list)\n and len(dataset.sizes) == 1\n ):\n ignored = indices[dataset.sizes[0][indices] > max_positions].tolist()\n indices = indices[dataset.sizes[0][indices] <= max_positions]\n else:\n indices, ignored = _filter_by_size_dynamic(\n indices, dataset.size, max_positions\n )\n else:\n indices, ignored = _filter_by_size_dynamic(indices, dataset.size, max_positions)\n\n if len(ignored) > 0 and raise_exception:\n raise Exception(\n (\n \"Size of sample #{} is invalid (={}) since max_positions={}, \"\n \"skip this example with --skip-invalid-size-inputs-valid-test\"\n ).format(ignored[0], dataset.size(ignored[0]), max_positions)\n )\n if len(ignored) > 0:\n logger.warning(\n (\n \"{} samples have invalid sizes and will be skipped, \"\n \"max_positions={}, first few sample ids={}\"\n ).format(len(ignored), max_positions, ignored[:10])\n )\n return indices\n\n\ndef filter_paired_dataset_indices_by_size(src_sizes, tgt_sizes, indices, max_sizes):\n \"\"\"Filter a list of sample indices. Remove those that are longer\n than specified in max_sizes.\n\n Args:\n indices (np.array): original array of sample indices\n max_sizes (int or list[int] or tuple[int]): max sample size,\n can be defined separately for src and tgt (then list or tuple)\n\n Returns:\n np.array: filtered sample array\n list: list of removed indices\n \"\"\"\n if max_sizes is None:\n return indices, []\n if type(max_sizes) in (int, float):\n max_src_size, max_tgt_size = max_sizes, max_sizes\n else:\n max_src_size, max_tgt_size = max_sizes\n if tgt_sizes is None:\n ignored = indices[src_sizes[indices] > max_src_size]\n else:\n ignored = indices[\n (src_sizes[indices] > max_src_size) | (tgt_sizes[indices] > max_tgt_size)\n ]\n if len(ignored) > 0:\n if tgt_sizes is None:\n indices = indices[src_sizes[indices] <= max_src_size]\n else:\n indices = indices[\n (src_sizes[indices] <= max_src_size)\n & (tgt_sizes[indices] <= max_tgt_size)\n ]\n return indices, ignored.tolist()\n\n\ndef batch_by_size(\n indices,\n num_tokens_fn,\n num_tokens_vec=None,\n max_tokens=None,\n max_sentences=None,\n required_batch_size_multiple=1,\n fixed_shapes=None,\n):\n \"\"\"\n Yield mini-batches of indices bucketed by size. Batches may contain\n sequences of different lengths.\n\n Args:\n indices (List[int]): ordered list of dataset indices\n num_tokens_fn (callable): function that returns the number of tokens at\n a given index\n num_tokens_vec (List[int], optional): precomputed vector of the number\n of tokens for each index in indices (to enable faster batch generation)\n max_tokens (int, optional): max number of tokens in each batch\n (default: None).\n max_sentences (int, optional): max number of sentences in each\n batch (default: None).\n required_batch_size_multiple (int, optional): require batch size to\n be less than N or a multiple of N (default: 1).\n fixed_shapes (List[Tuple[int, int]], optional): if given, batches will\n only be created with the given shapes. 
*max_sentences* and\n *required_batch_size_multiple* will be ignored (default: None).\n \"\"\"\n try:\n from fairseq.data.data_utils_fast import (\n batch_by_size_fn,\n batch_by_size_vec,\n batch_fixed_shapes_fast,\n )\n except ImportError:\n raise ImportError(\n \"Please build Cython components with: \"\n \"`python setup.py build_ext --inplace`\"\n )\n except ValueError:\n raise ValueError(\n \"Please build (or rebuild) Cython components with `python setup.py build_ext --inplace`.\"\n )\n\n # added int() to avoid TypeError: an integer is required\n max_tokens = int(max_tokens) if max_tokens is not None else -1\n max_sentences = max_sentences if max_sentences is not None else -1\n bsz_mult = required_batch_size_multiple\n\n if not isinstance(indices, np.ndarray):\n indices = np.fromiter(indices, dtype=np.int64, count=-1)\n\n if num_tokens_vec is not None and not isinstance(num_tokens_vec, np.ndarray):\n num_tokens_vec = np.fromiter(num_tokens_vec, dtype=np.int64, count=-1)\n\n if fixed_shapes is None:\n if num_tokens_vec is None:\n return batch_by_size_fn(\n indices,\n num_tokens_fn,\n max_tokens,\n max_sentences,\n bsz_mult,\n )\n else:\n return batch_by_size_vec(\n indices,\n num_tokens_vec,\n max_tokens,\n max_sentences,\n bsz_mult,\n )\n\n else:\n fixed_shapes = np.array(fixed_shapes, dtype=np.int64)\n sort_order = np.lexsort(\n [\n fixed_shapes[:, 1].argsort(), # length\n fixed_shapes[:, 0].argsort(), # bsz\n ]\n )\n fixed_shapes_sorted = fixed_shapes[sort_order]\n return batch_fixed_shapes_fast(indices, num_tokens_fn, fixed_shapes_sorted)\n\n\ndef post_process(sentence: str, symbol: str):\n if symbol == \"sentencepiece\":\n sentence = sentence.replace(\" \", \"\").replace(\"\\u2581\", \" \").strip()\n elif symbol == \"wordpiece\":\n sentence = sentence.replace(\" \", \"\").replace(\"_\", \" \").strip()\n elif symbol == \"letter\":\n sentence = sentence.replace(\" \", \"\").replace(\"|\", \" \").strip()\n elif symbol == \"silence\":\n import re\n\n sentence = sentence.replace(\"<SIL>\", \"\")\n sentence = re.sub(\" +\", \" \", sentence).strip()\n elif symbol == \"_EOW\":\n sentence = sentence.replace(\" \", \"\").replace(\"_EOW\", \" \").strip()\n elif symbol in {\"subword_nmt\", \"@@ \", \"@@\"}:\n if symbol == \"subword_nmt\":\n symbol = \"@@ \"\n sentence = (sentence + \" \").replace(symbol, \"\").rstrip()\n elif symbol == \"none\":\n pass\n elif symbol is not None:\n raise NotImplementedError(f\"Unknown post_process option: {symbol}\")\n return sentence\n\n\ndef compute_mask_indices(\n shape: Tuple[int, int],\n padding_mask: Optional[torch.Tensor],\n mask_prob: float,\n mask_length: int,\n mask_type: str = \"static\",\n mask_other: float = 0.0,\n min_masks: int = 0,\n no_overlap: bool = False,\n min_space: int = 0,\n require_same_masks: bool = True,\n mask_dropout: float = 0.0,\n) -> np.ndarray:\n \"\"\"\n Computes random mask spans for a given shape\n\n Args:\n shape: the the shape for which to compute masks.\n should be of size 2 where first element is batch size and 2nd is timesteps\n padding_mask: optional padding mask of the same size as shape, which will prevent masking padded elements\n mask_prob: probability for each token to be chosen as start of the span to be masked. 
this will be multiplied by\n number of timesteps divided by length of mask span to mask approximately this percentage of all elements.\n however due to overlaps, the actual number will be smaller (unless no_overlap is True)\n mask_type: how to compute mask lengths\n static = fixed size\n uniform = sample from uniform distribution [mask_other, mask_length*2]\n normal = sample from normal distribution with mean mask_length and stdev mask_other. mask is min 1 element\n poisson = sample from possion distribution with lambda = mask length\n min_masks: minimum number of masked spans\n no_overlap: if false, will switch to an alternative recursive algorithm that prevents spans from overlapping\n min_space: only used if no_overlap is True, this is how many elements to keep unmasked between spans\n require_same_masks: if true, will randomly drop out masks until same amount of masks remains in each sample\n mask_dropout: randomly dropout this percentage of masks in each example\n \"\"\"\n\n bsz, all_sz = shape\n mask = np.full((bsz, all_sz), False)\n\n all_num_mask = int(\n # add a random number for probabilistic rounding\n mask_prob * all_sz / float(mask_length)\n + np.random.rand()\n )\n\n all_num_mask = max(min_masks, all_num_mask)\n\n mask_idcs = []\n for i in range(bsz):\n if padding_mask is not None:\n sz = all_sz - padding_mask[i].long().sum().item()\n num_mask = int(\n # add a random number for probabilistic rounding\n mask_prob * sz / float(mask_length)\n + np.random.rand()\n )\n num_mask = max(min_masks, num_mask)\n else:\n sz = all_sz\n num_mask = all_num_mask\n\n if mask_type == \"static\":\n lengths = np.full(num_mask, mask_length)\n elif mask_type == \"uniform\":\n lengths = np.random.randint(mask_other, mask_length * 2 + 1, size=num_mask)\n elif mask_type == \"normal\":\n lengths = np.random.normal(mask_length, mask_other, size=num_mask)\n lengths = [max(1, int(round(x))) for x in lengths]\n elif mask_type == \"poisson\":\n lengths = np.random.poisson(mask_length, size=num_mask)\n lengths = [int(round(x)) for x in lengths]\n else:\n raise Exception(\"unknown mask selection \" + mask_type)\n\n if sum(lengths) == 0:\n lengths[0] = min(mask_length, sz - 1)\n\n if no_overlap:\n mask_idc = []\n\n def arrange(s, e, length, keep_length):\n span_start = np.random.randint(s, e - length)\n mask_idc.extend(span_start + i for i in range(length))\n\n new_parts = []\n if span_start - s - min_space >= keep_length:\n new_parts.append((s, span_start - min_space + 1))\n if e - span_start - length - min_space > keep_length:\n new_parts.append((span_start + length + min_space, e))\n return new_parts\n\n parts = [(0, sz)]\n min_length = min(lengths)\n for length in sorted(lengths, reverse=True):\n lens = np.fromiter(\n (e - s if e - s >= length + min_space else 0 for s, e in parts),\n np.int,\n )\n l_sum = np.sum(lens)\n if l_sum == 0:\n break\n probs = lens / np.sum(lens)\n c = np.random.choice(len(parts), p=probs)\n s, e = parts.pop(c)\n parts.extend(arrange(s, e, length, min_length))\n mask_idc = np.asarray(mask_idc)\n else:\n min_len = min(lengths)\n if sz - min_len <= num_mask:\n min_len = sz - num_mask - 1\n\n mask_idc = np.random.choice(sz - min_len, num_mask, replace=False)\n\n mask_idc = np.asarray(\n [\n mask_idc[j] + offset\n for j in range(len(mask_idc))\n for offset in range(lengths[j])\n ]\n )\n\n mask_idcs.append(np.unique(mask_idc[mask_idc < sz]))\n\n min_len = min([len(m) for m in mask_idcs])\n for i, mask_idc in enumerate(mask_idcs):\n if len(mask_idc) > min_len and require_same_masks:\n 
mask_idc = np.random.choice(mask_idc, min_len, replace=False)\n if mask_dropout > 0:\n num_holes = np.rint(len(mask_idc) * mask_dropout).astype(int)\n mask_idc = np.random.choice(\n mask_idc, len(mask_idc) - num_holes, replace=False\n )\n\n mask[i, mask_idc] = True\n\n return mask\n\n\ndef get_mem_usage():\n try:\n import psutil\n\n mb = 1024 * 1024\n return f\"used={psutil.virtual_memory().used / mb}Mb; avail={psutil.virtual_memory().available / mb}Mb\"\n except ImportError:\n return \"N/A\"\n\n\n# lens: torch.LongTensor\n# returns: torch.BoolTensor\ndef lengths_to_padding_mask(lens):\n bsz, max_lens = lens.size(0), torch.max(lens).item()\n mask = torch.arange(max_lens).to(lens.device).view(1, max_lens)\n mask = mask.expand(bsz, -1) >= lens.view(bsz, 1).expand(-1, max_lens)\n return mask\n\n\n# lens: torch.LongTensor\n# returns: torch.BoolTensor\ndef lengths_to_mask(lens):\n return ~lengths_to_padding_mask(lens)\n\n\ndef get_buckets(sizes, num_buckets):\n buckets = np.unique(\n np.percentile(\n sizes,\n np.linspace(0, 100, num_buckets + 1),\n interpolation=\"lower\",\n )[1:]\n )\n return buckets\n\n\ndef get_bucketed_sizes(orig_sizes, buckets):\n sizes = np.copy(orig_sizes)\n assert np.min(sizes) >= 0\n start_val = -1\n for end_val in buckets:\n mask = (sizes > start_val) & (sizes <= end_val)\n sizes[mask] = end_val\n start_val = end_val\n return sizes\n\n\ndef _find_extra_valid_paths(dataset_path: str) -> set:\n paths = utils.split_paths(dataset_path)\n all_valid_paths = set()\n for sub_dir in paths:\n contents = PathManager.ls(sub_dir)\n valid_paths = [c for c in contents if re.match(\"valid*[0-9].*\", c) is not None]\n all_valid_paths |= {os.path.basename(p) for p in valid_paths}\n # Remove .bin, .idx etc\n roots = {os.path.splitext(p)[0] for p in all_valid_paths}\n return roots\n\n\ndef raise_if_valid_subsets_unintentionally_ignored(train_cfg) -> None:\n \"\"\"Raises if there are paths matching 'valid*[0-9].*' which are not combined or ignored.\"\"\"\n if (\n train_cfg.dataset.ignore_unused_valid_subsets\n or train_cfg.dataset.combine_valid_subsets\n or train_cfg.dataset.disable_validation\n or not hasattr(train_cfg.task, \"data\")\n ):\n return\n other_paths = _find_extra_valid_paths(train_cfg.task.data)\n specified_subsets = train_cfg.dataset.valid_subset.split(\",\")\n ignored_paths = [p for p in other_paths if p not in specified_subsets]\n if ignored_paths:\n advice = \"Set --combine-val to combine them or --ignore-unused-valid-subsets to ignore them.\"\n msg = f\"Valid paths {ignored_paths} will be ignored. {advice}\"\n raise ValueError(msg)\n"
]
| [
[
"numpy.random.rand",
"numpy.random.choice",
"numpy.copy",
"numpy.min",
"numpy.full",
"numpy.random.normal",
"numpy.random.poisson",
"numpy.random.get_state",
"numpy.random.randint",
"numpy.array",
"torch.max",
"numpy.random.set_state",
"numpy.asarray",
"torch.arange",
"numpy.random.seed",
"numpy.sum",
"numpy.fromiter",
"numpy.linspace",
"numpy.unique"
]
]
|
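collate_tokens above turns a ragged list of 1-d tensors into one padded 2-d batch. A stripped-down sketch of the same left-/right-padding logic (pad_batch is a hypothetical name, not a fairseq API):

import torch

def pad_batch(values, pad_idx=0, left_pad=False):
    # Fill a (batch, max_len) tensor with pad_idx, then copy each sequence
    # into a left- or right-aligned slice, as collate_tokens does.
    size = max(v.size(0) for v in values)
    res = values[0].new_full((len(values), size), pad_idx)
    for i, v in enumerate(values):
        dst = res[i, size - len(v):] if left_pad else res[i, :len(v)]
        dst.copy_(v)
    return res

batch = pad_batch([torch.tensor([1, 2, 3]), torch.tensor([4, 5])])
# -> tensor([[1, 2, 3],
#            [4, 5, 0]])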
titikid/tvm | [
"0cf3765b28d457d2503ec20b551e9a8eadb1491d"
]
| [
"nnvm/python/nnvm/testing/tf.py"
]
| [
"# pylint: disable=invalid-name, unused-variable, unused-argument, no-init\n\"\"\"\nTensorflow Model Helpers\n========================\nSome helper definitions for tensorflow models.\n\"\"\"\nimport re\nimport os.path\nimport collections\nimport numpy as np\n\n# Tensorflow imports\nimport tensorflow as tf\nfrom tensorflow.core.framework import graph_pb2\n\n######################################################################\n# Some helper functions\n# ---------------------\n\ndef ProcessGraphDefParam(graph_def):\n \"\"\"Type-checks and possibly canonicalizes `graph_def`.\n\n Parameters\n ----------\n graph_def : Obj\n tensorflow graph definition.\n\n Returns\n -------\n graph_def : Obj\n tensorflow graph devinition\n\n \"\"\"\n\n if not isinstance(graph_def, graph_pb2.GraphDef):\n # `graph_def` could be a dynamically-created message, so try a duck-typed\n # approach\n try:\n old_graph_def = graph_def\n graph_def = graph_pb2.GraphDef()\n graph_def.MergeFrom(old_graph_def)\n except TypeError:\n raise TypeError('graph_def must be a GraphDef proto.')\n return graph_def\n\nclass NodeLookup(object):\n \"\"\"Converts integer node ID's to human readable labels.\"\"\"\n\n def __init__(self,\n label_lookup_path=None,\n uid_lookup_path=None):\n self.node_lookup = self.load(label_lookup_path, uid_lookup_path)\n\n def load(self, label_lookup_path, uid_lookup_path):\n \"\"\"Loads a human readable English name for each softmax node.\n\n Parameters\n ----------\n label_lookup_path: String\n File containing String UID to integer node ID mapping .\n\n uid_lookup_path: String\n File containing String UID to human-readable string mapping.\n\n Returns\n -------\n node_id_to_name: dict\n dict from integer node ID to human-readable string.\n\n \"\"\"\n if not tf.gfile.Exists(uid_lookup_path):\n tf.logging.fatal('File does not exist %s', uid_lookup_path)\n if not tf.gfile.Exists(label_lookup_path):\n tf.logging.fatal('File does not exist %s', label_lookup_path)\n\n # Loads mapping from string UID to human-readable string\n proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()\n uid_to_human = {}\n p = re.compile(r'[n\\d]*[ \\S,]*')\n for line in proto_as_ascii_lines:\n parsed_items = p.findall(line)\n uid = parsed_items[0]\n human_string = parsed_items[2]\n uid_to_human[uid] = human_string\n\n # Loads mapping from string UID to integer node ID.\n node_id_to_uid = {}\n proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()\n for line in proto_as_ascii:\n if line.startswith(' target_class:'):\n target_class = int(line.split(': ')[1])\n if line.startswith(' target_class_string:'):\n target_class_string = line.split(': ')[1]\n node_id_to_uid[target_class] = target_class_string[1:-2]\n\n # Loads the final mapping of integer node ID to human-readable string\n node_id_to_name = {}\n for key, val in node_id_to_uid.items():\n if val not in uid_to_human:\n tf.logging.fatal('Failed to locate: %s', val)\n name = uid_to_human[val]\n node_id_to_name[key] = name\n\n return node_id_to_name\n\n def id_to_string(self, node_id):\n if node_id not in self.node_lookup:\n return ''\n return self.node_lookup[node_id]\n\ndef get_workload(model_path):\n \"\"\" Import workload from frozen protobuf\n\n Parameters\n ----------\n model_path: str\n model_path on remote repository to download from.\n\n Returns\n -------\n graph_def: graphdef\n graph_def is the tensorflow workload for mobilenet.\n\n \"\"\"\n\n repo_base = 'https://github.com/dmlc/web-data/raw/master/tensorflow/models/'\n model_name = 
os.path.basename(model_path)\n model_url = os.path.join(repo_base, model_path)\n\n from mxnet.gluon.utils import download\n download(model_url, model_name)\n\n # Creates graph from saved graph_def.pb.\n with tf.gfile.FastGFile(os.path.join(\"./\", model_name), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n graph = tf.import_graph_def(graph_def, name='')\n return graph_def\n\n#######################################################################\n# PTB LSTMBlockCell Model\n# -----------------------\n\nclass PTBSmallConfig(object):\n \"\"\"Small config.\n This configurations are used when training the model\n \"\"\"\n num_layers = 2\n num_steps = 1\n hidden_size = 200\n batch_size = 1\n vocab_size = 10000\n init_scale = 0.1\n\ndef get_config():\n \"\"\"Configuration used for training the model\"\"\"\n return PTBSmallConfig()\n\ndef pick_from_weight(weight, pows=1.0):\n \"\"\"Identify token from Softmax output.\n This token will be mapped to word in the vocabulary.\n \"\"\"\n weight = weight**pows\n t = np.cumsum(weight)\n s = np.sum(weight)\n return int(np.searchsorted(t, 0.5 * s))\n\ndef do_tf_sample(session, data, in_states, num_samples):\n \"\"\"Sampled from the model\"\"\"\n samples = []\n sample = None\n #Cell inputs c and h should be passed for each layer explicitly.\n state_input_name = ['Model/MultiRNNCellZeroState/LSTMBlockCellZeroState/zeros:0',\n 'Model/MultiRNNCellZeroState/LSTMBlockCellZeroState/zeros_1:0',\n 'Model/MultiRNNCellZeroState/LSTMBlockCellZeroState_1/zeros:0',\n 'Model/MultiRNNCellZeroState/LSTMBlockCellZeroState_1/zeros_1:0']\n state = session.run(state_input_name)\n\n #Graph nodes to be fetched as run output. Tensorflow LSTMBlockCell create internal\n #nodes for intermediate operations (gates) in the cell during run.\n #Cell state (c) is ':1'and cell output (h) is ':6' for each layer.\n fetches = [['Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell:1',\n 'Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell:6',\n 'Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_1:1',\n 'Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_1:6'],\n 'Model/Softmax:0']\n\n def _get_feed_dict(input_name, input_data):\n \"\"\"Create feed dict\"\"\"\n feed_dict = {}\n if isinstance(input_data, list):\n for i, e in enumerate(input_name):\n feed_dict[e] = input_data[i]\n else:\n feed_dict[input_name] = input_data\n return feed_dict\n\n for x in data:\n feed_dict = _get_feed_dict(state_input_name, state)\n feed_dict['Model/Placeholder:0'] = [[x]]\n state, probs = session.run(fetches, feed_dict)\n sample = pick_from_weight(probs[0])\n if sample is not None:\n samples.append(sample)\n else:\n samples.append(0)\n\n k = 1\n while k < num_samples:\n feed_dict = _get_feed_dict(state_input_name, state)\n feed_dict['Model/Placeholder:0'] = [[samples[-1]]]\n state, probs = session.run(fetches, feed_dict)\n sample = pick_from_weight(probs[0])\n samples.append(sample)\n k += 1\n return samples, state\n\ndef _create_ptb_vocabulary(data_dir):\n \"\"\"Read the PTB sample data input to create vocabulary\"\"\"\n data_path = data_dir+'simple-examples/data/'\n file_name = 'ptb.train.txt'\n def _read_words(filename):\n \"\"\"Read the data for creating vocabulary\"\"\"\n with tf.gfile.GFile(filename, \"r\") as f:\n return f.read().encode(\"utf-8\").decode(\"utf-8\").replace(\"\\n\", \"<eos>\").split()\n\n def _build_vocab(filename):\n \"\"\"Create vocabulary\"\"\"\n data = _read_words(filename)\n counter = collections.Counter(data)\n 
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))\n words, _ = list(zip(*count_pairs))\n word_to_id = dict(zip(words, range(len(words))))\n #for python 3.x\n id_to_word = dict((v, k) for k, v in word_to_id.items())\n return word_to_id, id_to_word\n\n def ptb_raw_data(data_path, file_name):\n \"\"\"Read the sample data and create vocabulary\"\"\"\n train_path = os.path.join(data_path, file_name)\n word_to_id, id_2_word = _build_vocab(train_path)\n return word_to_id, id_2_word\n return ptb_raw_data(data_path, file_name)\n\ndef get_workload_ptb():\n \"\"\" Import ptb workload from frozen protobuf\n\n Parameters\n ----------\n Nothing.\n\n Returns\n -------\n graph_def: graphdef\n graph_def is the tensorflow workload for ptb.\n\n word_to_id : dict\n English word to integer id mapping\n\n id_to_word : dict\n Integer id to English word mapping\n \"\"\"\n sample_repo = 'http://www.fit.vutbr.cz/~imikolov/rnnlm/'\n sample_data_file = 'simple-examples.tgz'\n sample_url = sample_repo+sample_data_file\n ptb_model_file = 'RNN/ptb/ptb_model_with_lstmblockcell.pb'\n\n import tarfile\n from tvm.contrib.download import download\n DATA_DIR = './ptb_data/'\n if not os.path.exists(DATA_DIR):\n os.mkdir(DATA_DIR)\n download(sample_url, DATA_DIR+sample_data_file)\n t = tarfile.open(DATA_DIR+sample_data_file, 'r')\n t.extractall(DATA_DIR)\n\n word_to_id, id_to_word = _create_ptb_vocabulary(DATA_DIR)\n return word_to_id, id_to_word, get_workload(ptb_model_file)\n"
]
| [
[
"numpy.sum",
"tensorflow.GraphDef",
"tensorflow.gfile.Exists",
"tensorflow.import_graph_def",
"tensorflow.core.framework.graph_pb2.GraphDef",
"tensorflow.gfile.GFile",
"numpy.cumsum",
"tensorflow.logging.fatal",
"numpy.searchsorted"
]
]
|
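pick_from_weight above maps a softmax vector to a token id deterministically: it finds the index where the cumulative weight first exceeds half the total mass. The same trick restated on a toy probability vector:

import numpy as np

def pick_from_weight(weight, pows=1.0):
    weight = weight ** pows                   # pows > 1 sharpens the distribution
    t = np.cumsum(weight)                     # running total of the mass
    s = np.sum(weight)
    return int(np.searchsorted(t, 0.5 * s))  # first index past half the mass

probs = np.array([0.1, 0.2, 0.6, 0.1])
idx = pick_from_weight(probs)  # -> 2, where the cumulative sum crosses 0.5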
suhuating/ML_CIA | [
"37838eb655d3e432393cee7dda11ea693217eb42"
]
| [
"TensorLayer/example1.py"
]
| [
"import tensorlayer as tl\nimport tensorflow as tf\n\nsess = tf.InteractiveSession()\n\n# 准备数据\nX_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1,784))\n\n# 定义placeholder None表示任意维度数据\nx = tf.placeholder(tf.float32, shape=[None, 784], name='x')\ny_ = tf.placeholder(tf.int64, shape=[None,], name='y_')\n\n# 定义模型\nnetwork = tl.layers.InputLayer(x, name='input_layer')\nnetwork = tl.layers.DropoutLayer(network, keep=0.8, name='drop1')\nnetwork = tl.layers.DenseLayer(network, n_units=800, act=tf.nn.relu, name='relu1')\n\nnetwork = tl.layers.DropoutLayer(network, keep=0.5, name='drop2')\nnetwork = tl.layers.DenseLayer(network, n_units=800, act=tf.nn.relu, name='relu2')\n\nnetwork = tl.layers.DropoutLayer(network, keep=0.5, name='drop3')\nnetwork = tl.layers.DenseLayer(network, n_units=10, act=tf.identity, name='output_layer')\n\n\n# 定义损失函数与衡量指标\ny = network.outputs\ncost = tl.cost.cross_entropy(y, y_, name='cost')\ncorrect_prediction = tf.equal(tf.argmax(y,1), y_)\nacc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\ny_op = tf.argmax(tf.nn.softmax(y), 1)\n\n\n# 定义 optimizer\ntrain_params = network.all_params\ntrain_op = tf.train.AdamOptimizer(learning_rate=0.0001, beta1=0.9, beta2=0.999, epsilon=1e-8,\n use_locking=False).minimize(cost, var_list=train_params)\n\n# 初始化 session中的所有参数\ntl.layers.initialize_global_variables(sess)\n\n# 列出模型信息\nnetwork.print_params()\nnetwork.print_layers()\n\n# 训练模型\ntl.utils.fit(sess, network, train_op, cost, X_train, y_train, x, y_,\n acc=acc, batch_size=500, n_epoch=2, print_freq=5,\n X_val=X_val, y_val=y_val, eval_train=False)\n\n\n# 评估模型\ntl.utils.test(sess, network, acc, X_test, y_test, x, y_, batch_size=None, cost=cost)\n\n# 保存模型 .npz 文件\ntl.files.save_npz(network.all_params, name='model.npz')\nsess.close()\n"
]
| [
[
"tensorflow.train.AdamOptimizer",
"tensorflow.argmax",
"tensorflow.placeholder",
"tensorflow.nn.softmax",
"tensorflow.cast",
"tensorflow.InteractiveSession"
]
]
|
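The accuracy metric in example1.py is the usual argmax-equality average. The same computation in plain numpy, with an illustrative toy batch:

import numpy as np

logits = np.array([[2.0, 0.5, 0.1],
                   [0.2, 1.5, 0.3]])
labels = np.array([0, 1])
pred = logits.argmax(axis=1)                      # mirrors tf.argmax(y, 1)
acc = (pred == labels).astype(np.float32).mean()  # mirrors reduce_mean(cast(equal(...)))
# acc == 1.0 on this toy batch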
YoungXueya/DeepCTR | [
"51835e7de83049bfeaa92cd53ee394446f797822"
]
| [
"deepctr/models/wdl.py"
]
| [
"# -*- coding:utf-8 -*-\n\"\"\"\nAuthor:\n Weichen Shen,[email protected]\n\nReference:\n [1] Cheng H T, Koc L, Harmsen J, et al. Wide & deep learning for recommender systems[C]//Proceedings of the 1st Workshop on Deep Learning for Recommender Systems. ACM, 2016: 7-10.(https://arxiv.org/pdf/1606.07792.pdf)\n\"\"\"\n\nfrom tensorflow.python.keras.models import Model\nfrom tensorflow.python.keras.layers import Dense\n\nfrom ..inputs import build_input_features, get_linear_logit, input_from_feature_columns, combined_dnn_input\nfrom ..layers.core import PredictionLayer, DNN\nfrom ..layers.utils import add_func\n\n\ndef WDL(linear_feature_columns, dnn_feature_columns, dnn_hidden_units=(128, 128), l2_reg_linear=1e-5,\n l2_reg_embedding=1e-5, l2_reg_dnn=0, init_std=0.0001, seed=1024, dnn_dropout=0, dnn_activation='relu',\n task='binary'):\n \"\"\"Instantiates the Wide&Deep Learning architecture.\n\n :param linear_feature_columns: An iterable containing all the features used by linear part of the model.\n :param dnn_feature_columns: An iterable containing all the features used by deep part of the model.\n :param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of DNN\n :param l2_reg_linear: float. L2 regularizer strength applied to wide part\n :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector\n :param l2_reg_dnn: float. L2 regularizer strength applied to DNN\n :param init_std: float,to use as the initialize std of embedding vector\n :param seed: integer ,to use as random seed.\n :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.\n :param dnn_activation: Activation function to use in DNN\n :param task: str, ``\"binary\"`` for binary logloss or ``\"regression\"`` for regression loss\n :return: A Keras model instance.\n \"\"\"\n\n features = build_input_features(\n linear_feature_columns + dnn_feature_columns)\n\n inputs_list = list(features.values())\n\n sparse_embedding_list, dense_value_list = input_from_feature_columns(features, dnn_feature_columns,\n l2_reg_embedding, init_std, seed)\n\n linear_logit = get_linear_logit(features, linear_feature_columns, init_std=init_std, seed=seed, prefix='linear',\n l2_reg=l2_reg_linear)\n\n dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)\n dnn_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,\n False, seed)(dnn_input)\n dnn_logit = Dense(\n 1, use_bias=False, activation=None)(dnn_out)\n\n final_logit = add_func([dnn_logit, linear_logit])\n\n output = PredictionLayer(task)(final_logit)\n\n model = Model(inputs=inputs_list, outputs=output)\n return model\n"
]
| [
[
"tensorflow.python.keras.layers.Dense",
"tensorflow.python.keras.models.Model"
]
]
|
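WDL above adds the wide (linear) logit to the deep (DNN) logit before the prediction layer. A framework-free numpy paraphrase of that combination, with random toy weights standing in for the trained Keras layers:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

rng = np.random.default_rng(0)
x = rng.random((8, 16))                   # toy batch of dense features
w_wide = rng.random(16)                   # wide part: plain linear weights
W1, W2 = rng.random((16, 128)), rng.random((128, 1))

linear_logit = x @ w_wide                         # get_linear_logit(...)
dnn_logit = (np.maximum(x @ W1, 0.0) @ W2)[:, 0]  # DNN + Dense(1, use_bias=False)
prob = sigmoid(linear_logit + dnn_logit)          # add_func, then PredictionLayer('binary')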
miles-weatherseed/reac-diff-solver | [
"1ea32b352f9eace2a51d2196e3b2b111c080ad7b"
]
| [
"tst/test_conjgrad.py"
]
| [
"import unittest\nimport numpy as np\nfrom scipy.sparse import lil_matrix, csc_matrix\nfrom reac_diff_solver.conjgrad import conjugate_gradients\n\nclass ConjugateGradientsTest(unittest.TestCase):\n def test_non_square(self):\n A = np.arange(0,6,1).reshape(3,2)\n b = np.zeros(3) # Don't care about validity\n x0 = np.zeros(3) # Don't care about validity\n with self.assertRaises(TypeError) as arctx:\n conjugate_gradients(A, b, x0)\n self.assertEqual(str(arctx.exception), \"The matrix provided is not square.\")\n\n def test_dim_mismatch_b(self):\n N = 3\n A = np.arange(0,N*N,1).reshape(N,N)\n b = np.zeros(N + 1)\n x0 = np.zeros(N) # Don't care about validity\n with self.assertRaises(TypeError) as arctx:\n conjugate_gradients(A, b, x0)\n self.assertEqual(str(arctx.exception), \"The dimensions of the right hand side do not match the dimensions of the matrix provided.\")\n\n def test_dim_mismatch_x0(self):\n N = 3\n A = np.arange(0,N*N,1).reshape(N,N)\n b = np.zeros(N)\n x0 = np.zeros(N + 1)\n with self.assertRaises(TypeError) as arctx:\n conjugate_gradients(A, b, x0)\n self.assertEqual(str(arctx.exception), \"The dimensions of the starting point do not match the dimensions of the matrix provided.\")\n \n def test_result_converging_dense(self):\n N = 3\n A = np.array([[2, -1, 0], [-1, 2, -1], [0, -1, 2]])\n b = np.ones(N)\n x0 = np.zeros(N)\n results = conjugate_gradients(A, b, x0)\n self.assertTrue(np.allclose(results[0], np.array([1.5, 2.0, 1.5])))\n\n def test_result_converging_sparse(self):\n N = 1000\n A = lil_matrix((N,N))\n A.setdiag(np.arange(1,N+1,1))\n A = np.eye(N) + A*A.transpose()\n b = np.ones(N)\n x0 = np.zeros(N)\n results = conjugate_gradients(csc_matrix(A), b, x0, nmax = 2*N)\n expected_results = np.ones(N) / A.diagonal()\n self.assertTrue(np.allclose(results[0], expected_results))\n\n def test_result_nonconverging(self):\n N = 3\n A = np.array([[3, -1, 4], [5, 1, 8], [1, 2, 0]]) # not definite because one eigenvalue is negative\n b = np.ones(N)\n x0 = np.zeros(N)\n with self.assertRaises(Exception) as arctx:\n results = conjugate_gradients(A, b, x0)\n self.assertEqual(str(arctx.exception), \"The iteration has failed to converge within nmax(=100) iterations.\")\n # If solved the examples[0] would have been [1, 0, -0.5]\n\nif __name__ == \"__main__\":\n unittest.main()"
]
| [
[
"numpy.array",
"numpy.zeros",
"scipy.sparse.csc_matrix",
"numpy.ones",
"numpy.eye",
"numpy.allclose",
"numpy.arange",
"scipy.sparse.lil_matrix"
]
]
|
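The tests above fully pin down the expected interface of conjugate_gradients: TypeError (with exact messages) for a non-square matrix or mismatched dimensions, a return tuple whose first element is the solution, an nmax keyword defaulting to 100, and an Exception on non-convergence. A minimal reference implementation satisfying those tests might look like this (an illustrative sketch, not the repo's actual solver):

import numpy as np

def conjugate_gradients(A, b, x0, nmax=100, tol=1e-10):
    # Validation mirrors the messages asserted in the tests above.
    if A.shape[0] != A.shape[1]:
        raise TypeError("The matrix provided is not square.")
    if b.shape[0] != A.shape[0]:
        raise TypeError("The dimensions of the right hand side do not match the dimensions of the matrix provided.")
    if x0.shape[0] != A.shape[0]:
        raise TypeError("The dimensions of the starting point do not match the dimensions of the matrix provided.")
    x = np.asarray(x0, dtype=float)
    r = b - A @ x                     # residual; `@` works for dense and scipy.sparse A
    p = r.copy()
    rs_old = r @ r
    for k in range(nmax):
        Ap = A @ p
        alpha = rs_old / (p @ Ap)
        x = x + alpha * p
        r = r - alpha * Ap
        rs_new = r @ r
        if np.sqrt(rs_new) < tol:
            return x, k + 1           # tests read the solution from results[0]
        p = r + (rs_new / rs_old) * p
        rs_old = rs_new
    raise Exception(f"The iteration has failed to converge within nmax(={nmax}) iterations.")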
elephantmipt/catalyst | [
"6c706e4859ed7c58e5e6a5b7634176bffd0e2465"
]
| [
"catalyst/utils/torch.py"
]
| [
"from typing import Dict, Iterable, List, Union\nimport collections\nimport os\nimport re\n\nimport numpy as np\n\nimport torch\nfrom torch import nn, Tensor\nimport torch.backends\nfrom torch.backends import cudnn\n\nfrom catalyst.settings import IS_XLA_AVAILABLE\nfrom catalyst.typing import Device, Model, Optimizer\nfrom catalyst.utils.dict import merge_dicts\n\n\ndef get_optimizable_params(model_or_params):\n \"\"\"Returns all the parameters that requires gradients.\"\"\"\n params: Iterable[torch.Tensor] = model_or_params\n if isinstance(model_or_params, nn.Module):\n params = model_or_params.parameters()\n\n master_params = [p for p in params if p.requires_grad]\n return master_params\n\n\ndef get_optimizer_momentum(optimizer: Optimizer) -> float:\n \"\"\"Get momentum of current optimizer.\n\n Args:\n optimizer: PyTorch optimizer\n\n Returns:\n float: momentum at first param group\n \"\"\"\n betas = optimizer.param_groups[0].get(\"betas\", None)\n momentum = optimizer.param_groups[0].get(\"momentum\", None)\n return betas[0] if betas is not None else momentum\n\n\ndef set_optimizer_momentum(optimizer: Optimizer, value: float, index: int = 0):\n \"\"\"Set momentum of ``index`` 'th param group of optimizer to ``value``.\n\n Args:\n optimizer: PyTorch optimizer\n value: new value of momentum\n index (int, optional): integer index of optimizer's param groups,\n default is 0\n \"\"\"\n betas = optimizer.param_groups[0].get(\"betas\", None)\n momentum = optimizer.param_groups[0].get(\"momentum\", None)\n if betas is not None:\n _, beta = betas\n optimizer.param_groups[index][\"betas\"] = (value, beta)\n elif momentum is not None:\n optimizer.param_groups[index][\"momentum\"] = value\n\n\ndef get_device() -> torch.device:\n \"\"\"Simple returning the best available device (TPU > GPU > CPU).\"\"\"\n is_available_gpu = torch.cuda.is_available()\n device = \"cpu\"\n if IS_XLA_AVAILABLE:\n import torch_xla.core.xla_model as xm\n\n device = xm.xla_device()\n elif is_available_gpu:\n device = \"cuda\"\n return torch.device(device)\n\n\ndef get_available_gpus():\n \"\"\"Array of available GPU ids.\n\n Examples:\n >>> os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0,2\"\n >>> get_available_gpus()\n [0, 2]\n\n >>> os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0,-1,1\"\n >>> get_available_gpus()\n [0]\n\n >>> os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n >>> get_available_gpus()\n []\n\n >>> os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\n >>> get_available_gpus()\n []\n\n Returns:\n iterable: available GPU ids\n \"\"\"\n if \"CUDA_VISIBLE_DEVICES\" in os.environ:\n result = os.environ[\"CUDA_VISIBLE_DEVICES\"].split(\",\")\n result = [id_ for id_ in result if id_ != \"\"]\n # invisible GPUs\n # https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#env-vars\n if -1 in result:\n index = result.index(-1)\n result = result[:index]\n elif torch.cuda.is_available():\n result = list(range(torch.cuda.device_count()))\n else:\n result = []\n return result\n\n\ndef get_activation_fn(activation: str = None):\n \"\"\"Returns the activation function from ``torch.nn`` by its name.\"\"\"\n if activation is None or activation.lower() == \"none\":\n activation_fn = lambda x: x # noqa: E731\n else:\n activation_fn = torch.nn.__dict__[activation]()\n return activation_fn\n\n\ndef any2device(value, device: Device):\n \"\"\"\n Move tensor, list of tensors, list of list of tensors,\n dict of tensors, tuple of tensors to target device.\n\n Args:\n value: Object to be moved\n device: target device ids\n\n Returns:\n Same 
structure as value, but all tensors and np.arrays moved to device\n \"\"\"\n if isinstance(value, dict):\n return {k: any2device(v, device) for k, v in value.items()}\n elif isinstance(value, (tuple, list)):\n return [any2device(v, device) for v in value]\n elif torch.is_tensor(value):\n return value.to(device, non_blocking=True)\n elif (\n isinstance(value, (np.ndarray, np.void))\n and value.dtype.fields is not None\n ):\n return {\n k: any2device(value[k], device) for k in value.dtype.fields.keys()\n }\n elif isinstance(value, np.ndarray):\n return torch.Tensor(value).to(device)\n return value\n\n\ndef prepare_cudnn(deterministic: bool = None, benchmark: bool = None) -> None:\n \"\"\"\n Prepares CuDNN benchmark and sets CuDNN\n to be deterministic/non-deterministic mode\n\n Args:\n deterministic: deterministic mode if running in CuDNN backend.\n benchmark: If ``True`` use CuDNN heuristics to figure out\n which algorithm will be most performant\n for your model architecture and input.\n Setting it to ``False`` may slow down your training.\n \"\"\"\n if torch.cuda.is_available():\n # CuDNN reproducibility\n # https://pytorch.org/docs/stable/notes/randomness.html#cudnn\n if deterministic is None:\n deterministic = (\n os.environ.get(\"CUDNN_DETERMINISTIC\", \"True\") == \"True\"\n )\n cudnn.deterministic = deterministic\n\n # https://discuss.pytorch.org/t/how-should-i-disable-using-cudnn-in-my-code/38053/4\n if benchmark is None:\n benchmark = os.environ.get(\"CUDNN_BENCHMARK\", \"True\") == \"True\"\n cudnn.benchmark = benchmark\n\n\ndef process_model_params(\n model: Model,\n layerwise_params: Dict[str, dict] = None,\n no_bias_weight_decay: bool = True,\n lr_scaling: float = 1.0,\n) -> List[Union[torch.nn.Parameter, dict]]:\n \"\"\"Gains model parameters for ``torch.optim.Optimizer``.\n\n Args:\n model: Model to process\n layerwise_params: Order-sensitive dict where\n each key is regex pattern and values are layer-wise options\n for layers matching with a pattern\n no_bias_weight_decay: If true, removes weight_decay\n for all ``bias`` parameters in the model\n lr_scaling: layer-wise learning rate scaling,\n if 1.0, learning rates will not be scaled\n\n Returns:\n iterable: parameters for an optimizer\n\n Example::\n\n >>> model = catalyst.contrib.models.segmentation.ResnetUnet()\n >>> layerwise_params = collections.OrderedDict([\n >>> (\"conv1.*\", dict(lr=0.001, weight_decay=0.0003)),\n >>> (\"conv.*\", dict(lr=0.002))\n >>> ])\n >>> params = process_model_params(model, layerwise_params)\n >>> optimizer = torch.optim.Adam(params, lr=0.0003)\n\n \"\"\"\n params = list(model.named_parameters())\n layerwise_params = layerwise_params or collections.OrderedDict()\n\n model_params = []\n for name, parameters in params:\n options = {}\n for pattern, pattern_options in layerwise_params.items():\n if re.match(pattern, name) is not None:\n # all new LR rules write on top of the old ones\n options = merge_dicts(options, pattern_options)\n\n # no bias decay from https://arxiv.org/abs/1812.01187\n if no_bias_weight_decay and name.endswith(\"bias\"):\n options[\"weight_decay\"] = 0.0\n\n # lr linear scaling from https://arxiv.org/pdf/1706.02677.pdf\n if \"lr\" in options:\n options[\"lr\"] *= lr_scaling\n\n model_params.append({\"params\": parameters, **options})\n\n return model_params\n\n\ndef get_requires_grad(model: Model):\n \"\"\"Gets the ``requires_grad`` value for all model parameters.\n\n Example::\n\n >>> model = SimpleModel()\n >>> requires_grad = get_requires_grad(model)\n\n Args:\n model: 
model\n\n Returns:\n requires_grad (Dict[str, bool]): value\n \"\"\"\n requires_grad = {}\n for name, param in model.named_parameters():\n requires_grad[name] = param.requires_grad\n return requires_grad\n\n\ndef set_requires_grad(\n model: Model, requires_grad: Union[bool, Dict[str, bool]]\n):\n \"\"\"Sets the ``requires_grad`` value for all model parameters.\n\n Example::\n\n >>> model = SimpleModel()\n >>> set_requires_grad(model, requires_grad=True)\n >>> # or\n >>> model = SimpleModel()\n >>> set_requires_grad(model, requires_grad={\"\"})\n\n Args:\n model: model\n requires_grad (Union[bool, Dict[str, bool]]): value\n \"\"\"\n if isinstance(requires_grad, dict):\n for name, param in model.named_parameters():\n assert (\n name in requires_grad\n ), f\"Parameter `{name}` does not exist in requires_grad\"\n param.requires_grad = requires_grad[name]\n else:\n requires_grad = bool(requires_grad)\n for param in model.parameters():\n param.requires_grad = requires_grad\n\n\ndef get_network_output(net: Model, *input_shapes_args, **input_shapes_kwargs):\n \"\"\"# noqa: D202\n For each input shape returns an output tensor\n\n Examples:\n >>> net = nn.Linear(10, 5)\n >>> utils.get_network_output(net, (1, 10))\n tensor([[[-0.2665, 0.5792, 0.9757, -0.5782, 0.1530]]])\n\n Args:\n net: the model\n *input_shapes_args: variable length argument list of shapes\n **input_shapes_kwargs: key-value arguemnts of shapes\n\n Returns:\n tensor with network output\n \"\"\"\n\n def _rand_sample(\n input_shape,\n ) -> Union[torch.Tensor, Dict[str, torch.Tensor]]:\n if isinstance(input_shape, dict):\n input_t = {\n key: torch.Tensor(torch.randn((1,) + key_input_shape))\n for key, key_input_shape in input_shape.items()\n }\n else:\n input_t = torch.Tensor(torch.randn((1,) + input_shape))\n return input_t\n\n input_args = [\n _rand_sample(input_shape) for input_shape in input_shapes_args\n ]\n input_kwargs = {\n key: _rand_sample(input_shape)\n for key, input_shape in input_shapes_kwargs.items()\n }\n\n output_t = net(*input_args, **input_kwargs)\n return output_t\n\n\ndef detach(tensor: torch.Tensor) -> np.ndarray:\n \"\"\"Detach a pytorch tensor from graph and\n convert it to numpy array\n\n Args:\n tensor: PyTorch tensor\n\n Returns:\n numpy ndarray\n \"\"\"\n return tensor.cpu().detach().numpy()\n\n\ndef trim_tensors(tensors):\n \"\"\"\n Trim padding off of a batch of tensors to the smallest possible length.\n Should be used with `catalyst.data.DynamicLenBatchSampler`.\n\n Adapted from `Dynamic minibatch trimming to improve BERT training speed`_.\n\n Args:\n tensors: list of tensors to trim.\n\n Returns:\n List[torch.tensor]: list of trimmed tensors.\n\n .. 
_`Dynamic minibatch trimming to improve BERT training speed`:\n https://www.kaggle.com/c/jigsaw-unintended-bias-in-toxicity-classification/discussion/94779\n \"\"\"\n max_len = torch.max(torch.sum((tensors[0] != 0), 1))\n if max_len > 2:\n tensors = [tsr[:, :max_len] for tsr in tensors]\n return tensors\n\n\ndef normalize(samples: Tensor) -> Tensor:\n \"\"\"\n Args:\n samples: tensor with shape of [n_samples, features_dim]\n\n Returns:\n normalized tensor with the same shape\n \"\"\"\n norms = torch.norm(samples, p=2, dim=1).unsqueeze(1)\n samples = samples / (norms + torch.finfo(torch.float32).eps)\n return samples\n\n\n__all__ = [\n \"get_optimizable_params\",\n \"get_optimizer_momentum\",\n \"set_optimizer_momentum\",\n \"get_device\",\n \"get_available_gpus\",\n \"get_activation_fn\",\n \"any2device\",\n \"prepare_cudnn\",\n \"process_model_params\",\n \"get_requires_grad\",\n \"set_requires_grad\",\n \"get_network_output\",\n \"detach\",\n \"trim_tensors\",\n \"normalize\",\n]\n"
]
| [
[
"torch.device",
"torch.is_tensor",
"torch.norm",
"torch.finfo",
"torch.cuda.device_count",
"torch.cuda.is_available",
"torch.Tensor",
"torch.randn",
"torch.sum"
]
]
|
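process_model_params above is the piece with the most moving parts: layer-wise regex rules are applied in order (later matches overwrite earlier ones), "bias" parameters lose weight decay, and any per-group lr is multiplied by lr_scaling. A small self-contained demo, assuming catalyst is installed so the module imports as below:

import collections
import torch
from torch import nn
from catalyst.utils.torch import process_model_params

model = nn.Sequential(collections.OrderedDict([
    ("conv1", nn.Conv2d(3, 8, 3)),
    ("conv2", nn.Conv2d(8, 8, 3)),
]))
layerwise_params = collections.OrderedDict([
    ("conv1.*", dict(lr=0.001, weight_decay=0.0003)),
    ("conv.*", dict(lr=0.002)),  # also matches conv1.* and overwrites its lr
])
params = process_model_params(model, layerwise_params, lr_scaling=0.5)
optimizer = torch.optim.Adam(params, lr=0.0003)

for group in params:
    print({k: v for k, v in group.items() if k != "params"})
# conv1.weight keeps weight_decay=0.0003 from the first rule; every ".bias"
# gets weight_decay=0.0; all matched groups end with lr=0.001
# (0.002 from the second rule, then scaled by lr_scaling=0.5).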
cnmy-ro/direct-custom | [
"a354e82d4f4b7598037e7b9dc73456fc361820ac"
]
| [
"direct/nn/multidomainnet/multidomainnet_engine.py"
]
| [
"# coding=utf-8\n# Copyright (c) DIRECT Contributors\n\nfrom typing import Callable, Dict, Optional\n\nimport torch\nfrom torch import nn\nfrom torch.cuda.amp import autocast\n\nimport direct.data.transforms as T\nfrom direct.config import BaseConfig\nfrom direct.engine import DoIterationOutput\nfrom direct.nn.mri_models import MRIModelEngine\nfrom direct.utils import detach_dict, dict_to_device, reduce_list_of_dicts\n\n\nclass MultiDomainNetEngine(MRIModelEngine):\n \"\"\"Multi Domain Network Engine.\"\"\"\n\n def __init__(\n self,\n cfg: BaseConfig,\n model: nn.Module,\n device: str,\n forward_operator: Optional[Callable] = None,\n backward_operator: Optional[Callable] = None,\n mixed_precision: bool = False,\n **models: nn.Module,\n ):\n \"\"\"Inits :class:`MultiDomainNetEngine.\"\"\"\n super().__init__(\n cfg,\n model,\n device,\n forward_operator=forward_operator,\n backward_operator=backward_operator,\n mixed_precision=mixed_precision,\n **models,\n )\n\n self._spatial_dims = (2, 3)\n\n def _do_iteration(\n self,\n data: Dict[str, torch.Tensor],\n loss_fns: Optional[Dict[str, Callable]] = None,\n regularizer_fns: Optional[Dict[str, Callable]] = None,\n ) -> DoIterationOutput:\n\n # loss_fns can be done, e.g. during validation\n if loss_fns is None:\n loss_fns = {}\n\n if regularizer_fns is None:\n regularizer_fns = {}\n\n loss_dicts = []\n regularizer_dicts = []\n\n data = dict_to_device(data, self.device)\n\n # sensitivity_map of shape (batch, coil, height, width, complex=2)\n sensitivity_map = data[\"sensitivity_map\"].clone()\n data[\"sensitivity_map\"] = self.compute_sensitivity_map(sensitivity_map)\n\n with autocast(enabled=self.mixed_precision):\n\n output_multicoil_image = self.model(\n masked_kspace=data[\"masked_kspace\"],\n sensitivity_map=data[\"sensitivity_map\"],\n )\n\n output_image = T.root_sum_of_squares(\n output_multicoil_image, self._coil_dim, self._complex_dim\n ) # shape (batch, height, width)\n\n loss_dict = {k: torch.tensor([0.0], dtype=data[\"target\"].dtype).to(self.device) for k in loss_fns.keys()}\n regularizer_dict = {\n k: torch.tensor([0.0], dtype=data[\"target\"].dtype).to(self.device) for k in regularizer_fns.keys()\n }\n\n for key, value in loss_dict.items():\n loss_dict[key] = value + loss_fns[key](\n output_image,\n **data,\n reduction=\"mean\",\n )\n\n for key, value in regularizer_dict.items():\n regularizer_dict[key] = value + regularizer_fns[key](\n output_image,\n **data,\n )\n\n loss = sum(loss_dict.values()) + sum(regularizer_dict.values())\n\n if self.model.training:\n self._scaler.scale(loss).backward()\n\n loss_dicts.append(detach_dict(loss_dict))\n regularizer_dicts.append(\n detach_dict(regularizer_dict)\n ) # Need to detach dict as this is only used for logging.\n\n # Add the loss dicts.\n loss_dict = reduce_list_of_dicts(loss_dicts, mode=\"sum\")\n regularizer_dict = reduce_list_of_dicts(regularizer_dicts, mode=\"sum\")\n\n return DoIterationOutput(\n output_image=output_image,\n sensitivity_map=data[\"sensitivity_map\"],\n data_dict={**loss_dict, **regularizer_dict},\n )\n"
]
| [
[
"torch.cuda.amp.autocast",
"torch.tensor"
]
]
|
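Stripped of DIRECT's abstractions, the core pattern of _do_iteration above is: forward pass under autocast, a dict of named losses summed into one scalar, and a GradScaler-backed backward when training. A generic sketch of just that pattern (not DIRECT's actual engine; runs on CPU with AMP disabled):

import torch
from torch import nn
from torch.cuda.amp import GradScaler, autocast

use_amp = torch.cuda.is_available()
device = "cuda" if use_amp else "cpu"
model = nn.Linear(8, 1).to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scaler = GradScaler(enabled=use_amp)  # no-op when AMP is off

loss_fns = {"l1": nn.L1Loss(), "mse": nn.MSELoss()}
x = torch.randn(4, 8, device=device)
target = torch.randn(4, 1, device=device)

optimizer.zero_grad()
with autocast(enabled=use_amp):
    out = model(x)
    # one entry per named loss, mirroring the engine's loss_dict
    loss_dict = {k: fn(out, target) for k, fn in loss_fns.items()}
    loss = sum(loss_dict.values())
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
print({k: float(v.detach()) for k, v in loss_dict.items()})  # detached, for logging only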
ETCBC/ssi_morphology | [
"2f784b6a440b990968f504da02c0bd8881b7bf1b"
]
| [
"evaluation.py"
]
| [
"# This module provides a method for calculating the incorrectness\n# of a morphological encoding or the distance between two encodings.\n# It exports three functions: mc_badness(), mc_distance(code1, code2)\n# and mc_load_morphemes(language).\n\n# mc_load_morphemes() needs to be called before the first call to\n# mc_badness() and mc_distance() in order to initialise the appropriate\n# language (currently Hebrew or Syriac).\n\n# mc_badness() takes one or two arguments: the surface form and the\n# encoding. If only the encoding is given, error level 1 (difference\n# of the surface form with the true surface form) is not evaluated.\n# It returns zero for a correct encoding or a positive integer\n# indicating how bad the encoding is.\n\n# mc_distance(code1, code2) takes as arguments two strings, which\n# are supposed to represent morphological encodings, and returns an\n# integer indicating how far apart both encodings are. If one of\n# the two strings is known to be a correct encoding, the distance\n# expresses how `bad' the other is.\n\n# Both functions return an integer < 262144 (which is 8**6).\n\n# mc_load_morphemes(language) takes one string as argument, the name\n# of the language of which the paradigmatic forms of the morphemes\n# are to be loaded. They are retrieved from a file {Language}.json,\n# which must reside in the current directory.\n\n# Base of the number system in which the distance is counted\nBase = 8\n\n\n# The `badness' of the encoding is evaluated in six respects,\n# which gives rise to a vector of six dimensions. Each dimension is\n# considered a factor `Base' less serious than the previous.\n# v[0] = number of parse errors in the encoding\n# v[1] = edit distance of the surface form to the true surface form\n# v[2] = number of ungrammatical morpheme type combinations\n# v[3] = number of unparadigmatic morphemes\n# v[4] = difference in number of analytical words with the true form\n# v[5] = number of morphemes that differ from the true form\nDimensions = 6\n\nConsonants = '>BGDHWZXVJKLMNS<PYQRFCT'\n\n# Global variables that hold the morphemes of the language\n\n# Concatenative morphemes\nCCM_Types = []\nCCM = []\nPrefixes = []\nSuffixes = []\n\n# Nonconcatenative morphemes\nNCM_Types = []\nNCM = []\n\n\ndef mc_load_morphemes(l):\n '''Load the morphemes of the language (l) from a file.'''\n from json import load\n\n with open(l + '.json') as f:\n d = load(f)\n\n # Concatenative morphemes\n global CCM_Types, CCM, Prefixes, Suffixes\n CCM_Types = d['CCM_Types']\n CCM = d['CCM']\n\n Prefixes = CCM_Types[:d['N_Prefixes']]\n Suffixes = CCM_Types[-d['N_Suffixes']:]\n\n # Nonconcatenative morphemes\n global NCM_Types, NCM\n NCM_Types = d['NCM_Types']\n NCM = d['NCM']\n for m in NCM_Types:\n NCM[m] = set(NCM[m])\n\n\n# In the class Word, morphemes are stored as a pair (tuple) of a\n# realised form [0] and a paradigmatic form [1] like ('T', 'T=').\nclass Word():\n def __init__(self, prefixes, lexeme, suffixes):\n self.morphemes = {}\n self.morphemes['pfm'] = prefixes[0]\n self.morphemes['pfx'] = prefixes[1]\n self.morphemes['vbs'] = prefixes[2]\n self.morphemes['lex'] = lexeme\n self.morphemes['vbe'] = suffixes[0]\n self.morphemes['nme'] = suffixes[1]\n self.morphemes['uvf'] = suffixes[2]\n self.morphemes['vpm'] = suffixes[3]\n self.morphemes['prs'] = suffixes[4]\n\n def surface(self):\n s = ''\n for m in CCM_Types:\n if self.morphemes[m]:\n s += self.morphemes[m][0]\n return s\n\n# The morphological code is parsed using McLexer and McParser\nfrom sly import Lexer, Parser\n\n# 
The token CHR makes sure that any input can be tokenised and no\n# errors are raised. It is not used by the parser.\nclass McLexer(Lexer):\n def error(self, t):\n assert(False)\n\n tokens = { PFM, PFX, VBS, LETTER, HOMOGRAPHY, VBE, NME, UVF,\n VPM, VOWEL_PATTERN, PRS, CHR }\n literals = { '-', '(', '&' }\n\n PFM = '!'\n PFX = '@'\n VBS = '\\\\]'\n LETTER = f'[{Consonants}]'\n HOMOGRAPHY = '=+'\n VBE = '\\\\['\n NME = '/'\n UVF = '~'\n VOWEL_PATTERN = '[a-z]+'\n VPM = ':'\n PRS = '\\\\+'\n CHR = '[^-(&]'\n\n# The parser returns a list of analytic words as objects of the\n# class Word.\nclass McParser(Parser):\n #debugfile = 'parser.out'\n\n def __init__(self):\n super().__init__()\n self.status = 0\n\n def error(self, p):\n #print('Syntax error:', self.symstack)\n # Just discard the token and tell the parser it's okay.\n self.errok()\n self.status += 1\n\n tokens = McLexer.tokens - {'CHR'}\n\n @_('words')\n def wordlist(self, p):\n return (p[0], self.status)\n\n @_('word')\n def words(self, p):\n return [p[0]]\n\n @_('words \"-\" word')\n def words(self, p):\n p[0].append(p[2])\n return p[0]\n\n @_('prefixes lexeme suffixes')\n def word(self, p):\n return Word(p[0], p[1], p[2])\n\n @_('preformative reflexive verbal_stem')\n def prefixes(self, p):\n return (p[0], p[1], p[2])\n\n @_('form')\n def lexeme(self, p):\n return p.form\n\n @_('verbal_ending nominal_ending univalent_final \\\n vowel_pattern_mark pronominal_suffix')\n def suffixes(self, p):\n return (p[0], p[1], p[2], p[3], p[4])\n\n @_('empty')\n def preformative(self, p):\n return None\n\n @_('PFM form PFM')\n def preformative(self, p):\n return p[1]\n\n @_('empty')\n def reflexive(self, p):\n return None\n\n @_('PFX form PFX')\n def reflexive(self, p):\n return p[1]\n\n @_('empty')\n def verbal_stem(self, p):\n return None\n\n @_('VBS form VBS')\n def verbal_stem(self, p):\n return p[1]\n\n @_('VBE form')\n def verbal_ending(self, p):\n return p[1]\n\n @_('empty')\n def verbal_ending(self, p):\n return None\n\n @_('NME form')\n def nominal_ending(self, p):\n return p[1]\n\n @_('empty')\n def nominal_ending(self, p):\n return None\n\n @_('UVF form')\n def univalent_final(self, p):\n return p[1]\n\n @_('empty')\n def univalent_final(self, p):\n return None\n\n @_('VPM VOWEL_PATTERN')\n def vowel_pattern_mark(self, p):\n return set(p[1])\n\n @_('empty')\n def vowel_pattern_mark(self, p):\n return None\n\n @_('PRS form')\n def pronominal_suffix(self, p):\n return p[1]\n\n @_('empty')\n def pronominal_suffix(self, p):\n return None\n\n @_('empty')\n def form(self, p):\n return ('', '')\n\n @_('letters homography')\n def form(self, p):\n realised = ''.join([t[0] for t in p[0]])\n paradigmatic = ''.join([t[1] for t in p[0]]) + p[1]\n return (realised, paradigmatic)\n\n @_('letter')\n def letters(self, p):\n return [p[0]]\n\n @_('letters letter')\n def letters(self, p):\n p[0].append(p[1])\n return p[0]\n\n @_('empty', 'HOMOGRAPHY')\n def homography(self, p):\n return p[0]\n\n @_('')\n def empty(self, p):\n return ''\n\n @_('plain_letter', 'deleted_letter', 'added_letter')\n def letter(self, p):\n return p[0]\n\n @_('LETTER')\n def plain_letter(self, p):\n return (p[0], p[0])\n\n @_('\"(\" LETTER')\n def deleted_letter(self, p):\n return ('', p[1])\n\n @_('\"&\" LETTER')\n def added_letter(self, p):\n return (p[1], '')\n\n\ndef ungrammatical_combinations(wl):\n '''Count the number of ungrammatical morpheme type combinations in\n word list (wl).'''\n r = 0\n for w in wl:\n for m in Prefixes:\n if w.morphemes[m] and not w.morphemes['vbe']:\n 
r += 1\n if w.morphemes['pfm']:\n for v in {'M'}:\n if v == w.morphemes['pfm'][1] and not w.morphemes['nme']:\n r += 1\n if w.morphemes['vpm']:\n for v in {'a', 'c'}:\n if v in w.morphemes['vpm'] and not w.morphemes['nme']:\n r += 1\n for v in {'d', 'p', 'o', 'u'}:\n if v in w.morphemes['vpm'] and not w.morphemes['vbe']:\n r += 1\n return r\n\n\ndef unparadigmatic_morphemes(wl):\n '''Count the number of unparadigmatic morphemes in word list (wl).'''\n r = 0\n for w in wl:\n for m in Prefixes + Suffixes:\n if w.morphemes[m] and w.morphemes[m][1] not in CCM[m]:\n r += 1\n for m in NCM_Types:\n if w.morphemes[m] and not (w.morphemes[m] < NCM[m]):\n r += 1\n return r\n\n\n# The evaluation of the word list returned by the parser is done\n# in two steps. First the dimensions [0,2,3] are assigned, which\n# can be calculated without comparison with the true form. This is\n# performed by evaluate(). Then the other dimensions are assigned\n# through comparison with the true form. This is done by compare().\n\nfrom numpy import zeros\n\ndef evaluate(wl, e):\n '''Evaluate the three dimensions which can be calculated\n individually for word list (wl) with (e) syntax errors.'''\n v = zeros(Dimensions, dtype=int)\n v[0] = e\n v[2] = ungrammatical_combinations(wl)\n v[3] = unparadigmatic_morphemes(wl)\n return v\n\n\ndef morpheme_comparison(w1, w2):\n '''Return the number of morphemes that differ in their analysis\n between word w1 and w2.'''\n r = 0\n for m in CCM_Types:\n if w1.morphemes[m] != w2.morphemes[m]:\n r += 1\n for m in NCM_Types:\n if w1.morphemes[m] != w2.morphemes[m]:\n r += 1\n return r\n\n\ndef wlsurface(wl):\n '''Return the surface of a word list.'''\n s = ''\n for w in wl:\n s += w.surface()\n return s\n\n\nfrom Levenshtein import distance\n\ndef compare(w1, w2, v1, v2):\n '''Return a vector with an error count in all dimensions for the\n difference in analysis between word w1 and w2. The vectors v1\n and v2 already contain the counts of the individual evaluation\n of the words.'''\n v = abs(v1 - v2)\n v[1] = distance(wlsurface(w1), wlsurface(w2))\n v[4] = abs(len(w1) - len(w2))\n if v[4] == 0:\n for i in range(len(w1)):\n v[5] += morpheme_comparison(w1[i], w2[i])\n return v\n\n\ndef badness(d):\n '''Map the difference vector onto a nonnegative integer.'''\n assert(not any(d < 0))\n r = 0\n for x in d:\n r = Base * r + min(x, Base - 1)\n return r\n\n\ndef mc_parse(s):\n '''Call the lexer and parser for the morphological encoding of\n string (s) and return the word list and error count.'''\n lex = McLexer()\n parser = McParser()\n r = parser.parse(lex.tokenize(s))\n if r:\n return r\n else:\n return ([], Base)\n\n\ndef mc_distance(s1, s2):\n '''Return a nonnegative number that expresses how badly the\n morphological encodings in strings s1 and s2 differ.'''\n if s1 == s2:\n return 0\n else:\n w1, e1 = mc_parse(s1)\n v1 = evaluate(w1, e1)\n #assert(not any(v1))\n w2, e2 = mc_parse(s2)\n v2 = evaluate(w2, e2)\n v = compare(w1, w2, v1, v2)\n #print(v1, v2, v)\n return badness(v)\n\n\nfrom re import sub\nnon_consonants = f'[^{Consonants}]'\n\ndef consonants(s):\n '''Strip all non-consonants from a surface form.'''\n return sub(non_consonants, '', s)\n\n\n# mc_badness() takes one or two arguments: the surface form and the\n# encoding. 
If only the encoding is given, error level 1 (difference\n# of the surface form with the true surface form) is not evaluated.\n\ndef mc_badness(*s):\n '''Return a nonnegative number that expresses how bad the encoding\n of the surface form is.'''\n w, e = mc_parse(s[-1])\n v = evaluate(w, e)\n if len(s) > 1:\n v[1] = distance(consonants(s[0]), wlsurface(w))\n return badness(v)\n"
]
| [
[
"numpy.zeros"
]
]
|
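The badness() mapping above reads the six-dimensional error vector as the digits of a base-8 number, clipping each component at Base-1 so a flood of minor errors can never outrank a single error in a more significant dimension; the maximum is therefore 8**6 - 1 = 262143, matching the "< 262144 (which is 8**6)" bound stated in the module's header comments. A worked illustration reusing the function's own logic:

import numpy as np

Base = 8

def badness(d):
    assert not any(d < 0)
    r = 0
    for x in d:
        r = Base * r + min(x, Base - 1)
    return r

v = np.array([0, 1, 0, 2, 0, 3])   # hypothetical error counts per dimension
print(badness(v))                  # 1*8**4 + 2*8**2 + 3 = 4227
print(badness(np.full(6, 99)))     # clipped to all 7s -> 8**6 - 1 = 262143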
rohts-patil/keras | [
"a8bbcf611f64c1b36fda671534f25a0366c076b7"
]
| [
"tests/keras/layers/test_normalization.py"
]
| [
"import pytest\nimport numpy as np\nfrom numpy.testing import assert_allclose\n\nfrom keras.layers.core import Dense, Activation\nfrom keras.utils.test_utils import layer_test, keras_test\nfrom keras.layers import normalization\nfrom keras.models import Sequential\nfrom keras import backend as K\n\ninput_1 = np.arange(10)\ninput_2 = np.zeros(10)\ninput_3 = np.ones((10))\ninput_shapes = [np.ones((10, 10)), np.ones((10, 10, 10))]\n\n\n@keras_test\ndef basic_batchnorm_test():\n from keras import regularizers\n layer_test(normalization.BatchNormalization,\n kwargs={'mode': 1,\n 'gamma_regularizer': regularizers.l2(0.01),\n 'beta_regularizer': regularizers.l2(0.01)},\n input_shape=(3, 4, 2))\n layer_test(normalization.BatchNormalization,\n kwargs={'mode': 0},\n input_shape=(3, 4, 2))\n\n\n@keras_test\ndef test_batchnorm_mode_0_or_2():\n for mode in [0, 2]:\n model = Sequential()\n norm_m0 = normalization.BatchNormalization(mode=mode, input_shape=(10,), momentum=0.8)\n model.add(norm_m0)\n model.compile(loss='mse', optimizer='sgd')\n\n # centered on 5.0, variance 10.0\n X = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))\n model.fit(X, X, nb_epoch=4, verbose=0)\n out = model.predict(X)\n out -= K.eval(norm_m0.beta)\n out /= K.eval(norm_m0.gamma)\n\n assert_allclose(out.mean(), 0.0, atol=1e-1)\n assert_allclose(out.std(), 1.0, atol=1e-1)\n\n\n@keras_test\ndef test_batchnorm_mode_0_convnet():\n model = Sequential()\n norm_m0 = normalization.BatchNormalization(mode=0, axis=1, input_shape=(3, 4, 4), momentum=0.8)\n model.add(norm_m0)\n model.compile(loss='mse', optimizer='sgd')\n\n # centered on 5.0, variance 10.0\n X = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4))\n model.fit(X, X, nb_epoch=4, verbose=0)\n out = model.predict(X)\n out -= np.reshape(K.eval(norm_m0.beta), (1, 3, 1, 1))\n out /= np.reshape(K.eval(norm_m0.gamma), (1, 3, 1, 1))\n\n assert_allclose(np.mean(out, axis=(0, 2, 3)), 0.0, atol=1e-1)\n assert_allclose(np.std(out, axis=(0, 2, 3)), 1.0, atol=1e-1)\n\n\n@keras_test\ndef test_batchnorm_mode_1():\n norm_m1 = normalization.BatchNormalization(input_shape=(10,), mode=1)\n norm_m1.build(input_shape=(None, 10))\n\n for inp in [input_1, input_2, input_3]:\n out = (norm_m1.call(K.variable(inp)) - norm_m1.beta) / norm_m1.gamma\n assert_allclose(K.eval(K.mean(out)), 0.0, atol=1e-1)\n if inp.std() > 0.:\n assert_allclose(K.eval(K.std(out)), 1.0, atol=1e-1)\n else:\n assert_allclose(K.eval(K.std(out)), 0.0, atol=1e-1)\n\n\nif __name__ == '__main__':\n pytest.main([__file__])\n"
]
| [
[
"numpy.random.normal",
"numpy.zeros",
"numpy.ones",
"numpy.mean",
"numpy.std",
"numpy.arange"
]
]
|
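What the mode-0/2 tests above assert is the basic batch-norm invariant: the layer standardises its input before applying the learned beta/gamma shift, so undoing beta and gamma should recover roughly zero mean and unit variance. The same check in plain NumPy, independent of Keras:

import numpy as np

X = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
eps, gamma, beta = 1e-3, 1.5, 0.3   # arbitrary stand-ins for the learned parameters

out = gamma * (X - X.mean(axis=0)) / np.sqrt(X.var(axis=0) + eps) + beta
recovered = (out - beta) / gamma    # mirrors the test's undo of beta/gamma

print(abs(recovered.mean()) < 1e-1, abs(recovered.std() - 1.0) < 1e-1)  # True True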
liujie40/msticpy | [
"421a8044253f8092e71a1f1b0531fbb4b1bf7483"
]
| [
"msticpy/nbtools/nbinit.py"
]
| [
"# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\n\"\"\"Initialization for Jupyter Notebooks.\"\"\"\nimport importlib\nimport io\nimport os\nimport sys\nimport traceback\nimport warnings\nfrom contextlib import redirect_stdout\nfrom functools import wraps\nfrom pathlib import Path\nfrom typing import Any, Callable, Dict, List, Optional, Tuple\n\nimport ipywidgets as widgets\nimport pandas as pd\nimport yaml\nfrom IPython.core.interactiveshell import InteractiveShell\nfrom IPython.display import HTML, display\nfrom matplotlib import MatplotlibDeprecationWarning\n\ntry:\n import seaborn as sns\nexcept ImportError:\n sns = None\n\nfrom .._version import VERSION\nfrom ..common.azure_auth_core import check_cli_credentials, AzureCliStatus\nfrom ..common.check_version import check_version\nfrom ..common.exceptions import MsticpyException, MsticpyUserError\nfrom ..common.pkg_config import get_config, validate_config\nfrom ..common.utility import (\n check_and_install_missing_packages,\n check_kwargs,\n is_ipython,\n md,\n search_for_file,\n unit_testing,\n)\nfrom ..config import MpConfigFile\nfrom ..datamodel.pivot import Pivot\nfrom .azure_ml_tools import check_versions as check_versions_aml\nfrom .azure_ml_tools import is_in_aml\nfrom .user_config import load_user_defaults\n\n__version__ = VERSION\n__author__ = \"Ian Hellen\"\n\n\n_IMPORT_ERR_MSSG = \"\"\"\n<h2><font color='red'>One or more missing packages detected</h2>\nPlease correct these by installing the required packages, restart\nthe kernel and re-run the notebook.</font>\n<i>Package error: {err}</i><br>\n\"\"\"\n\n_IMPORT_MODULE_MSSG = \"\"\"\n<font color='red'>Error import module {module}</font>\n\"\"\"\n\n_MISSING_PKG_WARN = \"\"\"\n<h3><font color='orange'>Warning {package} is not installed or has an\nincorrect version</h3></font>\n\"\"\"\n\n_HELP_URIS = [\n (\n '<li><a href=\"https://github.com/Azure/Azure-Sentinel-Notebooks/blob/master/'\n 'A%20Getting%20Started%20Guide%20For%20Azure%20Sentinel%20ML%20Notebooks.ipynb\"'\n 'target=\"_blank\" rel=\"noopener noreferrer\">'\n \"Getting Started (notebook)</a></li>\"\n ),\n (\n '<li><a href=\"https://github.com/Azure/Azure-Sentinel-Notebooks/blob/master/'\n 'ConfiguringNotebookEnvironment.ipynb\"'\n 'target=\"_blank\" rel=\"noopener noreferrer\">'\n \"Configuring your Notebook environment (notebook)</a></li>\"\n ),\n (\n '<li><a href=\"https://msticpy.readthedocs.io/en/latest/getting_started/'\n 'msticpyconfig.html\"'\n 'target=\"_blank\" rel=\"noopener noreferrer\">'\n \"Configuring MSTICPy settings (doc)</a></li>\"\n ),\n (\n '<li><a href=\"https://msticpy.readthedocs.io/en/latest/getting_started/'\n 'SettingsEditor.html\"'\n 'target=\"_blank\" rel=\"noopener noreferrer\">'\n \"MSTICPy settings editor (doc)</a></li>\"\n ),\n (\n '<li><a href=\"https://github.com/Azure/Azure-Sentinel-Notebooks/blob/'\n 'master/TroubleShootingNotebooks.ipynb\"'\n 'target=\"_blank\" rel=\"noopener noreferrer\">'\n \"Trouble-Shooting Notebooks (notebook)</a></li>\"\n ),\n]\n\n_MISSING_MPCONFIG_ENV_ERR = f\"\"\"\n<h3><font color='orange'>Warning: no <i>msticpyconfig.yaml</i> found</h3></font>\nThe MSTICPYCONFIG environment variable is set but does not point\nto a valid file.<br>\nSome functionality (such as Threat Intel lookups) will not function 
without\nvalid configuration settings.<br>\nThe following resources will help you set up your configuration:\n<ul>{\"\".join(_HELP_URIS)}</ul>\n<br>You can load and run the first two of these from the Microsoft Sentinel\n<b>Notebooks</b> tab\n\"\"\"\n\n\n_PANDAS_REQ_VERSION = (0, 25, 0)\n\n\ndef _get_verbosity_setting() -> Callable[[Optional[int]], int]:\n \"\"\"Closure for holding trace setting.\"\"\"\n _verbosity = 1\n\n def _verbose(verbosity: Optional[int] = None) -> int:\n nonlocal _verbosity\n if verbosity is not None:\n _verbosity = verbosity\n return _verbosity\n\n return _verbose\n\n\n_VERBOSITY: Callable[[Optional[int]], int] = _get_verbosity_setting()\n\n_NB_IMPORTS = [\n dict(pkg=\"pandas\", alias=\"pd\"),\n dict(pkg=\"IPython\", tgt=\"get_ipython\"),\n dict(pkg=\"IPython.display\", tgt=\"display\"),\n dict(pkg=\"IPython.display\", tgt=\"HTML\"),\n dict(pkg=\"IPython.display\", tgt=\"Markdown\"),\n dict(pkg=\"ipywidgets\", alias=\"widgets\"),\n dict(pkg=\"pathlib\", tgt=\"Path\"),\n dict(pkg=\"matplotlib.pyplot\", alias=\"plt\"),\n dict(pkg=\"matplotlib\", tgt=\"MatplotlibDeprecationWarning\"),\n dict(pkg=\"numpy\", alias=\"np\"),\n]\nif sns is not None:\n _NB_IMPORTS.append(dict(pkg=\"seaborn\", alias=\"sns\"))\n\n_MP_IMPORTS = [\n dict(pkg=\"msticpy\"),\n dict(pkg=\"msticpy.data\", tgt=\"QueryProvider\"),\n dict(pkg=\"msticpy.nbtools.foliummap\", tgt=\"FoliumMap\"),\n dict(pkg=\"msticpy.common.utility\", tgt=\"md\"),\n dict(pkg=\"msticpy.common.utility\", tgt=\"md_warn\"),\n dict(pkg=\"msticpy.common.wsconfig\", tgt=\"WorkspaceConfig\"),\n dict(pkg=\"msticpy.datamodel.pivot\", tgt=\"Pivot\"),\n dict(pkg=\"msticpy.datamodel\", tgt=\"entities\"),\n dict(pkg=\"msticpy.vis\", tgt=\"mp_pandas_plot\"),\n]\n_MP_IMPORT_ALL = [\n dict(module_name=\"msticpy.nbtools\"),\n dict(module_name=\"msticpy.sectools\"),\n]\n\n_CONF_URI = (\n \"https://msticpy.readthedocs.io/en/latest/getting_started/msticpyconfig.html\"\n)\n\n_AZNB_GUIDE = (\n \"Please run the <i>Getting Started Guide for Azure Sentinel \"\n + \"ML Notebooks</i> notebook.\"\n)\n_AZ_CLI_WIKI_URI = (\n \"https://github.com/Azure/Azure-Sentinel-Notebooks/wiki/\"\n \"Caching-credentials-with-Azure-CLI\"\n)\n_CLI_WIKI_MSSG_GEN = (\n f\"For more information see <a href='{_AZ_CLI_WIKI_URI}'>\"\n \"Caching credentials with Azure CLI</>\"\n)\n_CLI_WIKI_MSSG_SHORT = (\n f\"see <a href='{_AZ_CLI_WIKI_URI}'>Caching credentials with Azure CLI</>\"\n)\n\ncurrent_providers: Dict[str, Any] = {} # pylint: disable=invalid-name\n\n\ndef _pr_output(*args):\n \"\"\"Output to IPython display or print.\"\"\"\n if not _VERBOSITY():\n return\n if is_ipython():\n display(HTML(\" \".join([*args, \"<br>\"]).replace(\"\\n\", \"<br>\")))\n else:\n print(*args)\n\n\ndef _err_output(*args):\n \"\"\"Output to IPython display or print - always output regardless of verbosity.\"\"\"\n if is_ipython():\n display(HTML(\" \".join([*args, \"<br>\"]).replace(\"\\n\", \"<br>\")))\n else:\n print(*args)\n\n\ndef init_notebook(\n namespace: Dict[str, Any],\n def_imports: str = \"all\",\n additional_packages: List[str] = None,\n extra_imports: List[str] = None,\n **kwargs,\n) -> bool:\n \"\"\"\n Initialize the notebook environment.\n\n Parameters\n ----------\n namespace : Dict[str, Any]\n Namespace (usually globals()) into which imports\n are to be populated.\n def_imports : str, optional\n Import default packages. 
By default \"all\".\n Possible values are:\n - \"all\" - import all packages\n - \"nb\" - import common notebook packages\n - \"msticpy\" - import msticpy packages\n - \"none\" (or any other value) don't load any default packages.\n additional_packages : List[str], optional\n Additional packages to be pip installed,\n by default None.\n Packages are specified by name only or version\n specification (e.g. \"pandas>=0.25\")\n user_install : bool, optional\n Install packages in the \"user\" rather than system site-packages.\n Use this option if you cannot or do not want to update the system\n packages.\n You should usually avoid using this option with standard Conda environments.\n extra_imports : List[str], optional\n Additional import definitions, by default None.\n Imports are specified as up to 3 comma-delimited values\n in a string:\n \"{source_pkg}, [{import_tgt}], [{alias}]\"\n `source_pkg` is mandatory - equivalent to a simple \"import xyz\"\n statement.\n `{import_tgt}` specifies an object to import from the package\n equivalent to \"from source_pkg import import_tgt\"\n `alias` allows renaming of the imported object - equivalent to\n the \"as alias\" part of the import statement.\n If you want to provide just `source_pkg` and `alias` include\n an additional placeholder comma: e.g. \"pandas, , pd\"\n friendly_exceptions : Optional[bool]\n Setting this to True causes msticpy to hook the notebook\n exception hander. Any exceptions derived from MsticpyUserException\n are displayed but do not produce a stack trace, etc.\n Defaults to system/user settings if no value is supplied.\n verbose : Union[int, bool], optional\n Controls amount if status output, by default 1\n 0 = No output\n 1 or False = Brief output (default)\n 2 or True = Detailed output\n no_config_check : bool, optional\n Skip the check for valid configuration. 
Default is False.\n verbosity : int, optional\n\n\n Returns\n -------\n bool\n True if successful\n\n Raises\n ------\n MsticpyException\n If extra_imports data format is incorrect.\n If package with required version check has no version\n information.\n\n \"\"\"\n global current_providers # pylint: disable=global-statement, invalid-name\n\n check_kwargs(\n kwargs,\n [\n \"user_install\",\n \"friendly_exceptions\",\n \"no_config_check\",\n \"verbosity\",\n \"verbose\",\n ],\n )\n user_install: bool = kwargs.pop(\"user_install\", False)\n friendly_exceptions: Optional[bool] = kwargs.pop(\"friendly_exceptions\", None)\n no_config_check: bool = kwargs.pop(\"no_config_check\", False)\n\n _set_verbosity(**kwargs)\n\n _pr_output(\"<hr><h4>Starting Notebook initialization...</h4>\")\n # Check Azure ML environment\n if is_in_aml():\n check_versions_aml(*_get_aml_globals(namespace))\n else:\n # If not in AML check and print version status\n stdout_cap = io.StringIO()\n with redirect_stdout(stdout_cap):\n check_version()\n _pr_output(stdout_cap.getvalue())\n\n # Handle required packages and imports\n _pr_output(\"Processing imports....\")\n imp_ok = _global_imports(\n namespace, additional_packages, user_install, extra_imports, def_imports\n )\n\n # Configuration check\n if no_config_check:\n conf_ok = True\n else:\n _pr_output(\"Checking configuration....\")\n conf_ok = _get_or_create_config()\n _check_azure_cli_status()\n\n # Notebook options\n _pr_output(\"Setting notebook options....\")\n _set_nb_options(namespace)\n\n # Set friendly exceptions\n if friendly_exceptions is None:\n friendly_exceptions = get_config(\"msticpy.FriendlyExceptions\")\n if friendly_exceptions:\n if _VERBOSITY() == 2: # type: ignore\n _pr_output(\"Friendly exceptions enabled.\")\n InteractiveShell.showtraceback = _hook_ipython_exceptions(\n InteractiveShell.showtraceback\n )\n\n # load pivots\n stdout_cap = io.StringIO()\n with redirect_stdout(stdout_cap):\n _load_pivots(namespace=namespace)\n _pr_output(stdout_cap.getvalue())\n\n # User defaults\n stdout_cap = io.StringIO()\n with redirect_stdout(stdout_cap):\n prov_dict = load_user_defaults()\n _pr_output(stdout_cap.getvalue())\n\n if prov_dict:\n namespace.update(prov_dict)\n current_providers = prov_dict\n _pr_output(\"Autoloaded components:\", \", \".join(prov_dict.keys()))\n\n # show any warnings\n init_status = _show_init_warnings(imp_ok, conf_ok)\n _pr_output(\"<h4>Notebook initialization complete</h4>\")\n return init_status\n\n\ndef _show_init_warnings(imp_ok, conf_ok):\n if imp_ok and conf_ok:\n return True\n md(\"<font color='orange'><h3>Notebook setup completed with some warnings.</h3>\")\n if not imp_ok:\n md(\"One or more libraries did not import successfully.\")\n md(_AZNB_GUIDE)\n if not conf_ok:\n md(\"One or more configuration items were missing or set incorrectly.\")\n md(\n _AZNB_GUIDE\n + f\" and the <a href='{_CONF_URI}'>msticpy configuration guide</a>.\"\n )\n md(\"This notebook may still run but with reduced functionality.\")\n return False\n\n\ndef _set_verbosity(**kwargs):\n \"\"\"Set verbosity of output from boolean or int `verbose` param.\"\"\"\n verbosity = 1\n verb_param = kwargs.pop(\"verbose\", kwargs.pop(\"verbosity\", 1))\n if isinstance(verb_param, bool):\n verbosity = 2 if verb_param else 1\n elif isinstance(verb_param, int):\n verbosity = min(2, max(0, verb_param))\n _VERBOSITY(verbosity)\n\n\ndef list_default_imports():\n \"\"\"List the default imports for `init_notebook`.\"\"\"\n for imp_group in (_NB_IMPORTS, _MP_IMPORTS):\n for 
imp_item in imp_group:\n if \"tgt\" in imp_item:\n import_line = f\"from {imp_item['pkg']} import {imp_item['tgt']}\"\n else:\n import_line = f\"import {imp_item['pkg']}\"\n if \"alias\" in imp_item:\n import_line += f\" as {imp_item['alias']}\"\n _pr_output(import_line)\n for imp_item in _MP_IMPORT_ALL:\n _pr_output(f\"from {imp_item['module_name']} import *\")\n\n\ndef _extract_pkg_name(\n imp_pkg: Optional[Dict[str, str]] = None,\n pkg: str = None,\n tgt: str = None,\n alias: str = None,\n) -> str:\n \"\"\"Return string representation of package import.\"\"\"\n if imp_pkg:\n pkg = imp_pkg.get(\"pkg\")\n tgt = imp_pkg.get(\"tgt\")\n alias = imp_pkg.get(\"alias\")\n import_item = f\"{pkg}.{tgt}\" if tgt else pkg\n if alias:\n import_item = f\"{alias} ({import_item})\"\n return import_item # type: ignore\n\n\nPY_VER_VAR = \"REQ_PYTHON_VER\"\nMP_VER_VAR = \"REQ_MSTICPY_VER\"\nMP_EXTRAS = \"REQ_MP_EXTRAS\"\n\n\ndef _get_aml_globals(namespace: Dict[str, Any]):\n \"\"\"Return global values if found.\"\"\"\n py_ver = namespace.get(PY_VER_VAR, \"3.6\")\n mp_ver = namespace.get(MP_VER_VAR, __version__)\n extras = namespace.get(MP_EXTRAS)\n return py_ver, mp_ver, extras\n\n\ndef _global_imports(\n namespace: Dict[str, Any],\n additional_packages: List[str] = None,\n user_install: bool = False,\n extra_imports: List[str] = None,\n def_imports: str = \"all\",\n):\n import_list = []\n imports = _build_import_list(def_imports)\n\n try:\n for imp_pkg in imports:\n _imp_from_package(nm_spc=namespace, **imp_pkg)\n import_list.append(_extract_pkg_name(imp_pkg))\n _check_and_reload_pkg(namespace, pd, _PANDAS_REQ_VERSION, \"pd\")\n\n if additional_packages:\n pkg_success = check_and_install_missing_packages(\n additional_packages, user=user_install\n )\n if not pkg_success:\n _err_output(\"One or more packages failed to install.\")\n _err_output(\n \"Please re-run init_notebook() with the parameter user_install=True.\"\n )\n # We want to force import lib to see anything that we've\n # just installed.\n importlib.invalidate_caches()\n if extra_imports:\n import_list.extend(\n _import_extras(nm_spc=namespace, extra_imports=extra_imports)\n )\n\n _pr_output(\"Imported:\", \", \".join(imp for imp in import_list if imp))\n return True\n except ImportError as imp_err:\n display(HTML(_IMPORT_ERR_MSSG.format(err=imp_err)))\n return False\n\n\ndef _build_import_list(def_imports: str) -> List[Dict[str, str]]:\n imports = []\n if def_imports.casefold() in [\"all\", \"nb\"]:\n imports.extend(_NB_IMPORTS)\n if def_imports.casefold() in [\"all\", \"msticpy\"]:\n imports.extend(_MP_IMPORTS)\n imports.extend(_MP_IMPORT_ALL)\n return imports\n\n\n_AZ_SENT_ERRS = [\n \"Missing or empty 'AzureSentinel' section\",\n \"Missing or empty 'Workspaces' key in 'AzureSentinel' section\",\n]\n\n\ndef _verify_no_azs_errors(errs):\n \"\"\"Verify none of the Microsoft Sentinel errors appear in `errs`.\"\"\"\n return all(az_err not in errs for az_err in _AZ_SENT_ERRS)\n\n\ndef _get_or_create_config() -> bool:\n # Cases\n # 1. Env var set and mpconfig exists -> goto 4\n # 2. Env var set and mpconfig file not exists - warn and continue\n # 3. search_for_file finds mpconfig -> goto 4\n # 4. if file and check_file_contents -> return ok\n # 5. search_for_file(config.json)\n # 6. If config.json -> import into mpconfig and save\n # 7. 
Error - no Microsoft Sentinel config\n mp_path = os.environ.get(\"MSTICPYCONFIG\")\n if mp_path and not Path(mp_path).is_file():\n _err_output(_MISSING_MPCONFIG_ENV_ERR)\n if not mp_path or not Path(mp_path).is_file():\n mp_path = search_for_file(\"msticpyconfig.yaml\", paths=[\".\", \"..\"])\n\n if mp_path:\n errs: List[str] = []\n try:\n std_out_cap = io.StringIO()\n with redirect_stdout(std_out_cap):\n errs, _ = validate_config(config_file=mp_path)\n if errs:\n _pr_output(std_out_cap.getvalue())\n if _verify_no_azs_errors(errs):\n # If the mpconfig has a Microsoft Sentinel config, return here\n return True\n # pylint: disable=broad-except\n except Exception as err:\n errs.append(f\"Exception while checking configuration:\\n{err}\")\n _pr_output(f\"Exception while checking configuration:\\n{type(err)} - {err}\")\n _pr_output(\"\\n\".join(traceback.format_tb(err.__traceback__)))\n _pr_output(\"Please report this to [email protected]\")\n # pylint: enable=broad-except\n\n # Look for a config.json\n config_json = search_for_file(\"config.json\", paths=[\".\", \"..\"])\n if config_json:\n # if we found one, use it to populate msticpyconfig.yaml\n _populate_config_to_mp_config(mp_path, config_json)\n return True\n\n _pr_output(\"No valid configuration for Microsoft Sentinel found.\")\n return False\n\n\ndef _populate_config_to_mp_config(mp_path, config_json):\n \"\"\"Populate new or existing msticpyconfig with settings from config.json.\"\"\"\n mp_path = mp_path or \"./msticpyconfig.yaml\"\n mp_config_convert = MpConfigFile(file=config_json)\n azs_settings = mp_config_convert.map_json_to_mp_ws()\n def_azs_settings = next(\n iter(azs_settings.get(\"AzureSentinel\", {}).get(\"Workspaces\", {}).values())\n )\n if def_azs_settings:\n mp_config_convert.settings[\"AzureSentinel\"][\"Workspaces\"][\n \"Default\"\n ] = def_azs_settings.copy()\n mssg = f\"Created '{mp_path}'' with Microsoft Sentinel settings.\"\n if Path(mp_path).exists():\n # If there is an existing file read it in\n mp_config_text = Path(mp_path).read_text(encoding=\"utf-8\")\n mp_config_settings = yaml.safe_load(mp_config_text)\n # update exist settings with the AzSent settings from config.json\n mp_config_settings.update(mp_config_convert.settings)\n # update MpConfigFile with the merged settings\n mp_config_convert.settings = mp_config_settings\n mssg = f\"Updated '{mp_path}'' with Microsoft Sentinel settings.\"\n # Save the file\n mp_config_convert.save_to_file(mp_path, backup=True)\n _pr_output(mssg)\n\n\ndef _set_nb_options(namespace):\n namespace[\"WIDGET_DEFAULTS\"] = {\n \"layout\": widgets.Layout(width=\"95%\"),\n \"style\": {\"description_width\": \"initial\"},\n }\n\n # Some of our dependencies (networkx) still use deprecated Matplotlib\n # APIs - we can't do anything about it, so suppress them from view\n warnings.simplefilter(\"ignore\", category=MatplotlibDeprecationWarning)\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n if sns:\n sns.set()\n pd.set_option(\"display.max_rows\", 100)\n pd.set_option(\"display.max_columns\", 50)\n pd.set_option(\"display.max_colwidth\", 100)\n\n os.environ[\"KQLMAGIC_LOAD_MODE\"] = \"silent\"\n # Kqlmagic config will use AZ CLI login if available\n kql_config = os.environ.get(\"KQLMAGIC_CONFIGURATION\", \"\")\n if \"try_azcli_login\" not in kql_config:\n kql_config = \";\".join([kql_config, \"try_azcli_login=True\"])\n os.environ[\"KQLMAGIC_CONFIGURATION\"] = kql_config\n\n\ndef _load_pivots(namespace):\n \"\"\"Load pivot functions.\"\"\"\n if not Pivot.current:\n 
pivot = Pivot()\n namespace[\"pivot\"] = pivot\n\n vt_pivot = None\n try:\n get_config(\"TIProviders.VirusTotal\")\n try:\n vt_pivot = importlib.import_module(\"msticpy.sectools.vtlookupv3.vt_pivot\")\n namespace[\"vt_pivot\"] = vt_pivot\n except ImportError:\n # Importing Vt3 libraries failed.\n pass\n except KeyError:\n # No VT settings detected\n pass\n if vt_pivot:\n vt_pivot.add_pivot_functions()\n\n\ndef _import_extras(nm_spc: Dict[str, Any], extra_imports: List[str]):\n added_imports = []\n if isinstance(extra_imports, str):\n extra_imports = [extra_imports]\n for imp_spec in extra_imports:\n params: List[Optional[str]] = [None, None, None]\n for idx, param in enumerate(imp_spec.split(\",\")):\n params[idx] = param.strip() or None\n\n if params[0] is None:\n raise MsticpyException(\n f\"First parameter in extra_imports is mandatory: {imp_spec}\"\n )\n _imp_from_package(nm_spc=nm_spc, pkg=params[0], tgt=params[1], alias=params[2])\n added_imports.append(\n _extract_pkg_name(pkg=params[0], tgt=params[1], alias=params[2])\n )\n return added_imports\n\n\ndef _imp_module(nm_spc: Dict[str, Any], module_name: str, alias: str = None):\n \"\"\"Import named module and assign to global alias.\"\"\"\n try:\n mod = importlib.import_module(module_name)\n except ImportError:\n _err_output(_IMPORT_MODULE_MSSG.format(module=module_name))\n return None\n if alias:\n nm_spc[alias] = mod\n else:\n nm_spc[module_name] = mod\n if _VERBOSITY() == 2: # type: ignore\n _pr_output(f\"{module_name} imported (alias={alias})\")\n return mod\n\n\ndef _imp_module_all(nm_spc: Dict[str, Any], module_name):\n \"\"\"Import all from named module add to globals.\"\"\"\n try:\n imported_mod = importlib.import_module(module_name)\n except ImportError:\n _err_output(_IMPORT_MODULE_MSSG.format(module=module_name))\n return\n for item in dir(imported_mod):\n if item.startswith(\"_\"):\n continue\n nm_spc[item] = getattr(imported_mod, item)\n if _VERBOSITY() == 2: # type: ignore\n _pr_output(f\"All items imported from {module_name}\")\n\n\ndef _imp_from_package(\n nm_spc: Dict[str, Any], pkg: str, tgt: str = None, alias: str = None\n):\n \"\"\"Import object or submodule from `pkg`.\"\"\"\n if not tgt:\n return _imp_module(nm_spc=nm_spc, module_name=pkg, alias=alias)\n try:\n # target could be a module\n obj = importlib.import_module(f\".{tgt}\", pkg)\n except ImportError:\n # if not, it must be an attribute (class, func, etc.)\n try:\n mod = importlib.import_module(pkg)\n except ImportError:\n _err_output(_IMPORT_MODULE_MSSG.format(module=pkg))\n return None\n obj = getattr(mod, tgt)\n if alias:\n nm_spc[alias] = obj\n else:\n nm_spc[tgt] = obj\n if _VERBOSITY() == 2: # type: ignore\n _pr_output(f\"{tgt} imported from {pkg} (alias={alias})\")\n return obj\n\n\ndef _check_and_reload_pkg(\n nm_spc: Dict[str, Any], pkg: Any, req_version: Tuple[int, ...], alias: str = None\n):\n \"\"\"Check package version matches required version and reload.\"\"\"\n warn_mssg = []\n pkg_name = pkg.__name__\n if not hasattr(pkg, \"__version__\"):\n raise MsticpyException(f\"Package {pkg_name} has no version data.\")\n pkg_version = tuple(int(v) for v in pkg.__version__.split(\".\"))\n if pkg_version < req_version:\n _err_output(_MISSING_PKG_WARN.format(package=pkg_name))\n resp = (\n input(\"Install the package now? 
(y/n)\") if not unit_testing() else \"y\"\n ) # nosec\n if resp.casefold().startswith(\"y\"):\n warn_mssg.append(f\"{pkg_name} was installed or upgraded.\")\n pip_ver = \".\".join(str(elem) for elem in req_version)\n pkg_spec = f\"{pkg_name}>={pip_ver}\"\n check_and_install_missing_packages(required_packages=[pkg_spec], user=True)\n\n if pkg_name in sys.modules:\n importlib.reload(pkg)\n else:\n _imp_module(nm_spc, pkg_name, alias=alias)\n if _VERBOSITY() == 2: # type: ignore\n _pr_output(f\"{pkg_name} imported version {pkg.__version__}\")\n return warn_mssg\n\n\ndef _hook_ipython_exceptions(func):\n \"\"\"Hooks the `func` and bypasses it if exception is MsticpyUserException.\"\"\"\n\n @wraps(func)\n def showtraceback(*args, **kwargs):\n \"\"\"Replace IPython showtraceback.\"\"\"\n # extract exception type, value and traceback\n e_type, _, _ = sys.exc_info()\n if e_type is not None and issubclass(e_type, MsticpyUserError):\n return None\n # otherwise run the original hook\n return func(*args, **kwargs)\n\n return showtraceback\n\n\ndef _check_azure_cli_status():\n \"\"\"Check for Azure CLI credentials.\"\"\"\n if not unit_testing():\n status, message = check_cli_credentials()\n if status == AzureCliStatus.CLI_OK:\n _pr_output(message)\n elif status == AzureCliStatus.CLI_NOT_INSTALLED:\n _pr_output(\n \"Azure CLI credentials not detected.\" f\" ({_CLI_WIKI_MSSG_SHORT})\"\n )\n elif message:\n _pr_output(\"\\n\".join([message, _CLI_WIKI_MSSG_GEN]))\n"
]
| [
[
"pandas.set_option"
]
]
|
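A typical bootstrap call for init_notebook above, run from a Jupyter cell; no_config_check=True is used here so the sketch runs without an msticpyconfig.yaml, and the extra_imports strings follow the "{source_pkg}, [{import_tgt}], [{alias}]" format documented in the docstring:

from msticpy.nbtools.nbinit import init_notebook

init_notebook(
    namespace=globals(),
    def_imports="all",                      # notebook + msticpy defaults
    extra_imports=["scipy.stats, zscore",   # from scipy.stats import zscore
                   "pandas, , pd"],         # import pandas as pd
    no_config_check=True,
    verbosity=1,
)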
aghezz1/hpipm | [
"937c9d7264ac1624ef5539bb4208a8b7c8d3e140"
]
| [
"examples/python/example_qcqp_getting_started.py"
]
| [
"###################################################################################################\n# #\n# This file is part of HPIPM. #\n# #\n# HPIPM -- High-Performance Interior Point Method. #\n# Copyright (C) 2019 by Gianluca Frison. #\n# Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl. #\n# All rights reserved. #\n# #\n# The 2-Clause BSD License #\n# #\n# Redistribution and use in source and binary forms, with or without #\n# modification, are permitted provided that the following conditions are met: #\n# #\n# 1. Redistributions of source code must retain the above copyright notice, this #\n# list of conditions and the following disclaimer. #\n# 2. Redistributions in binary form must reproduce the above copyright notice, #\n# this list of conditions and the following disclaimer in the documentation #\n# and/or other materials provided with the distribution. #\n# #\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND #\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED #\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE #\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR #\n# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND #\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS #\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #\n# #\n# Author: Gianluca Frison, gianluca.frison (at) imtek.uni-freiburg.de #\n# #\n###################################################################################################\n\nfrom hpipm_python import *\nfrom hpipm_python.common import *\nimport numpy as np\nimport time\nimport sys\nimport os\n\n\n\n# check that env.sh has been run\nenv_run = os.getenv('ENV_RUN')\nif env_run!='true':\n\tprint('ERROR: env.sh has not been sourced! 
Before executing this example, run:')\n\tprint('source env.sh')\n\tsys.exit(1)\n\ntravis_run = os.getenv('TRAVIS_RUN')\n#travis_run = 'true'\n\n\n\n# define flags\ncodegen_data = 1; # export qp data in the file ocp_qcqp_data.c for use from C examples\n\n\n\n# dim\nN = 5\nnx = 2\nnu = 1\n\nnbx = nx\nnq = 1\n\ndim = hpipm_ocp_qcqp_dim(N)\n\ndim.set('nx', nx, 0, N) # number of states\ndim.set('nu', nu, 0, N-1) # number of inputs\ndim.set('nbx', nbx, 0) # number of state bounds\ndim.set('nq', nq, N)\n\n# print to shell\n#dim.print_C_struct()\n# codegen\nif codegen_data:\n\tdim.codegen('ocp_qcqp_data.c', 'w')\n\n\n\n# data\nA = np.array([1, 1, 0, 1]).reshape(nx,nx)\nB = np.array([0, 1]).reshape(nx,nu)\n\nQ = np.array([1, 0, 0, 1]).reshape(nx,nx)\nR = np.array([1]).reshape(nu,nu)\nq = np.array([1, 1]).reshape(nx,1)\n\nJx = np.array([1, 0, 0, 1]).reshape(nbx,nx)\nx0 = np.array([1, 1]).reshape(nx,1)\n\nQq = 2*np.eye(nx)\nuq = np.array([0.1])\n\n\n\n# qp\nqp = hpipm_ocp_qcqp(dim)\n\nqp.set('A', A, 0, N-1)\nqp.set('B', B, 0, N-1)\nqp.set('Q', Q, 0, N)\nqp.set('R', R, 0, N-1)\nqp.set('q', q, 0, N)\nqp.set('Jx', Jx, 0)\nqp.set('lx', x0, 0)\nqp.set('ux', x0, 0)\nqp.set('Qq', Qq, N)\nqp.set('uq', uq, N)\n\n# print to shell\n#qp.print_C_struct()\n# codegen\nif codegen_data:\n\tqp.codegen('ocp_qcqp_data.c', 'a')\n\n\n# qp sol\nqp_sol = hpipm_ocp_qcqp_sol(dim)\n\n\n# set up solver arg\n#mode = 'speed_abs'\nmode = 'speed'\n#mode = 'balance'\n#mode = 'robust'\n# create and set default arg based on mode\narg = hpipm_ocp_qcqp_solver_arg(dim, mode)\n\n# create and set default arg based on mode\narg.set('mu0', 1e1)\narg.set('iter_max', 30)\narg.set('tol_stat', 1e-4)\narg.set('tol_eq', 1e-5)\narg.set('tol_ineq', 1e-5)\narg.set('tol_comp', 1e-5)\narg.set('reg_prim', 1e-12)\n\n# codegen\nif codegen_data:\n\targ.codegen('ocp_qcqp_data.c', 'a')\n\n# set up solver\nsolver = hpipm_ocp_qcqp_solver(dim, arg)\n\n\n# solve qp\nstart_time = time.time()\nsolver.solve(qp, qp_sol)\nend_time = time.time()\nif(travis_run!='true'):\n\tprint('solve time {:e}'.format(end_time-start_time))\n\n\nif(travis_run!='true'):\n\tqp_sol.print_C_struct()\n\n# extract and print sol\nif(travis_run!='true'):\n\tprint('u =')\n#u = qp_sol.get_u()\nu = qp_sol.get('u', 0, N)\nfor i in range(N+1):\n\tif(travis_run!='true'):\n\t\tprint(u[i])\n\nif(travis_run!='true'):\n\tprint('x =')\nfor i in range(N+1):\n\ttmp = qp_sol.get('x', i)\n\tif(travis_run!='true'):\n\t\tprint(tmp)\n\nxN = qp_sol.get('x', N)\nif(travis_run!='true'):\n\tprint('quadratic constr')\n\tprint(0.5*np.dot(xN.transpose(),np.dot(Qq,xN)))\n\n# get solver statistics\nstatus = solver.get('status')\nres_stat = solver.get('max_res_stat')\nres_eq = solver.get('max_res_eq')\nres_ineq = solver.get('max_res_ineq')\nres_comp = solver.get('max_res_comp')\niters = solver.get('iter')\nstat = solver.get('stat')\nif(travis_run!='true'):\n\tprint('\\nsolver statistics:\\n')\n\tprint('ipm return = {0:1d}\\n'.format(status))\n\tprint('ipm max res stat = {:e}\\n'.format(res_stat))\n\tprint('ipm max res eq = {:e}\\n'.format(res_eq))\n\tprint('ipm max res ineq = {:e}\\n'.format(res_ineq))\n\tprint('ipm max res comp = {:e}\\n'.format(res_comp))\n\tprint('ipm iter = {0:1d}\\n'.format(iters))\n\tprint('stat =')\n\tprint('\\titer\\talpha_aff\\tmu_aff\\t\\tsigma\\t\\talpha_prim\\talpha_dual\\tmu\\t\\tres_stat\\tres_eq\\t\\tres_ineq\\tres_comp')\n\tfor ii in range(iters+1):\n\t\tprint('\\t{:d}\\t{:e}\\t{:e}\\t{:e}\\t{:e}\\t{:e}\\t{:e}\\t{:e}\\t{:e}\\t{:e}\\t{:e}'.format(ii, stat[ii][0], stat[ii][1], 
stat[ii][2], stat[ii][3], stat[ii][4], stat[ii][5], stat[ii][6], stat[ii][7], stat[ii][8], stat[ii][9]))\n\tprint('')\n\n\n\nif status==0:\n\tprint('\\nsuccess!\\n')\nelse:\n\tprint('\\nSolution failed, solver returned status {0:1d}\\n'.format(status))\n\n\n\nsys.exit(int(status))\n\n"
]
| [
[
"numpy.array",
"numpy.dot",
"numpy.eye"
]
]
|
AitorBengoechea/sandy | [
"c55e4ee7b593389947a52e1adb85282dcb73dda3"
]
| [
"sandy/core/xs.py"
]
| [
"# -*- coding: utf-8 -*-\n\"\"\"\nThis module contains all classes and functions specific for the cross section\nclass `Xs` that acts as a container for energy-dependent tabulated cross\nsection values.\n\"\"\"\nimport os\nimport logging\nimport functools\n\nimport numpy as np\nimport pandas as pd\n\nimport sandy\n\n__author__ = \"Luca Fiorito\"\n__all__ = [\n \"Xs\",\n \"redundant_xs\",\n ]\n\npd.options.display.float_format = '{:.5e}'.format\n\nredundant_xs = {\n 107: range(800, 850),\n 106: range(750, 800),\n 105: range(700, 750),\n 104: range(650, 700),\n 103: range(600, 650),\n 101: range(102, 118),\n 18: (19, 20, 21, 38),\n 27: (18, 101),\n 4: range(50, 92),\n 3: (4, 5, 11, 16, 17, *range(22, 38), 41, 42, 44, 45),\n 1: (2, 3),\n 452: (455, 456)\n }\n\n\nclass Xs():\n \"\"\"\n Object for energy dependent cross sections.\n\n Attributes\n ----------\n data : `pandas.DataFrame`\n source of energy dependent tabulated cross sections\n\n Methods\n -------\n reshape\n Interpolate cross sections over new grid structure\n custom_perturbation\n Apply a custom perturbation to a given cross section\n to_endf6\n Update cross sections in `Endf6` instance\n from_endf6\n Extract cross sections/nubar from `Endf6` instance\n \"\"\"\n\n redundant_xs = {\n 107: range(800, 850),\n 106: range(750, 800),\n 105: range(700, 750),\n 104: range(650, 700),\n 103: range(600, 650),\n 101: range(102, 118),\n 18: (19, 20, 21, 38),\n 27: (18, 101),\n 4: range(50, 92),\n 3: (4, 5, 11, 16, 17, *range(22, 38), 41, 42, 44, 45),\n 1: (2, 3),\n 452: (455, 456)\n }\n\n _indexname = \"E\"\n _columnsnames = (\"MAT\", \"MT\")\n\n def __repr__(self):\n return self.data.__repr__()\n\n def __init__(self, *args, **kwargs):\n self.data = pd.DataFrame(*args, dtype=float, **kwargs)\n\n @property\n def data(self):\n \"\"\"\n Dataframe of energy-dependent tabulated cross sections.\n\n Attributes\n ----------\n index : `pandas.Index`\n energy grid in eV\n columns : `pandas.MultiIndex`\n MAT/MT indices\n values : `numpy.array`\n cross sections in barns\n\n Returns\n -------\n `pandas.DataFrame`\n tabulated xs\n\n Raises\n ------\n `sandy.Error`\n if energy grid is not monotonically increasing\n\n Examples\n --------\n >>> index = [1e-5, 2e7]\n >>> columns = pd.MultiIndex.from_tuples([(9437, 1)])\n >>> sandy.Xs([1, 2], index=index, columns=columns)\n MAT 9437\n MT 1\n E \n 1.00000e-05 1.00000e+00\n 2.00000e+07 2.00000e+00\n \"\"\"\n return self._data\n\n @data.setter\n def data(self, data):\n self._data = data.rename_axis(self.__class__._indexname, axis=0)\\\n .rename_axis(self.__class__._columnsnames, axis=1)\n self._data.index = self._data.index\n if not data.index.is_monotonic_increasing:\n raise sandy.Error(\"energy grid is not monotonically increasing\")\n\n def reshape(self, eg):\n \"\"\"\n Linearly interpolate cross sections over new grid structure.\n\n Parameters\n ----------\n eg : array-like object\n new energy grid\n\n Returns\n -------\n `Xs`\n cross section instance over new grid\n\n Warnings\n --------\n The new cross sections are tabulated over the union between\n the old and the given energy grid\n \"\"\"\n df = self.data\n enew = df.index.union(eg).astype(\"float\").values\n xsnew = sandy.shared.reshape_differential(\n df.index.values,\n df.values,\n enew,\n )\n df = pd.DataFrame(xsnew, index=enew, columns=df.columns)\n return self.__class__(df)\n\n def custom_perturbation(self, mat, mt, pert):\n \"\"\"\n Apply a custom perturbation to a given cross section identified by\n a MAT and MT number.\n\n Parameters\n 
----------\n mat : `int`\n MAT material number of the xs to which perturbations are to be\n applied\n mt : `int`\n MT reaction number of the xs to which perturbations are to be\n applied\n pert : `sandy.Pert`\n tabulated perturbations\n\n Returns\n -------\n `Xs`\n cross section instance with given series MAT/MT perturbed\n \"\"\"\n if (mat, mt) not in self.data:\n msg = f\"could not find MAT{mat}/MT{mt}, \" +\\\n \"perturbation will not be applied\"\n logging.warning(msg)\n u_xs = self\n else:\n enew = np.union1d(self.data.index.values, pert.right.index.values)\n u_xs = self.reshape(enew)\n u_pert = pert.reshape(enew)\n u_xs.data[(mat, mt)] = u_xs.data[(mat, mt)] * u_pert.right.values\n return self.__class__(u_xs.data)\n\n def filter_energies(self, energies):\n mask = self.data.index.isin(energies)\n data = self.data.loc[mask]\n return self.__class__(data)\n\n def to_endf6(self, endf6):\n \"\"\"\n Update cross sections in `Endf6` instance with those available in a\n `Xs` instance.\n\n .. warning:: only xs with `(MAT,MT)` combinations that are originally\n present in the `Endf6` instance are modififed, the others\n are discarded.\n The reason behind this is that to reconstruct a endf6\n section we need info that is not available in the `Xs`\n instance itself.\n\n Parameters\n ----------\n `endf6` : `sandy.Endf6`\n `Endf6` instance\n\n Returns\n -------\n `sandy.Endf6`\n `Endf6` instance with updated xs\n \"\"\"\n endf6new = self._xs_to_endf6(endf6)\n endf6new = self._nubar_to_endf6(endf6new)\n return endf6new\n\n def _nubar_to_endf6(self, endf6):\n data = endf6.data.copy()\n mf = 1\n for (mat, mt), xs in self.data.iteritems():\n # Must read original section to extract info not given in `Xs`\n if (mat, mf, mt) not in endf6.keys:\n continue\n sec = endf6.read_section(mat, mf, mt)\n if sec[\"LNU\"] != 2:\n raise sandy.Error(\"cannot update nubar if not in tabulated \"\n \"format\")\n ethresh = sec[\"E\"][0]\n xs = xs.where(xs.index >= ethresh).dropna()\n sec[\"E\"] = xs.index.values\n sec[\"NU\"] = xs.values\n # Assume only 1 interpolation region and it is linear\n sec[\"NBT\"] = [xs.size]\n sec[\"INT\"] = [2]\n data[mat, mf, mt] = sandy.write_mf1(sec)\n return sandy.Endf6(data)\n\n def _xs_to_endf6(self, endf6):\n data = endf6.data.copy()\n mf = 3\n for (mat, mt), xs in self.data.iteritems():\n # Must read original section to extract info not given in `Xs`\n # instance, e.g. QI, QM\n if (mat, mf, mt) not in endf6.keys:\n continue\n sec = endf6.read_section(mat, mf, mt)\n # Cut threshold xs\n ethresh = sec[\"E\"][0]\n xs = xs.where(xs.index >= ethresh).dropna()\n sec[\"E\"] = xs.index.values\n sec[\"XS\"] = xs.values\n # Assume all xs have only 1 interpolation region and it is linear\n sec[\"NBT\"] = [xs.size]\n sec[\"INT\"] = [2]\n data[mat, mf, mt] = sandy.write_mf3(sec)\n return sandy.Endf6(data)\n\n @classmethod\n def from_endf6(cls, endf6):\n \"\"\"\n Extract cross sections from `Endf6` instance.\n\n .. note:: xs are linearized on a unique grid.\n\n .. note:: missing points are linearly interpolated if inside the energy\n domain, else zero is assigned.\n\n Parameters\n ----------\n `endf6` : `sandy.Endf6`\n `Endf6` instance\n\n Returns\n -------\n `sandy.Xs`\n xs tabulated data\n\n Raises\n ------\n `sandy.Error`\n if interpolation scheme is not lin-lin\n `sandy.Error`\n if requested cross section was not found\n\n Warns\n -----\n `logging.warning`\n if duplicate energy points are found\n\n Notes\n -----\n .. note:: Cross sections are linearized on a unique grid.\n\n .. 
note:: Missing points are linearly interpolated if inside the energy\n domain, else zero is assigned.\n\n .. note:: Duplicate energy points will be removed, only the first one\n is kept.\n \"\"\"\n data = []\n # read cross sections\n tape = endf6.filter_by(listmf=[3])\n keep = \"first\"\n for mat, mf, mt in tape.data:\n sec = tape.read_section(mat, mf, mt)\n if sec['INT'] != [2]:\n logging.warning(f\"skip MAT{mat}/MF{mf}/MT{mt} \"\n \"because interpolation schme is not lin-lin\")\n continue\n xs = pd.Series(sec[\"XS\"], index=sec[\"E\"], name=(mat, mt)) \\\n .rename_axis(\"E\") \\\n .to_frame()\n mask_duplicates = xs.index.duplicated(keep=keep)\n for energy in xs.index[mask_duplicates]:\n logging.warning(\"found duplicate energy for \"\n f\"MAT{mat}/MF{mf}/MT{mt} \"\n f\"at {energy:.5e} MeV, keep only {keep} value\")\n xs = xs[~mask_duplicates]\n data.append(xs)\n # read nubar\n tape = endf6.filter_by(listmf=[1], listmt=[452, 455, 456])\n keep = \"first\"\n for mat, mf, mt in tape.data:\n sec = tape.read_section(mat, mf, mt)\n if sec[\"LNU\"] != 2:\n logging.warning(f\"skip MAT{mat}/MF{mf}/MT{mt} \"\n \"because not tabulated\")\n continue\n if sec['INT'] != [2]:\n logging.warning(f\"skip MAT{mat}/MF{mf}/MT{mt} \"\n \"because interpolation schme is not lin-lin\")\n continue\n xs = pd.Series(sec[\"NU\"], index=sec[\"E\"], name=(mat, mt)) \\\n .rename_axis(\"E\") \\\n .to_frame()\n mask_duplicates = xs.index.duplicated(keep=keep)\n for energy in xs.index[mask_duplicates]:\n logging.warning(\"found duplicate energy for \"\n f\"MAT{mat}/MF{mf}/MT{mt} \"\n f\"at {energy:.5e} MeV, keep only {keep} value\")\n xs = xs[~mask_duplicates]\n data.append(xs)\n if not data:\n raise sandy.Error(\"cross sections were not found\")\n # should we sort index?\n\n def foo(l, r):\n how = \"outer\"\n return pd.merge(l, r, left_index=True, right_index=True, how=how)\n\n df = functools.reduce(foo, data) \\\n .interpolate(method='slinear', axis=0) \\\n .fillna(0)\n return cls(df)\n\n def _reconstruct_sums(self, drop=True, inplace=False):\n \"\"\"\n Reconstruct redundant xs.\n \"\"\"\n df = self.data.copy()\n for mat in self.data.columns.get_level_values(\"MAT\").unique():\n for parent, daughters in sorted(redundant_xs.items(), reverse=True):\n daughters = [x for x in daughters if x in df[mat]]\n if daughters:\n df[mat,parent] = df[mat][daughters].sum(axis=1)\n # keep only mts present in the original file\n if drop:\n todrop = [x for x in df[mat].columns if x not in self.data[mat].columns]\n cols_to_drop = pd.MultiIndex.from_product([[mat], todrop])\n df.drop(cols_to_drop, axis=1, inplace=True)\n if inplace:\n self.data = df\n else:\n return Xs(df)\n# frame = self.copy()\n# for mat in frame.columns.get_level_values(\"MAT\").unique():\n# for parent, daughters in sorted(Xs.redundant_xs.items(), reverse=True):\n# daughters = [ x for x in daughters if x in frame[mat]]\n# if daughters:\n# frame[mat,parent] = frame[mat][daughters].sum(axis=1)\n# # keep only mts present in the original file\n# if drop:\n# todrop = [ x for x in frame[mat].columns if x not in self.columns.get_level_values(\"MT\") ]\n# frame.drop(pd.MultiIndex.from_product([[mat], todrop]), axis=1, inplace=True)\n# return Xs(frame)\n\n def _perturb(self, pert, method=2, **kwargs):\n \"\"\"Perturb cross sections/nubar given a set of perturbations.\n \n Parameters\n ----------\n pert : pandas.Series\n multigroup perturbations from sandy.XsSamples\n method : int\n * 1 : samples outside the range [0, 2*_mean_] are set to _mean_. 
\n * 2 : samples outside the range [0, 2*_mean_] are set to 0 or 2*_mean_ respectively if they fall below or above the defined range.\n \n Returns\n -------\n `sandy.formats.utils.Xs`\n \"\"\"\n frame = self.copy()\n for mat in frame.columns.get_level_values(\"MAT\").unique():\n if mat not in pert.index.get_level_values(\"MAT\"):\n continue\n for mt in frame[mat].columns.get_level_values(\"MT\").unique():\n lmtp = pert.loc[mat].index.get_level_values(\"MT\").unique()\n mtPert = None\n if lmtp.max() == 3 and mt >= 3:\n mtPert = 3\n elif mt in lmtp:\n mtPert = mt\n else:\n for parent, daughters in sorted(self.__class__.redundant_xs.items(), reverse=True):\n if mt in daughters and not list(filter(lambda x: x in lmtp, daughters)) and parent in lmtp:\n mtPert = parent\n break\n if not mtPert:\n continue\n P = pert.loc[mat,mtPert]\n P = P.reindex(P.index.union(frame[mat,mt].index)).ffill().fillna(1).reindex(frame[mat,mt].index)\n if method == 2:\n P = P.where(P>0, 0.0)\n P = P.where(P<2, 2.0)\n elif method == 1:\n P = P.where((P>0) & (P<2), 1.0)\n xs = frame[mat,mt].multiply(P, axis=\"index\")\n frame[mat,mt] = xs\n return Xs(frame).reconstruct_sums()\n\n @classmethod\n def _from_errorr(cls, errorr):\n \"\"\"\n Extract cross sections/nubar from ERRORR instance.\n\n Parameters\n ----------\n errorr : `sandy.formats.endf6.Errorr`\n ERRORR instance\n\n Returns\n -------\n `sandy.formats.utils.Xs`\n dataframe of cross sections in ERRORR file\n \"\"\"\n mat = errorr.mat[0]\n eg = errorr.energy_grid\n tape = errorr.filter_by(listmf=[3])\n listxs = []\n for (mat,mf,mt),text in tape.TEXT.iteritems():\n X = tape.read_section(mat, mf, mt)\n xs = pd.Series(\n X[\"XS\"],\n index=errorr.energy_grid[:-1],\n name=(mat,mt)\n ).rename_axis(\"E\").to_frame()\n listxs.append(xs)\n if not listxs:\n logging.warn(\"no xs/nubar data was found\")\n return pd.DataFrame()\n # Use concat instead of merge because indexes are the same\n frame = pd.concat(listxs, axis=1).reindex(eg, method=\"ffill\")\n return Xs(frame)\n\n @classmethod\n def from_file(cls, file, kind=\"endf6\"):\n \"\"\"\n Read cross sections directly from file.\n\n Parameters\n ----------\n file : `str`\n file name with relative or absolute path\n kind : `str`, optional, default is `'endf6'`\n type of file\n\n Returns\n -------\n `sandy.Xs`\n cross sections tabulated data\n\n Examples\n --------\n >>> file = os.path.join(sandy.data.__path__[0], \"h1.pendf\")\n >>> sandy.Xs.from_file(file).data.head()\n MAT 125 \n MT 1 2 102\n E \n 1.00000e-05 3.71363e+01 2.04363e+01 1.66999e+01\n 1.03125e-05 3.68813e+01 2.04363e+01 1.64450e+01\n 1.06250e-05 3.66377e+01 2.04363e+01 1.62013e+01\n 1.09375e-05 3.64045e+01 2.04363e+01 1.59682e+01\n 1.12500e-05 3.61812e+01 2.04363e+01 1.57448e+01\n \"\"\"\n if kind != \"endf6\":\n raise ValueError(\"sandy can only read cross sections from 'endf6' \"\n \"files\")\n tape = sandy.Endf6.from_file(file)\n return cls.from_endf6(tape)\n\n def eV2MeV(self):\n \"\"\"\n Produce dataframe of cross sections with index in MeV instead of eV.\n\n Returns\n -------\n `pandas.DataFrame`\n dataframe of cross sections with enery index in MeV\n\n Examples\n --------\n >>> index = [1e-5, 2e7]\n >>> columns = pd.MultiIndex.from_tuples([(9437, 1)])\n >>> sandy.Xs([1, 2], index=index, columns=columns).eV2MeV()\n MAT 9437\n MT 1\n E \n 1.00000e-11 1.00000e+00\n 2.00000e+01 2.00000e+00\n \"\"\"\n df = self.data.copy()\n df.index = df.index * 1e-6\n return df\n"
]
| [
[
"numpy.union1d",
"pandas.merge",
"pandas.DataFrame",
"pandas.MultiIndex.from_product",
"pandas.concat",
"pandas.Series"
]
]
|
tbennun/dace | [
"484f959c847feee048cd43ae5580f57e67d51671"
]
| [
"tests/numpy/math_test.py"
]
| [
"import numpy as np\nimport dace\n\nM, N = 24, 24\n\n\[email protected]\ndef exponent(A: dace.complex64[M, N], B: dace.complex64[M, N]):\n B[:] = exp(A)\n\[email protected]\ndef sine(A: dace.complex64[M, N], B: dace.complex64[M, N]):\n B[:] = sin(A)\n\[email protected]\ndef cosine(A: dace.complex64[M, N], B: dace.complex64[M, N]):\n B[:] = cos(A)\n\[email protected]\ndef square_root(A: dace.complex64[M, N], B: dace.complex64[M, N]):\n B[:] = sqrt(A)\n\[email protected]\ndef logarithm(A: dace.complex64[M, N], B: dace.complex64[M, N]):\n B[:] = log(A)\n\[email protected]\ndef conjugate(A: dace.complex64[M, N], B: dace.complex64[M, N]):\n B[:] = conj(A)\n\[email protected]\ndef real_part(A: dace.complex64[M, N], B: dace.float32[M, N]):\n B[:] = real(A)\n\[email protected]\ndef imag_part(A: dace.complex64[M, N], B: dace.float32[M, N]):\n B[:] = imag(A)\n\[email protected]\ndef exponent_m(A: dace.complex64[M, N], B: dace.complex64[M, N]):\n for i in dace.map[0:M]:\n B[i] = exp(A[i])\n\[email protected]\ndef exponent_t(A: dace.complex64[M, N], B: dace.complex64[M, N]):\n for i, j in dace.map[0:M, 0:N]:\n B[i, j] = exp(A[i, j])\n\n\nif __name__ == '__main__':\n A = np.random.rand(M, N).astype(np.float32) + 1j*np.random.rand(M, N).astype(np.float32)\n\n def validate(program, func, op, restype=None):\n if restype is None:\n restype = op.dtype \n daceB = np.zeros([M, N], dtype=restype)\n exec('{p}(op, daceB)'.format(p=program))\n numpyB = daceB.copy()\n exec('numpyB[:] = np.{f}(op)'.format(f=func))\n relerr = np.linalg.norm(numpyB - daceB) / np.linalg.norm(numpyB)\n print('Relative error:', relerr)\n assert relerr < 1e-5\n \n for p, f in {('exponent', 'exp'), ('sine', 'sin'), ('cosine', 'cos'),\n ('square_root', 'sqrt'), ('logarithm', 'log'),\n ('conjugate', 'conj')}:\n validate(p, f, A)\n \n for p, f in {('real_part', 'real'), ('imag_part', 'imag')}:\n validate(p, f, A, restype=np.float32)\n \n validate('exponent_m', 'exp', A)\n validate('exponent_t', 'exp', A)\n"
]
| [
[
"numpy.linalg.norm",
"numpy.random.rand",
"numpy.zeros"
]
]
|
HaydenAI/motpy | [
"da0a21c6a47d520d1f1edffce52e842121f20eb6"
]
| [
"examples/mot16_challange.py"
]
| [
"import os\nimport random\nimport time\n\nimport cv2\nimport fire\nimport pandas as pd\nfrom motpy import Detection, MultiObjectTracker\nfrom motpy.core import setup_logger\nfrom motpy.testing_viz import draw_detection, draw_track\nfrom motpy.utils import ensure_packages_installed\n\nensure_packages_installed(['cv2', 'pandas'])\n\n\n\"\"\" MOT16 tracking demo\n\n Usage:\n python examples/mot16_challange.py --dataset_root=~/Downloads/MOT16 --seq_id=11\n\n Note: this is just a demo, the script does not evaluate the tracking on MOT16 dataset.\n Also, since provided by MOT16 `predictions` do not represent (IMO) the current state\n of modern detectors, the demo utilizes ground truth + noise as input to the tracker;\n feel free to use `sel=det` to check the 'real' MOT16 predictions, but keep in mind that\n tracker is not optimized at all for such noisy predictions. \"\"\"\n\nlogger = setup_logger(__name__, is_main=True)\n\nCOL_NAMES = ['frame_idx', 'id', 'bb_left', 'bb_top', 'bb_width', 'bb_height', 'conf', 'x', 'y', 'z']\n\nALLOWED_SEQ_IDS = set(['02', '04', '05', '09', '10', '11', '13'])\n\n\ndef _read_video_frame(directory, frame_idx):\n \"\"\" reads MOT16 formatted frame \"\"\"\n fname = f'{frame_idx:06}.jpg'\n fpath = os.path.join(directory, fname)\n return cv2.imread(fpath)\n\n\ndef read_detections(path, drop_detection_prob: float = 0.0, add_detection_noise: float = 0.0):\n \"\"\" parses and converts MOT16 benchmark annotations to known [xmin, ymin, xmax, ymax] format \"\"\"\n path = os.path.expanduser(path)\n logger.debug(f'reading detections from {path}')\n if not os.path.isfile(path):\n raise ValueError('file does not exist')\n\n df = pd.read_csv(path, names=COL_NAMES)\n\n max_frame = df.frame_idx.max()\n for frame_idx in range(max_frame):\n detections = []\n for _, row in df[df.frame_idx == frame_idx].iterrows():\n if random.random() < drop_detection_prob:\n continue\n\n box = [row.bb_left, row.bb_top,\n row.bb_left + row.bb_width,\n row.bb_top + row.bb_height]\n\n if add_detection_noise > 0:\n for i in range(4):\n box[i] += random.uniform(-add_detection_noise, add_detection_noise)\n\n detections.append(Detection(box=box))\n\n yield frame_idx, detections\n\n\ndef get_miliseconds():\n return int(round(time.time() * 1000))\n\n\ndef run(\n dataset_root: str,\n fps: float = 30.0,\n split: str = 'train',\n seq_id: str = '04',\n sel: str = 'gt',\n drop_detection_prob: float = 0.1,\n add_detection_noise: float = 5.0):\n \"\"\" parses detections, loads frames, runs tracking and visualizes the tracked objects \"\"\"\n\n dataset_root = os.path.expanduser(dataset_root)\n if not os.path.isdir(dataset_root):\n logger.error('%s does not exist' % dataset_root)\n exit(-1)\n\n if str(seq_id) not in ALLOWED_SEQ_IDS:\n logger.error('unknown MOT16 sequence: %s' % str(seq_id))\n exit(-1)\n\n dataset_root2 = f'{dataset_root}/{split}/MOT16-{seq_id}'\n frames_dir = f'{dataset_root2}/img1'\n logger.info(f'reading video frames from {frames_dir}')\n\n dets_path = f'{dataset_root2}/{sel}/{sel}.txt'\n dets_gen = read_detections(\n dets_path,\n drop_detection_prob=drop_detection_prob,\n add_detection_noise=add_detection_noise)\n\n tracker = MultiObjectTracker(\n dt=1 / fps, tracker_kwargs={'max_staleness': 15},\n model_spec='constant_acceleration_and_static_box_size_2d',\n matching_fn_kwargs={'min_iou': 0.25})\n\n # tracking loop\n while True:\n # read detections for a given frame\n try:\n frame_idx, detections = next(dets_gen)\n except Exception as e:\n logger.warning('finished reading the sequence')\n 
logger.debug(f'exception: {e}')\n break\n\n # read the frame for a given index\n frame = _read_video_frame(frames_dir, frame_idx)\n if frame is None:\n continue\n\n # provide the MOT tracker with predicted detections\n t1 = get_miliseconds()\n active_tracks = tracker.step(detections)\n ms_elapsed = get_miliseconds() - t1\n logger.debug('step duration: %dms' % ms_elapsed)\n\n # visualize predictions and tracklets\n for det in detections:\n draw_detection(frame, det)\n\n for track in active_tracks:\n draw_track(frame, track)\n\n cv2.imshow('preview', frame)\n\n # stop execution on q\n key = cv2.waitKey(int(1000 / fps))\n if key == ord('q'):\n logger.info('early stopping')\n break\n\n\nif __name__ == \"__main__\":\n fire.Fire(run)\n"
]
| [
[
"pandas.read_csv"
]
]
|
Shetty073/tv-shows-ratings-visualization | [
"eb1a633f3e071b79e6482e177e0dfc76c25d3366"
]
| [
"plot.py"
]
| [
"import matplotlib.pyplot as plt\n\n# set the size of the graph image\nplt.rcParams[\"figure.figsize\"] = (19, 11)\n\n\ndef plot(p_type: str, x_axis: list, points: list, title: str, x_label: str, y_label: str, y_min_lim=0, y_max_lim=0,\n color='blue', marker='D',\n width=0.30):\n # if y_min_lim and y_max_lim are not provided then calculate them on the basis of the min and max values in\n # points variable\n if y_min_lim == 0 and y_max_lim == 0:\n y_min_lim = int(round(min(points) - 1))\n y_max_lim = int(round(max(points) + 1))\n plt.title(title)\n plt.xlabel(x_label, fontsize=3)\n plt.ylabel(y_label)\n if p_type == 'plot':\n plt.plot(x_axis, points, marker=marker, color=color)\n elif p_type == 'stem':\n plt.stem(x_axis, points, use_line_collection=True)\n elif p_type == 'bar':\n plt.bar(x_axis, points, width=width, color=color)\n # rotate the labels on x-axis\n plt.xticks(rotation=90)\n # set the minimum and maximum value of y-axis\n axes = plt.gca()\n axes.set_ylim([y_min_lim, y_max_lim])\n\n # Save the graph\n plt.savefig(f'graphs/{title}.png', bbox_inches='tight')\n # NOTE: If we don't place plt.show() after plt.savefig() then it will save all the graphs in one image\n plt.show()\n"
]
| [
[
"matplotlib.pyplot.stem",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.xticks"
]
]
|
abeermohamed1/ArabicNER | [
"d38225e2543632924171dfe996ff2c6d8930dca2"
]
| [
"abnlp/encoder/LM.py"
]
| [
"\"\"\"\n.. module:: LM\n :synopsis: language modeling\n \n.. moduleauthor:: Liyuan Liu\n\"\"\"\nimport torch\nimport torch.nn as nn\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\n\nimport torch_scope\n\nclass LM(nn.Module):\n \"\"\"\n The language model model.\n \n Parameters\n ----------\n rnn : ``torch.nn.Module``, required.\n The RNNs network.\n soft_max : ``torch.nn.Module``, required.\n The softmax layer.\n c_num : ``int`` , required.\n The number of words.\n c_dim : ``int`` , required.\n The dimension of word embedding.\n droprate : ``float`` , required\n The dropout ratio.\n label_dim : ``int`` , required.\n The input dimension of softmax. \n \"\"\"\n\n def __init__(self, rnn, c_num, c_dim, droprate):\n\n super(LM, self).__init__()\n\n self.rnn = rnn\n\n self.c_num = c_num\n self.c_dim = c_dim\n\n self.char_embed = nn.Embedding(c_num, c_dim)\n\n self.rnn_output = self.rnn.output_dim\n\n # self.decode_char = nn.Linear(self.rnn_output, c_num)\n # self.decode_type = nn.Linear(self.rnn_output, t_num)\n\n self.drop = nn.Dropout(p=droprate)\n\n self.forward = self.forward_rl\n\n def init_hidden(self):\n \"\"\"\n Initialize hidden states.\n \"\"\"\n self.rnn.init_hidden()\n\n def move_hidden(self, device):\n \"\"\"\n Initialize hidden states.\n \"\"\"\n self.rnn.move_hidden(device)\n\n def forward_rl(self, x):\n \"\"\"\n Calculate the loss.\n \"\"\"\n\n bat_size, seq_len = x['text'].size()\n\n x_emb = self.char_embed(x['text'])\n\n x_emb = self.drop(x_emb)\n\n x_emb = pack_padded_sequence(x_emb, x['len'], batch_first=True)\n rnn_out = self.rnn(x_emb)\n rnn_out, _ = pad_packed_sequence(rnn_out, batch_first = True)\n\n rnn_out = rnn_out.contiguous().view(-1, self.rnn_output)\n select_out = rnn_out.index_select(0, x['pos']).view(bat_size, -1, self.rnn_output)\n\n return select_out\n"
]
| [
[
"torch.nn.Dropout",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.nn.Embedding",
"torch.nn.utils.rnn.pack_padded_sequence"
]
]
|
AayushGrover/text_style_transfer | [
"3b8f3fe3080f60be4c190c8e1d8140b71aa4c492"
]
| [
"src/losses.py"
]
| [
"import torch\nimport torch.nn as nn\n\ndef loss_semantic_meaning(cls_input, cls_generated):\n # Could also use cross entropy, WMD, cosine distance (recommended) or any other distance function\n cos = nn.CosineSimilarity()\n return cos(cls_input, cls_generated)\n\ndef loss_sentiment(sentiment_target, sentiment_predicted):\n # Cross entropy needs the target as the target class\n cross_entropy = nn.CrossEntropyLoss()\n _, sentiment_target = sentiment_target.max(dim=1)\n return cross_entropy(sentiment_predicted, sentiment_target)\n\ndef loss_mse_word_embeddings(input_word_embeddings, output_word_embeddings):\n mse = nn.MSELoss()\n return mse(input_word_embeddings, output_word_embeddings)"
]
| [
[
"torch.nn.MSELoss",
"torch.nn.CosineSimilarity",
"torch.nn.CrossEntropyLoss"
]
]
|
lzharthur/MSML_loss_facenet | [
"d480eec44774c2f5591144e8f32e1ce4ca7ebdc4"
]
| [
"aug_img.py"
]
| [
"#!/usr/bin/env python\n# -*- coding_utf-8 -*-\n\"\"\"\n Author: Zhenghan Lee\n Date:\n\"\"\"\nimport cv2\nimport os\nimport numpy as np\n\ndef flip_img(img):\n return cv2.flip(img, 1) # 1 represent flip horizontal\n\ndef img_adjust_brightness(img,brightness):\n\n return np.uint8(np.clip((1.5 * img + brightness), 0, 255))\n\nif __name__ == '__main__':\n root = '/Users/lees/Desktop/siamese_network/img/lfw'\n num = 1\n for i in os.listdir(root):\n if i == '.DS_Store':\n pass\n else:\n for j in os.listdir(os.path.join(root,i)):\n if j =='.DS_Store':\n pass\n else:\n img_path = os.path.join(root,i,j)\n img = cv2.imread(img_path)\n #for k in [0.2,-0.1]:\n #new_img = img_adjust_brightness(img, k)\n new_img = flip_img(img)\n new_name = j.split('.')[0] + '_new' +'.png'\n cv2.imwrite(os.path.join(root,i,new_name),new_img)\n"
]
| [
[
"numpy.clip"
]
]
|
liangzongchang/bert_seq2seq | [
"e45b63dc2d9611de5b5f89754591473e33b52a9c"
]
| [
"test/test.py"
]
| [
"import torch \nimport torch.nn as nn \nimport sys\nsys.path.append(\"/Users/xingzhaohu/Downloads/code/python/ml/ml_code/bert/bert_seq2seq\")\nfrom torch.optim import Adam\nimport pandas as pd\nimport numpy as np\nimport os\nimport json\nimport time\nimport bert_seq2seq\nfrom bert_seq2seq.tokenizer import Tokenizer, load_chinese_base_vocab\nfrom bert_seq2seq.utils import load_bert, load_model_params, load_recent_model\n\nauto_title_model = \"./state_dict/bert_model_poem.bin\"\n\nif __name__ == \"__main__\":\n vocab_path = \"./state_dict/roberta_wwm_vocab.txt\" # roberta模型字典的位置\n # model_name = \"roberta\" # 选择模型名字\n # # model_path = \"./state_dict/bert-base-chinese-pytorch_model.bin\" # roberta模型位\n # # 加载字典\n # word2idx, keep_tokens = load_chinese_base_vocab(vocab_path, simplfied=True)\n # # 定义模型\n # bert_model = load_bert(word2idx, model_name=model_name)\n # load_model_params(bert_model, \"./state_dict/roberta_wwm_pytorch_model.bin\", keep_tokens=keep_tokens)\n\n # for name, params in bert_model.named_parameters():\n # print(name)\n\n checkpoint = torch.load(\"./state_dict/roberta_wwm_pytorch_model.bin\")\n for k, v in checkpoint.items():\n print(k)\n\n\n"
]
| [
[
"torch.load"
]
]
|
jacob720/Savu | [
"7afc9e10ea4944ceb39a83574f3142f025cf81e1"
]
| [
"savu/plugins/utils.py"
]
| [
"# Copyright 2014 Diamond Light Source Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\n.. module:: utils\n :platform: Unix\n :synopsis: Utilities for plugin management\n\n.. moduleauthor:: Mark Basham <[email protected]>\n\n\"\"\"\n\nimport os\nimport re\nimport sys\nimport ast\nimport logging\nimport savu\nimport importlib\nimport inspect\nimport itertools\n\nfrom collections import OrderedDict\nimport numpy as np\n\nfrom savu.plugins.loaders.utils.my_safe_constructor import MySafeConstructor\n\n# can I remove these from here?\n\nload_tools = {}\nplugins = {}\nplugins_path = {}\ndawn_plugins = {}\ncount = 0\n\nOUTPUT_TYPE_DATA_ONLY = 0\nOUTPUT_TYPE_METADATA_ONLY = 1\nOUTPUT_TYPE_METADATA_AND_DATA = 2\n\n\ndef register_plugin(clazz):\n \"\"\"decorator to add plugins to a central register\"\"\"\n plugins[clazz.__name__] = clazz\n if clazz.__module__.split(\".\")[0] != \"savu\":\n plugins_path[clazz.__name__] = clazz.__module__\n return clazz\n\n\ndef dawn_compatible(plugin_output_type=OUTPUT_TYPE_METADATA_AND_DATA):\n def _dawn_compatible(clazz):\n \"\"\"\n decorator to add dawn compatible plugins and details to a central\n register\n \"\"\"\n dawn_plugins[clazz.__name__] = {}\n try:\n plugin_path = sys.modules[clazz.__module__].__file__\n # looks out for .pyc files\n dawn_plugins[clazz.__name__]['path2plugin'] = plugin_path.split('.py')[0] + '.py'\n dawn_plugins[clazz.__name__]['plugin_output_type'] = _plugin_output_type\n except Exception as e:\n print(e)\n return clazz\n\n # for backwards compatibility, if decorator is invoked without brackets...\n if inspect.isclass(plugin_output_type):\n _plugin_output_type = OUTPUT_TYPE_METADATA_AND_DATA\n return _dawn_compatible(plugin_output_type)\n else:\n _plugin_output_type = plugin_output_type\n return _dawn_compatible\n\n\ndef get_plugin(plugin_name, params, exp, check=False):\n \"\"\"Get an instance of the plugin class and populate default parameters.\n\n :param plugin_name: Name of the plugin to import\n :type plugin_name: str.\n :returns: An instance of the class described by the named plugin.\n \"\"\"\n logging.debug(\"Importing the module %s\", plugin_name)\n instance = load_class(plugin_name)()\n instance.initialise(params, exp, check=check)\n return instance\n\n\ndef _get_cls_name(name):\n return \"\".join(x.capitalize() for x in name.split(\".\")[-1].split(\"_\"))\n\n\ndef load_class(name, cls_name=None):\n \"\"\"Returns an instance of the class associated with the module name.\n\n :param name: Module name or path to a module file\n :returns: An instance of the class associated with module.\n \"\"\"\n path = name if os.path.dirname(name) else None\n name = os.path.basename(os.path.splitext(name)[0]) if path else name\n cls_name = _get_cls_name(name) if not cls_name else cls_name\n if cls_name in plugins.keys():\n return plugins[cls_name]\n if path:\n mod = importlib.machinery.SourceFileLoader(name, path).load_module()\n else:\n mod = importlib.import_module(name)\n return getattr(mod, cls_name)\n\n\ndef plugin_loader(exp, 
plugin_dict, check=False):\n logging.debug(\"Running plugin loader\")\n try:\n plugin = get_plugin(plugin_dict['id'],\n plugin_dict['data'],\n exp,\n check=check)\n except Exception as e:\n logging.error(\"failed to load the plugin\")\n logging.error(e)\n # re-raise the original error\n raise\n\n if check:\n exp.meta_data.plugin_list._set_datasets_list(plugin)\n\n logging.debug(\"finished plugin loader\")\n return plugin\n\n\ndef get_tools_class(plugin_tools_id, cls=None):\n \"\"\"Load the plugin tools class\n\n :param plugin_tools_id: plugin tools module name\n :param cls: Class to initialise\n :return:\n \"\"\"\n if plugin_tools_id == \"savu.plugins.plugin_tools\":\n plugin_tools_id = \"savu.plugins.base_tools\"\n if cls:\n return load_class(plugin_tools_id)(cls)\n else:\n return load_class(plugin_tools_id)\n\n\ndef get_plugins_paths(examples=True):\n \"\"\"\n This gets the plugin paths, but also adds any that are not on the\n pythonpath to it.\n \"\"\"\n plugins_paths = OrderedDict()\n\n # Add the savu plugins paths first so it is overridden by user folders\n savu_plugins_path = os.path.join(savu.__path__[0], 'plugins')\n savu_plugins_subpaths = [d for d in next(os.walk(savu_plugins_path))[1] \\\n if d != \"__pycache__\"]\n for path in savu_plugins_subpaths:\n plugins_paths[os.path.join(savu_plugins_path, path)] = \\\n ''.join(['savu.plugins.', path, '.'])\n\n # get user, environment and example plugin paths\n user_path = [os.path.join(os.path.expanduser(\"~\"), \"savu_plugins\")]\n env_paths = os.getenv(\"SAVU_PLUGINS_PATH\", \"\").replace(\" \", \"\").split(\":\")\n templates = \"../plugin_examples/plugin_templates\"\n eg_path = [os.path.join(savu.__path__[0], templates)] if examples else []\n\n for ppath in env_paths + user_path + eg_path:\n if os.path.exists(ppath):\n plugins_paths[ppath] = os.path.basename(ppath) + \".\"\n if ppath not in sys.path:\n sys.path.append(os.path.dirname(ppath))\n\n return plugins_paths\n\n\ndef is_template_param(param):\n \"\"\"Identifies if the parameter should be included in an input template\n and returns the default value of the parameter if it exists.\n \"\"\"\n start = 0\n ptype = \"local\"\n if isinstance(param, str):\n param = param.strip()\n if not param.split(\"global\")[0]:\n ptype = \"global\"\n start = 6\n first, last = param[start], param[-1]\n if first == \"<\" and last == \">\":\n param = param[start + 1 : -1]\n param = None if not param else param\n try:\n param = eval(param)\n except:\n pass\n return [ptype, param]\n return False\n\n\ndef blockPrint():\n \"\"\" Disable printing to stdout \"\"\"\n import tempfile\n\n fname = tempfile.mkdtemp() + \"/unwanted_prints.txt\"\n sys.stdout = open(fname, \"w\")\n\n\ndef enablePrint():\n \"\"\" Enable printing to stdout \"\"\"\n sys.stdout = sys.__stdout__\n\n\ndef parse_config_string(string):\n regex = r\"[\\[\\]\\, ]+\"\n split_vals = [_f for _f in re.split(regex, string) if _f]\n delimitors = re.findall(regex, string)\n split_vals = [repr(a.strip()) for a in split_vals]\n zipped = itertools.zip_longest(delimitors, split_vals)\n string = \"\".join([i for l in zipped for i in l if i is not None])\n try:\n return ast.literal_eval(string)\n except ValueError:\n return ast.literal_eval(parse_array_index_as_string(string))\n\n\ndef parse_array_index_as_string(string):\n p = re.compile(r\"'\\['\")\n for m in p.finditer(string):\n offset = m.start() - count + 3\n end = string[offset:].index(\"']\") + offset\n string = string[:end] + \"]'\" + string[end + 2 :]\n string = string.replace(\"'['\", 
\"[\")\n return string\n\n\ndef param_to_str(param_name, keys):\n \"\"\"Check the parameter is within the provided list and\n return the string name.\n \"\"\"\n if param_name.isdigit():\n param_name = int(param_name)\n if param_name <= len(keys):\n param_name = keys[param_name - 1]\n else:\n raise ValueError(\n \"This parameter number is not valid for this plugin\"\n )\n elif param_name not in keys:\n raise Exception(\"This parameter is not present in this plug in.\")\n\n return param_name\n\n\ndef set_order_by_visibility(parameters, level=False):\n \"\"\"Return an ordered list of parameters depending on the\n visibility level\n\n :param parameters: The dictionary of parameters\n :param level: The visibility level\n :return: An ordered list of parameters\n \"\"\"\n data_keys = []\n basic_keys = []\n interm_keys = []\n adv_keys = []\n for k, v in parameters.items():\n if v[\"display\"] == \"on\":\n if v[\"visibility\"] == \"datasets\":\n data_keys.append(k)\n if v[\"visibility\"] == \"basic\":\n basic_keys.append(k)\n if v[\"visibility\"] == \"intermediate\":\n interm_keys.append(k)\n if v[\"visibility\"] == \"advanced\":\n adv_keys.append(k)\n if level:\n if level == \"datasets\":\n keys = data_keys\n elif level == \"basic\":\n keys = basic_keys\n elif level == \"intermediate\":\n keys = basic_keys + interm_keys + data_keys\n elif level == \"advanced\":\n keys = basic_keys + interm_keys + adv_keys + data_keys\n else:\n keys = basic_keys + interm_keys + adv_keys + data_keys\n else:\n keys = basic_keys + interm_keys + adv_keys + data_keys\n\n return keys\n\n\ndef convert_multi_params(param_name, value):\n \"\"\"Check if value is a multi parameter and check if each item is valid.\n Change from the input multi parameter string to a list\n\n :param param_name: Name of the parameter\n :param value: Parameter value\n :return: List or unchanged value\n \"\"\"\n error_str = \"\"\n multi_parameters = (\n isinstance(value, str) and (\";\" in value) and param_name != \"preview\"\n )\n if multi_parameters:\n value = value.split(\";\")\n isdict = re.findall(r\"[\\{\\}]+\", value[0])\n if \":\" in value[0] and not isdict:\n seq = value[0].split(\":\")\n try:\n seq = [ast.literal_eval(s) for s in seq]\n if len(value) == 0:\n error_str = (\n f\"No values for tuned parameter \"\n f\"'{param_name}' ensure start:stop:step; values \"\n f\"are valid\"\n )\n elif len(seq) == 2:\n value = list(np.arange(seq[0], seq[1]))\n elif len(seq) > 2:\n value = list(np.arange(seq[0], seq[1], seq[2]))\n else:\n error_str = \"Ensure start:stop:step; values are valid.\"\n if not value:\n # Don't allow an empty list\n raise ValueError\n except:\n error_str = \"Ensure start:stop:step; values are valid.\"\n val_list = (\n parse_config_string(value) if isinstance(value, str) else value\n )\n # Remove blank list entries\n # Change type to int, float or str\n val_list = [_dumps(val) for val in value if val]\n value = val_list\n return value, error_str\n\n\ndef _dumps(val):\n \"\"\"Replace any missing quotes around variables\n Change the string to an integer, float, tuple, list, str, dict\n \"\"\"\n import yaml\n # Prevent conversion from on/off to boolean\n yaml.SafeLoader.add_constructor(\n \"tag:yaml.org,2002:bool\", MySafeConstructor.add_bool\n )\n if isinstance(val, str):\n try:\n # Safely evaluate an expression node or a string containing\n # a Python literal or container display\n value = ast.literal_eval(val)\n return value\n except Exception:\n pass\n try:\n isdict = re.findall(r\"[\\{\\}]+\", val)\n val = 
_sexagesimal_check(val, isdict, remove=False)\n value = yaml.safe_load(val)\n return _sexagesimal_check(value, isdict)\n except Exception:\n val = _sexagesimal_check(val, isdict)\n pass\n try:\n isdict = re.findall(r\"[\\{\\}]+\", val)\n # Matches { } between one and unlimited number of times\n if isdict:\n if isinstance(val, dict):\n value_dict = {}\n for k, v in val.items():\n v = v.replace(\"[\", \"'[\").replace(\"]\", \"]'\")\n value_dict[k] = _dumps(\n yaml.safe_load(v)\n )\n return value_dict\n else:\n value = val.replace(\"[\", \"'[\").replace(\"]\", \"]'\")\n return _dumps(yaml.safe_load(value))\n else:\n value = parse_config_string(val)\n return value\n except Exception:\n if len(val.split(\";\")) > 1:\n value = val\n return value\n else:\n raise Exception(\"Invalid string %s\" % val)\n else:\n value = val\n return value\n\n\ndef _sexagesimal_check(val, isdict, remove=True):\n \"\"\"To avoid sexagesimal values being evaluated, replace colon\n values temporarily\n\n :param val:\n :param isdict: True if braces {} found\n :return: value\n \"\"\"\n if isinstance(val, str) and not isdict:\n if remove:\n val = val.replace(\":?\", \":\")\n else:\n val = val.replace(\":\", \":?\")\n return val\n\n\ndef check_valid_dimension(dim, prev_list):\n \"\"\"Check the dimension is within the correct range\"\"\"\n if not 0 < dim < 21:\n raise Exception(\"Please use a dimension between 1 and 20.\")\n if prev_list and (dim > len(prev_list)):\n raise Exception(\n \"You have not specified enough dimensions \"\n \"inside the preview parameter.\"\n )\n return True\n\n\ndef is_slice_notation(value):\n \"\"\"Return True if the value is made up of multiple\"\"\"\n return isinstance(value, str) and (\":\" in value)\n\n\ndef create_dir(file_path):\n \"\"\"Check if directories provided exist at this file path. If they don't\n create the directories.\n \"\"\"\n directory = os.path.dirname(file_path)\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n\ndef indent_multi_line_str(text, indent_level=1, justify=False):\n text = text.split(\"\\n\")\n # Remove additional spacing on the left side so that text aligns\n if justify is False:\n text = [(\" \" * 4 * indent_level) + line for line in text]\n else:\n text = [(\" \" * 4 * indent_level) + line.lstrip() for line in text]\n text = \"\\n\".join(text)\n return text\n\n\ndef indent(text, indent_level=1):\n text = (\" \" * 4 * indent_level) + text\n return text\n\n\ndef sort_alphanum(_list):\n \"\"\"Sort list numerically and alphabetically\n *While maintaining original list value types*\n\n :param _list: Input list to be sorted\n :return: List sorted by number and letter alphabetically\n \"\"\"\n return sorted(_list, key=_alphanum)\n\n\ndef _str_to_int(_str):\n \"\"\"Convert the input str to an int if possible\n\n :param _str: input string\n :return: integer if text is a digit, else string\n \"\"\"\n return int(_str) if _str.isdigit() else _str\n\n\ndef _alphanum(_str):\n \"\"\"Split string into numbers and letters\n\n :param _str:\n :return: list of numbers and letters\n \"\"\"\n char_list = re.split(\"([0-9]+)\", _str)\n return [_str_to_int(c) for c in char_list]\n"
]
| [
[
"numpy.arange"
]
]
|
Cabletutu/analytics-zoo | [
"ee6487f6c126e34647914d5a78ca9bf531695f8d"
]
| [
"pyzoo/zoo/orca/learn/mxnet/mxnet_runner.py"
]
| [
"#\n# Copyright 2018 Analytics Zoo Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport os\nimport time\nimport logging\nimport subprocess\nimport ray.services\nimport mxnet as mx\nimport numpy as np\nfrom mxnet import gluon\nfrom functools import reduce\nfrom zoo.ray.utils import to_list\n\n\nclass MXNetRunner(object):\n \"\"\"Manages a MXNet model for training.\"\"\"\n\n def setup_distributed(self, env, config, model_creator, loss_creator=None,\n validation_metrics_creator=None, eval_metrics_creator=None):\n logging.basicConfig(level=logging.INFO) # This can print log messages to console.\n self.logger = logging.getLogger()\n assert isinstance(config, dict), \"config must be a dict\"\n for param in [\"batch_size\", \"optimizer\", \"optimizer_params\", \"log_interval\"]:\n assert param in config, param + \" must be specified in config\"\n self.config = config\n self.model_creator = model_creator\n self.loss_creator = loss_creator\n self.validation_metrics_creator = validation_metrics_creator\n self.eval_metircs_creator = eval_metrics_creator\n self.is_worker = False\n env[\"DMLC_NODE_HOST\"] = self.get_node_ip()\n if env[\"DMLC_ROLE\"] == \"worker\":\n self.is_worker = True\n\n if self.is_worker:\n os.environ.update(env)\n self.kv = mx.kv.create(\"dist_sync\")\n # Set seed so that the model on each worker is initialized with the same weights\n if \"seed\" in self.config:\n mx.random.seed(self.config[\"seed\"])\n\n self.model = self.model_creator(self.config)\n self.loss = self.loss_creator(self.config) if self.loss_creator else None\n self.eval_metrics = self.eval_metircs_creator(self.config) \\\n if self.eval_metircs_creator else None\n self.val_metrics = self.validation_metrics_creator(self.config) \\\n if self.validation_metrics_creator else None\n # For BaseModule, use symbolic API. 
Otherwise, use imperative API.\n # TODO: change Gluon Trainer to Estimator API?\n if not isinstance(self.model, mx.module.BaseModule):\n assert self.loss, \"Loss not defined for gluon model, please specify loss_creator\"\n self.trainer = gluon.Trainer(self.model.collect_params(), self.config[\"optimizer\"],\n optimizer_params=self.config[\"optimizer_params\"],\n kvstore=self.kv)\n else: # Trainer is not needed for symbolic API.\n self.trainer = None\n else: # server\n # Need to use the environment on each raylet process for the correct python environment.\n # TODO: Need to kill this process manually?\n modified_env = os.environ.copy()\n modified_env.update(env)\n # For servers, just import mxnet and no need to do anything else\n subprocess.Popen(\"python -c 'import mxnet'\", shell=True, env=modified_env)\n\n def train(self, train_data, val_data=None, nb_epoch=1, train_resize_batch_num=None):\n \"\"\"Train the model and update the model parameters.\"\"\"\n stats = dict()\n if self.is_worker:\n from zoo.orca.data.shard import RayPartition\n if isinstance(train_data, RayPartition):\n data, label = get_data_label(train_data.get_data())\n train_data_iter = mx.io.NDArrayIter(data=data, label=label,\n batch_size=self.config[\"batch_size\"],\n shuffle=True)\n if train_resize_batch_num is not None:\n train_data_iter = mx.io.ResizeIter(train_data_iter, train_resize_batch_num)\n if val_data:\n data_val, label_val = get_data_label(val_data.get_data())\n val_data_iter = mx.io.NDArrayIter(data=data_val, label=label_val,\n batch_size=self.config[\"batch_size\"],\n shuffle=True)\n else:\n val_data_iter = None\n else: # data_creator functions; should return Iter or DataLoader\n train_data_iter = train_data(self.config, self.kv)\n val_data_iter = val_data(self.config, self.kv) if val_data else None\n start_time = time.time()\n if self.trainer: # Imperative API\n for epoch in range(nb_epoch):\n train_data_iter.reset()\n if self.eval_metrics:\n self.eval_metrics.reset() # metrics will accumulate for one batch\n batch_start_time = time.time()\n epoch_start_time = time.time()\n for i, batch in enumerate(train_data_iter):\n data = gluon.utils.split_and_load(\n batch.data[0].astype(\"float32\"), ctx_list=[mx.cpu()], batch_axis=0)\n label = gluon.utils.split_and_load(\n batch.label[0].astype(\"float32\"), ctx_list=[mx.cpu()], batch_axis=0)\n outputs = []\n Ls = []\n from mxnet import autograd as ag\n with ag.record():\n for x, y in zip(data, label):\n z = self.model(x) # forward\n L = self.loss(z, y)\n # store the loss and do backward on a batch for better speed\n Ls.append(L)\n outputs.append(z)\n ag.backward(Ls)\n self.trainer.step(batch.data[0].shape[0])\n if self.eval_metrics:\n self.eval_metrics.update(label, outputs)\n if not (i + 1) % self.config[\"log_interval\"]:\n # This would be logged on driver for each worker process.\n iteration_log = \\\n \"Epoch[%d] Batch[%d] Speed: %f samples/sec %s=%f\" \\\n % (epoch, i,\n self.config[\"batch_size\"] / (time.time() - batch_start_time),\n \"loss\", Ls[0].asnumpy().mean())\n if self.eval_metrics:\n names, accs = self.eval_metrics.get()\n names, accs = to_list(names), to_list(accs)\n for name, acc in zip(names, accs):\n iteration_log += \" %s=%f\" % (name, acc)\n self.logger.info(iteration_log)\n batch_start_time = time.time()\n # Epoch time log\n self.logger.info(\"[Epoch %d] time cost: %f\" %\n (epoch, time.time() - epoch_start_time))\n # Epoch metrics log on train data\n if self.eval_metrics:\n epoch_train_log = \"[Epoch %d] training: \" % epoch\n names, accs = 
self.eval_metrics.get()\n names, accs = to_list(names), to_list(accs)\n for name, acc in zip(names, accs):\n epoch_train_log += \"%s=%f \" % (name, acc)\n self.logger.info(epoch_train_log)\n # Epoch metrics log on validation data if any:\n if val_data_iter:\n self.val_metrics.reset()\n val_data_iter.reset()\n for batch in val_data_iter:\n data = gluon.utils.split_and_load(\n batch.data[0].astype(\"float32\", copy=False),\n ctx_list=[mx.cpu()], batch_axis=0)\n label = gluon.utils.split_and_load(\n batch.label[0].astype(\"float32\", copy=False),\n ctx_list=[mx.cpu()], batch_axis=0)\n outputs = [self.model(X) for X in data]\n self.val_metrics.update(label, outputs)\n epoch_val_log = \"[Epoch %d] validation: \" % epoch\n names, accs = self.val_metrics.get()\n names, accs = to_list(names), to_list(accs)\n for name, acc in zip(names, accs):\n epoch_val_log += \"%s=%f \" % (name, acc)\n self.logger.info(epoch_val_log)\n # TODO: save checkpoints\n if self.eval_metrics:\n names, accs = self.eval_metrics.get()\n names, accs = to_list(names), to_list(accs)\n for name, acc in zip(names, accs):\n stats[name] = acc\n else: # Symbolic API\n # TODO: seems no history (i.e. validation accuracy) returned by fit?\n if \"init\" not in self.config:\n from mxnet.initializer import Uniform\n self.config[\"init\"] = Uniform(0.01) # This is the default value for MXNet\n if self.eval_metrics is None:\n self.eval_metrics = 'acc'\n self.model.fit(train_data=train_data_iter,\n num_epoch=nb_epoch,\n initializer=self.config[\"init\"],\n kvstore=self.kv,\n optimizer=self.config[\"optimizer\"],\n optimizer_params=self.config[\"optimizer_params\"],\n eval_data=val_data_iter,\n eval_metric=self.eval_metrics,\n validation_metric=self.val_metrics,\n batch_end_callback=mx.callback.Speedometer(\n self.config[\"batch_size\"], self.config[\"log_interval\"]),\n epoch_end_callback=None if \"model\" not in self.config\n else mx.callback.do_checkpoint(self.config[\"model\"]))\n epoch_time = time.time() - start_time\n stats[\"epoch_time\"] = epoch_time\n if isinstance(train_data, RayPartition):\n del train_data\n if val_data and isinstance(val_data, RayPartition):\n del val_data\n return stats\n\n def shutdown(self):\n \"\"\"Attempts to shut down the runner.\"\"\"\n del self.logger\n if self.is_worker:\n del self.kv\n del self.model\n del self.trainer\n del self.loss\n del self.eval_metrics\n del self.val_metrics\n\n def get_node_ip(self):\n \"\"\"Returns the IP address of the current node.\"\"\"\n if \"node_ip\" not in self.__dict__:\n self.node_ip = ray.services.get_node_ip_address()\n return self.node_ip\n\n def find_free_port(self):\n \"\"\"Finds a free port on the current node.\"\"\"\n if \"port\" not in self.__dict__:\n from zoo.orca.learn.mxnet.utils import find_free_port\n self.port = find_free_port()\n return self.port\n\n\ndef get_data_label(partition_data):\n def combine_dict(dict1, dict2):\n return {key: np.concatenate((value, dict2[key]), axis=0)\n for (key, value) in dict1.items()}\n\n def combine_list(list1, list2):\n return [np.concatenate((list1[index], list2[index]), axis=0)\n for index in range(0, len(list1))]\n\n data_list = [data['x'] for data in partition_data]\n label_list = [data['y'] for data in partition_data]\n if isinstance(partition_data[0]['x'], dict):\n data = reduce(lambda dict1, dict2: combine_dict(dict1, dict2), data_list)\n elif isinstance(partition_data[0]['x'], np.ndarray):\n data = reduce(lambda array1, array2: np.concatenate((array1, array2), axis=0),\n data_list)\n elif 
isinstance(partition_data[0]['x'], list):\n        data = reduce(lambda list1, list2: combine_list(list1, list2), data_list)\n\n    if isinstance(partition_data[0]['y'], dict):\n        label = reduce(lambda dict1, dict2: combine_dict(dict1, dict2), label_list)\n    elif isinstance(partition_data[0]['y'], np.ndarray):\n        label = reduce(lambda array1, array2: np.concatenate((array1, array2), axis=0),\n                       label_list)\n    elif isinstance(partition_data[0]['y'], list):\n        label = reduce(lambda list1, list2: combine_list(list1, list2), label_list)\n\n    return data, label\n"
]
| [
[
"numpy.concatenate"
]
]
|
jucaleb4/msppy | [
"7e053cc99e805f2fa60675a28481109dfae3eb0b"
]
| [
"msppy/utils/measure.py"
]
| [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: lingquan\n\"\"\"\nimport numpy\n\ndef Expectation(obj, grad, p, sense):\n if p is not None:\n return (numpy.dot(p, obj),numpy.dot(p, grad))\n else:\n return (numpy.mean(obj),numpy.mean(grad, axis=0))\n\ndef Expectation_AVaR(obj, grad, p, a, l, sense):\n n_samples, n_states = grad.shape\n if p is None:\n p = numpy.ones(n_samples)/n_samples\n objAvg = numpy.dot(p, obj)\n gradAvg = numpy.dot(p, grad)\n# assert(type(gradAvg) == list and len(gradAvg) == len(p))\n objSortedIndex = numpy.argsort(obj)\n if sense == -1:\n objSortedIndex = objSortedIndex[::-1]\n ## store the index of 1-alpha percentile ##\n tempSum = 0\n for index in objSortedIndex:\n tempSum += p[index]\n if tempSum >= 1 - a:\n kappa = index\n break\n# kappa = objSortedIndex[int((1 - a) * sampleSize)]\n ## obj=(1-lambda)*objAvg+lambda(obj_kappa+1/alpha*avg((obj_kappa - obj_l)+))\n objLP = (1 - l) * objAvg + l * obj[kappa]\n ## grad=(1-lambda)*gradAvg+lambda(grad_kappa+1/alpha*avg((pos))\n gradLP = (1 - l) * gradAvg + l * grad[kappa]\n\n gradTerm = numpy.zeros((n_samples, n_states))\n objTerm = numpy.zeros(n_samples)\n for j in range(n_samples):\n if sense*(obj[j] - obj[kappa]) >= 0:\n gradTerm[j] = sense * (grad[j] - grad[kappa])\n objTerm[j] = sense * (obj[j] - obj[kappa])\n objLP += sense * l * numpy.dot(p, objTerm) / a\n gradLP += sense * l * numpy.dot(p, gradTerm) / a\n return (objLP, gradLP)\n"
]
| [
[
"numpy.dot",
"numpy.zeros",
"numpy.ones",
"numpy.mean",
"numpy.argsort"
]
]
|
charlesblakemore/sem_processing | [
"1ec8ac4b3eff4886f7a86529c590eed7cc15cbba"
]
| [
"scripts/calibration.py"
]
| [
"import numpy as np\nimport argparse\nimport cv2\n\nimport scipy.optimize as opti\n\nimport matplotlib.pyplot as plt\n\nfrom bead_util import find_all_fnames\nimport sem_util as su\n\n\ngauss_kernel = 10\n\n\nimg_dir = '/Users/manifestation/Stanford/beads/photos/sem/20200624_gbeads-7_5um/'\nsubstr = '7_5um_calibration_15000x_uc'\n\nsavepath = '../calibrations/20200624_{:s}.npy'.format(substr)\n\nplot_threshold = False\nplot_contours = True\n\naverage_concentric_contours = True\n\nimgs, _ = find_all_fnames(img_dir, ext='.tif', substr=substr)\n\n\n\n\n\n\n\ndef distance(p1, p2):\n return np.sqrt( (p1[0]-p2[0])**2 + (p1[1]-p2[1])**2 )\n\ndef angle(p1, p2):\n return np.arctan2((p1[1]-p2[1]), (p1[0]-p2[0]))\n\ndef gauss(x, A, mu, sigma):\n return A * np.exp( -(x - mu)**2 / (2 * sigma**2))\n\n\n\n\n\n\nall_dists = []\nfor filename in imgs:\n\n imgobj = su.SEMImage()\n imgobj.load(filename)\n\n imgobj.rough_calibrate(plot=False)\n\n scale_pixels_err = 1.0\n\n\n grating_pixels = 1.0e-6 / imgobj.scale_fac # exact 1um grating\n #grating_pixels = 10.0 / derp_resolution # approx 10um grating\n print(grating_pixels)\n\n\n temp = imgobj.img_arr * (256.0 / (2.0**imgobj.bit_depth))\n blur = cv2.blur(temp.astype(np.uint8),(gauss_kernel,gauss_kernel))\n\n ret, th1 = cv2.threshold(blur,0,255,\\\n cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n\n\n if plot_threshold:\n plt.figure()\n plt.imshow(th1, cmap='gray')\n plt.show()\n \n input()\n\n\n contours, hierarchy = \\\n cv2.findContours(th1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n img = cv2.drawContours(th1, contours, -1, 126, 1)\n\n pts = []\n for contour in contours:\n moments = cv2.moments(contour)\n if moments['m00'] == 0:\n continue\n cx = float(moments['m10']/moments['m00'])\n cy = float(moments['m01']/moments['m00'])\n pts.append([cx, cy])\n\n npts = len(pts)\n pts = np.array(pts)\n\n\n\n if average_concentric_contours:\n centers = []\n for i, pt1 in enumerate(pts):\n for j, pt2 in enumerate(pts):\n if i == j:\n continue\n dist = distance(pt1, pt2)\n if dist < 0.5 * grating_pixels:\n centers.append( np.mean( np.array([pt1, pt2]), axis=0 ) )\n centers = np.array(centers)\n\n else:\n centers = np.copy(pts)\n\n\n npts = len(centers)\n\n if plot_contours:\n plt.figure()\n plt.imshow(img, cmap='gray', zorder=1)\n plt.scatter(centers[:,0], centers[:,1], marker='X', color='r', s=25, zorder=2)\n plt.show()\n\n input()\n\n\n dists = []\n dist_arr = np.zeros((npts, npts))\n for i, pt1 in enumerate(centers):\n for j, pt2 in enumerate(centers):\n dist = distance(pt1, pt2)\n dist_arr[i,j] = dist\n\n if dist < 0.85 * grating_pixels:\n continue\n elif dist < 1.15 * grating_pixels:\n dists.append(dist)\n elif dist < 1.6 * grating_pixels:\n dists.append(dist / np.sqrt(2))\n else:\n continue\n\n # plt.figure()\n # plt.hist(dist_arr.flatten(), 1000)\n\n # plt.figure()\n # plt.hist(dists, 20)\n\n # plt.show()\n\n\n all_dists += dists\n\n\nmean_dist = np.mean(all_dists)\nstd_dist = np.std(all_dists)\n\nstd_err = std_dist / np.sqrt(len(all_dists))\n\n\n\n\n\np0 = [np.max(all_dists), mean_dist, std_dist]\n\n\nplt.figure()\nvals, bin_edge, patches = plt.hist(all_dists, bins=50)\nbin_loc = bin_edge[:-1] + 0.5 * (bin_edge[1] - bin_edge[0])\n\nplt.axvline(mean_dist, color='r')\n\npopt, pcov = opti.curve_fit(gauss, bin_loc, vals, p0=p0)\n\n\nplot_bins = np.linspace(bin_edge[0], bin_edge[-1], 200)\nplot_vals = gauss(plot_bins, *popt)\n\nplt.plot(plot_bins, plot_vals)\n\n\nmean_dist_2 = popt[1]\nstd_err_2 = popt[2] / np.sqrt(len(all_dists))\n\n\n\n# Compute resolution knowing 1um grating 
is 1.000 +- 0.005 um (NIST traceable)\nresolution = 1.0 / mean_dist\nresolution_err = resolution * np.sqrt((std_err/mean_dist)**2 + (0.005/1.0)**2)\n\nresolution_2 = 1.0 / mean_dist_2\nresolution_err_2 = resolution_2 * np.sqrt((std_err_2/mean_dist_2)**2 + (0.005/1.0)**2)\n\n# Compute resolution knowing 10um grating is 9.983 +- 0.0189 um (NIST traceable)\n# resolution = 9.983 / mean_dist\n# resolution_err = resolution * np.sqrt((std_err/mean_dist)**2 + (0.0189/9.983)**2)\n\n# resolution_2 = 9.983 / mean_dist_2\n# resolution_err_2 = resolution_2 * np.sqrt((std_err_2/mean_dist_2)**2 + (0.0189/9.983)**2)\n\n\nprint()\nprint('N : ', len(all_dists))\nprint()\nprint()\n\nprint('Raw Mean separation : ', mean_dist)\nprint('Raw Std. Error on Mean : ', std_err)\nprint()\n\nprint('Gauss Mean separation : ', mean_dist_2)\nprint('Gauss Std. Error on Mean : ', std_err_2)\nprint()\n\nprint('Resolution [um/pixel] : ', resolution)\nprint('Gauss Res. [um/pixel] : ', resolution_2)\n\n\n\n\n\n\nout_arr = [mean_dist, std_err, mean_dist_2, std_err_2, \\\n resolution, resolution_err, resolution_2, resolution_err_2]\n\n\n\nnp.save(savepath, out_arr)\n\n\n\n\n\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
]
| [
[
"numpy.copy",
"numpy.exp",
"numpy.mean",
"numpy.max",
"numpy.save",
"numpy.sqrt",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.std",
"matplotlib.pyplot.hist",
"numpy.arctan2",
"matplotlib.pyplot.show",
"matplotlib.pyplot.axvline",
"scipy.optimize.curve_fit",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.scatter",
"numpy.linspace",
"matplotlib.pyplot.imshow"
]
]
|
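The calibration script in the record above turns the fitted mean grating spacing (in pixels) into a um-per-pixel scale and propagates the NIST pitch tolerance in quadrature with the statistical error. A minimal standalone sketch of that propagation step (the function name and the example numbers are ours, not from the record):

```python
import numpy as np

def grating_resolution(mean_dist_px, std_err_px, pitch_um=1.0, pitch_err_um=0.005):
    """um/pixel scale from a fitted grating spacing, errors added in quadrature."""
    resolution = pitch_um / mean_dist_px
    resolution_err = resolution * np.sqrt(
        (std_err_px / mean_dist_px) ** 2 + (pitch_err_um / pitch_um) ** 2)
    return resolution, resolution_err

# e.g. a 1 um pitch measured at ~150 px with a 0.2 px standard error
print(grating_resolution(150.0, 0.2))
```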
siavash9000/famousfaces | [
"672c2fe6c6c8406622614a34f1def2c6b08e7a3c"
]
| [
"facecruncher/src/embedd_face.py"
]
| [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport time\nfrom scipy import misc\nimport io\nimport tensorflow as tf\nimport numpy as np\nimport sys\nimport os\nimport argparse\nimport facenet\nimport align.detect_face\nimport glob\nfrom six.moves import xrange\nimport logging\n\n\nclass FaceEmbedder(object):\n def __init__(self, model_dir=\"/facecruncher/src/pretrained_models\", gpu_memory_fraction=1.0):\n self.face_graph = tf.Graph()\n start_time = time.time()\n with self.face_graph.as_default():\n self.face_session = tf.Session()\n with self.face_session.as_default():\n facenet.load_model(model_dir)\n logging.warning(\"loading facenet model took {}\".format(time.time() - start_time))\n\n self.minsize = 20 # minimum size of face\n self.threshold = [ 0.6, 0.7, 0.7 ] # three steps's threshold\n self.factor = 0.709 # scale factor\n start_time = time.time()\n with tf.Graph().as_default():\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)\n sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))\n with sess.as_default():\n self.pnet, self.rnet, self.onet = align.detect_face.create_mtcnn(sess, None)\n logging.warning(\"loading face allignement model took{}\".format(time.time() - start_time))\n\n def embedd_face(self, image_path, image_size=160, margin=44,\n is_aligned=False, gpu_memory_fraction=1.0):\n with self.face_graph.as_default():\n with self.face_session.as_default():\n start_time = time.time()\n images_placeholder = tf.get_default_graph().get_tensor_by_name(\"input:0\")\n embeddings = tf.get_default_graph().get_tensor_by_name(\"embeddings:0\")\n phase_train_placeholder = tf.get_default_graph().get_tensor_by_name(\"phase_train:0\")\n if is_aligned is True:\n images = facenet.load_data(image_path, False, False, image_size)\n else:\n images = self.load_and_align_data(image_path, image_size, margin)\n feed_dict = { images_placeholder: images, phase_train_placeholder:False }\n embed = self.face_session.run(embeddings, feed_dict=feed_dict)\n logging.warning(\"complete runtime {}\".format(time.time() - start_time))\n return embed[0]\n\n def load_and_align_data(self, image, image_size, margin):\n img = misc.imread(io.BytesIO(image))\n img_size = np.asarray(img.shape)[0:2]\n try:\n bounding_boxes, _ = align.detect_face.detect_face(img, self.minsize, self.pnet, self.rnet,\n self.onet, self.threshold, self.factor)\n except:\n logging.warning('Could not detect face in image.')\n bounding_boxes = None\n if bounding_boxes is not None and bounding_boxes.size:\n det = np.squeeze(bounding_boxes[0,0:4])\n bb = np.zeros(4, dtype=np.int32)\n bb[0] = np.maximum(det[0]-margin/2, 0)\n bb[1] = np.maximum(det[1]-margin/2, 0)\n bb[2] = np.minimum(det[2]+margin/2, img_size[1])\n bb[3] = np.minimum(det[3]+margin/2, img_size[0])\n cropped = img[bb[1]:bb[3],bb[0]:bb[2],:]\n aligned = misc.imresize(cropped, (image_size, image_size), interp='bilinear')\n prewhitened = facenet.prewhiten(aligned)\n images = np.stack([prewhitened])\n else:\n images = np.stack([np.zeros((160, 160, 3))])\n return images\n"
]
| [
[
"numpy.asarray",
"numpy.zeros",
"numpy.minimum",
"tensorflow.get_default_graph",
"tensorflow.Graph",
"tensorflow.Session",
"scipy.misc.imresize",
"tensorflow.ConfigProto",
"numpy.stack",
"numpy.squeeze",
"tensorflow.GPUOptions",
"numpy.maximum"
]
]
|
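A hypothetical caller for the FaceEmbedder class in the record above; the image path is an assumption, and the embedding dimension depends on which pretrained FaceNet model sits in model_dir:

```python
# Sketch only: assumes the module above is importable as `embedd_face`
# and that a pretrained model exists at the default model_dir.
from embedd_face import FaceEmbedder

with open("some_face.jpg", "rb") as f:  # hypothetical image file
    image_bytes = f.read()

embedder = FaceEmbedder()
embedding = embedder.embedd_face(image_bytes)  # detects, aligns, then embeds
print(embedding.shape)
```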
togoon/akshare | [
"edbf8fc3ff1f029d22990dfb33d5d610458200b7"
]
| [
"akshare/index/zh_stock_index_csindex.py"
]
| [
"# -*- coding:utf-8 -*-\n# /usr/bin/env python\n\"\"\"\nDate: 2020/07/22 15:00\nDesc: 中证指数-所有指数-历史行情数据\nhttp://www.csindex.com.cn/zh-CN/indices/index-detail/H30374\n\"\"\"\nimport pandas as pd\nimport requests\n\n\ndef stock_zh_index_hist_csindex(symbol: str = \"H30374\") -> pd.DataFrame:\n \"\"\"\n 中证指数获取某个指数的 5 年历史行情数据\n P.S. 只有收盘价,正常情况下不应使用该接口,除非指数只有中证网站有\n http://www.csindex.com.cn/zh-CN/indices/index-detail/H30374\n :param symbol: 指数代码; e.g., H30374\n :type symbol: str\n :return: 包含日期和收盘价的指数数据\n :rtype: pandas.DataFrame\n \"\"\"\n url = f\"http://www.csindex.com.cn/zh-CN/indices/index-detail/{symbol}\"\n params = {\n \"earnings_performance\": \"5年\",\n \"data_type\": \"json\"\n }\n headers = {\n 'Host': 'www.csindex.com.cn',\n 'Referer': f'http://www.csindex.com.cn/zh-CN/indices/index-detail/{symbol}',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'\n }\n r = requests.get(url, params=params, headers=headers)\n r.encoding = 'utf-8-sig'\n data_json = r.json()\n temp_df = pd.DataFrame(data_json)[[\"tradedate\", \"tclose\"]]\n temp_df[\"tradedate\"] = pd.to_datetime(temp_df[\"tradedate\"]).dt.date\n temp_df.columns = [\"date\", \"close\"]\n temp_df['close'] = pd.to_numeric(temp_df['close'])\n return temp_df\n\n\nif __name__ == \"__main__\":\n stock_zh_index_hist_csindex_df = stock_zh_index_hist_csindex(symbol=\"H30533\")\n print(stock_zh_index_hist_csindex_df)\n"
]
| [
[
"pandas.to_datetime",
"pandas.DataFrame",
"pandas.to_numeric"
]
]
|
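The record's __main__ block already demonstrates the call; for completeness, a minimal hypothetical use (live access to csindex.com.cn is assumed, and the site may change its JSON layout):

```python
df = stock_zh_index_hist_csindex(symbol="H30374")
print(df.head())  # two columns: date, close
```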
enomotom/nnabla | [
"1947fe16a0a41d19d76cd916f151aa1991ea1b44",
"1947fe16a0a41d19d76cd916f151aa1991ea1b44"
]
| [
"python/src/nnabla/utils/cli/create_image_classification_dataset.py",
"examples/vision/cifar10/cifar10_data.py"
]
| [
"# Copyright (c) 2017 Sony Corporation. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport imghdr\nimport numpy as np\nimport scipy.misc\nimport nnabla.logger as logger\nimport csv\nimport tqdm\n\n\ndef create_image_classification_dataset_command(args):\n # settings\n source_dir = args.sourcedir\n dest_csv_file_name = [os.path.join(args.outdir, args.file1)]\n if args.file2:\n dest_csv_file_name.append(os.path.join(args.outdir, args.file2))\n dest_dir = args.outdir\n width = int(args.width)\n height= int(args.height)\n padding = args.mode == 'padding'\n ch = int(args.channel)\n shuffle = args.shuffle == 'true'\n test_data_ratio = int(args.ratio2) if args.ratio2 else 0\n \n if source_dir == dest_dir:\n logger.critical(\"Input directory and output directory are same.\")\n return\n\n # create file list\n logger.log(99, \"Creating file list...\")\n dirs = os.listdir(source_dir)\n dirs = [d for d in dirs if os.path.isdir(os.path.join(source_dir, d))]\n dirs.sort()\n # print(dirs)\n \n labels = []\n label_index = -1\n csv_data = []\n pbar = tqdm.tqdm(total=100, unit='%')\n last = 0\n for i, dir in enumerate(dirs):\n # print(dir)\n full_path = os.path.join(source_dir, dir)\n files = os.listdir(full_path)\n files = [f for f in files if os.path.isfile(os.path.join(full_path, f))]\n files.sort()\n found = False\n for i2, file in enumerate(files):\n file_name = os.path.join(full_path, file)\n if imghdr.what(file_name) is not None:\n if not found:\n labels.append(dir)\n label_index += 1\n found = True\n csv_data.append([os.path.join('.', dir, file), label_index])\n current = round(100 * (float(i) / len(dirs) + float(i2) / (len(dirs) * len(files))))\n if last < current:\n pbar.update(current - last)\n last = current\n pbar.close()\n\n # create output data\n logger.log(99, \"Creating output images...\")\n for data in tqdm.tqdm(csv_data, unit='images'):\n src_file_name = os.path.join(source_dir, data[0])\n data[0] = os.path.splitext(data[0])[0] + \".png\"\n dest_file_name = os.path.join(dest_dir, data[0])\n dest_path = os.path.dirname(dest_file_name)\n # print(src_file_name, dest_file_name)\n \n # open source image\n im = scipy.misc.imread(src_file_name)\n if len(im.shape) < 2 or len(im.shape) > 3:\n logger.warning(\"Illigal image file format %s.\".format(src_file_name))\n csv_data.remove(data)\n continue\n elif len(im.shape) == 3:\n # RGB image\n if im.shape[2] != 3:\n logger.warning(\"The image must be RGB or monochrome %s.\".format(src_file_name))\n csv_data.remove(data)\n continue\n \n # resize\n h = im.shape[0]\n w = im.shape[1]\n # print(h, w)\n if w != width or h != height:\n # resize image\n if not padding:\n # trimming mode\n if float(h) / w > float(height) / width:\n target_h = int(float(w) / width * height)\n # print('crop_target_h', target_h)\n im = im[(h - target_h) // 2:h - (h - target_h) // 2, ::]\n else:\n target_w = int(float(h) / height * width)\n # print('crop_target_w', target_w)\n im = im[::, (w - target_w) // 2:w - (w - target_w) // 2]\n # 
print('before', im.shape)\n im = scipy.misc.imresize(arr=im, size=(height, width), interp='lanczos')\n # print('after', im.shape)\n else:\n # padding mode\n if float(h) / w < float(height) / width:\n target_h = int(float(height) / width * w)\n # print('padding_target_h', target_h)\n pad = (((target_h - h) // 2, target_h - (target_h - h) // 2 - h), (0, 0))\n else:\n target_w = int(float(width) / height * h)\n # print('padding_target_w', target_w)\n pad = ((0, 0), ((target_w - w) // 2, target_w - (target_w - w) // 2 - w))\n if len(im.shape) == 3:\n pad = pad + ((0, 0),)\n im = np.pad(im, pad, 'constant')\n # print('before', im.shape)\n im = scipy.misc.imresize(arr=im, size=(height, width), interp='lanczos')\n # print('after', im.shape)\n\n # change color ch\n if len(im.shape) == 2 and ch == 3:\n # Monochrome to RGB\n im = np.array([im, im, im]).transpose((1,2,0))\n elif len(im.shape) == 3 and ch == 1:\n # RGB to monochrome\n im = np.dot(im[...,:3], [0.299, 0.587, 0.114])\n \n # output\n if not os.path.exists(dest_path):\n os.makedirs(dest_path)\n scipy.misc.imsave(dest_file_name, im)\n \n logger.log(99, \"Creating CSV files...\")\n if shuffle:\n import random\n random.shuffle(csv_data)\n \n csv_data_num = [(len(csv_data) * (100 - test_data_ratio)) // 100]\n csv_data_num.append(len(csv_data) - csv_data_num[0])\n data_head = 0\n for csv_file_name, data_num in zip(dest_csv_file_name, csv_data_num):\n if data_num:\n csv_data_2 = csv_data[data_head:data_head + data_num]\n data_head += data_num\n \n csv_data_2.insert(0, ['x:image', 'y:label'])\n with open(csv_file_name, 'w') as f:\n writer = csv.writer(f, lineterminator='\\n')\n writer.writerows(csv_data_2)\n",
"'''\nProvide data iterator for CIFAR10 examples.\n'''\nfrom contextlib import contextmanager\nimport numpy as np\nimport struct\nimport tarfile\nimport zlib\nimport time\nimport os\nimport errno\n\nfrom nnabla.logger import logger\nfrom nnabla.utils.data_iterator import data_iterator\nfrom nnabla.utils.data_source import DataSource\nfrom nnabla.utils.data_source_loader import download, get_data_home\n\n\nclass Cifar10DataSource(DataSource):\n '''\n Get data directly from cifar10 dataset from Internet(yann.lecun.com).\n '''\n\n def _get_data(self, position):\n image = self._images[self._indexes[position]]\n label = self._labels[self._indexes[position]]\n return (image, label)\n\n def __init__(self, train=True, shuffle=False, rng=None):\n super(Cifar10DataSource, self).__init__(shuffle=shuffle)\n\n # Lock\n lockfile = os.path.join(get_data_home(), \"cifar10.lock\")\n start_time = time.time()\n while True: # busy-lock due to communication between process spawn by mpirun\n try:\n fd = os.open(lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR)\n break\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n if (time.time() - start_time) >= 60 * 30: # wait for 30min\n raise Exception(\n \"Timeout occured. If there are cifar10.lock in $HOME/nnabla_data, it should be deleted.\")\n\n time.sleep(5)\n\n self._train = train\n data_uri = \"https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz\"\n logger.info('Getting labeled data from {}.'.format(data_uri))\n r = download(data_uri) # file object returned\n with tarfile.open(fileobj=r, mode=\"r:gz\") as fpin:\n # Training data\n if train:\n images = []\n labels = []\n for member in fpin.getmembers():\n if \"data_batch\" not in member.name:\n continue\n fp = fpin.extractfile(member)\n data = np.load(fp, encoding=\"bytes\")\n images.append(data[b\"data\"])\n labels.append(data[b\"labels\"])\n self._size = 50000\n self._images = np.concatenate(\n images).reshape(self._size, 3, 32, 32)\n self._labels = np.concatenate(labels).reshape(-1, 1)\n # Validation data\n else:\n for member in fpin.getmembers():\n if \"test_batch\" not in member.name:\n continue\n fp = fpin.extractfile(member)\n data = np.load(fp, encoding=\"bytes\")\n images = data[b\"data\"]\n labels = data[b\"labels\"]\n self._size = 10000\n self._images = images.reshape(self._size, 3, 32, 32)\n self._labels = np.array(labels).reshape(-1, 1)\n r.close()\n logger.info('Getting labeled data from {}.'.format(data_uri))\n\n self._size = self._labels.size\n self._variables = ('x', 'y')\n if rng is None:\n rng = np.random.RandomState(313)\n self.rng = rng\n self.reset()\n\n # Unlock\n os.close(fd)\n os.unlink(lockfile)\n\n def reset(self):\n if self._shuffle:\n self._indexes = self.rng.permutation(self._size)\n else:\n self._indexes = np.arange(self._size)\n super(Cifar10DataSource, self).reset()\n\n @property\n def images(self):\n \"\"\"Get copy of whole data with a shape of (N, 1, H, W).\"\"\"\n return self._images.copy()\n\n @property\n def labels(self):\n \"\"\"Get copy of whole label with a shape of (N, 1).\"\"\"\n return self._labels.copy()\n\n\n@contextmanager\ndef data_iterator_cifar10(batch_size,\n train=True,\n rng=None,\n shuffle=True,\n with_memory_cache=False,\n with_parallel=False,\n with_file_cache=False):\n '''\n Provide DataIterator with :py:class:`Cifar10DataSource`\n with_memory_cache, with_parallel and with_file_cache option's default value is all False,\n because :py:class:`Cifar10DataSource` is able to store all data into memory.\n\n For example,\n\n .. 
code-block:: python\n\n with data_iterator_cifar10(batch_size, train=True) as di:\n for data in di:\n SOME CODE TO USE data.\n\n '''\n with Cifar10DataSource(train=train, shuffle=shuffle, rng=rng) as ds, \\\n data_iterator(ds,\n batch_size,\n with_memory_cache,\n with_parallel,\n with_file_cache) as di:\n yield di\n"
]
| [
[
"numpy.array",
"numpy.pad",
"numpy.dot"
],
[
"numpy.concatenate",
"numpy.array",
"numpy.random.RandomState",
"numpy.load",
"numpy.arange"
]
]
|
Anikbh11/trident | [
"4edad87889997dbafcef68c6a564cf67d921c938"
]
| [
"tests/test_light_ray.py"
]
| [
"\"\"\"\nUnit test for the light_ray analysis module\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (c) 2016-2017, yt Development Team.\n# Copyright (c) 2017, Trident Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file LICENSE, distributed with this software.\n#-----------------------------------------------------------------------------\n\nimport numpy as np\n\nfrom yt.convenience import \\\n load\nfrom yt.testing import \\\n assert_array_equal, \\\n assert_almost_equal\nfrom trident import \\\n LightRay, \\\n make_simple_ray\nfrom trident.testing import \\\n answer_test_data_dir, \\\n TempDirTest\nimport os\n\nCOSMO_PLUS = os.path.join(answer_test_data_dir,\n \"enzo_cosmology_plus/AMRCosmology.enzo\")\nCOSMO_PLUS_SINGLE = os.path.join(answer_test_data_dir,\n \"enzo_cosmology_plus/RD0009/RD0009\")\nGIZMO_COSMO_SINGLE = os.path.join(answer_test_data_dir,\n \"gizmo_cosmology_plus/snap_N128L16_150.hdf5\")\n\ndef compare_light_ray_solutions(lr1, lr2):\n assert len(lr1.light_ray_solution) == len(lr2.light_ray_solution)\n if len(lr1.light_ray_solution) == 0:\n return\n for s1, s2 in zip(lr1.light_ray_solution, lr2.light_ray_solution):\n for field in s1:\n if field in [\"next\", \"previous\"]:\n continue\n if isinstance(s1[field], np.ndarray):\n assert_array_equal(s1[field], s2[field])\n else:\n assert s1[field] == s2[field]\n\nclass LightRayTest(TempDirTest):\n\n def test_light_ray_cosmo(self):\n \"\"\"\n This test generates a cosmological light ray\n \"\"\"\n lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.03)\n\n lr.make_light_ray(seed=1234567,\n fields=['temperature', 'density', 'H_number_density'],\n data_filename='lightray.h5')\n\n ds = load('lightray.h5')\n compare_light_ray_solutions(lr, ds)\n\n def test_light_ray_cosmo_nested(self):\n \"\"\"\n This test generates a cosmological light ray confing the ray to a subvolume\n \"\"\"\n left = np.ones(3) * 0.25\n right = np.ones(3) * 0.75\n\n lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.03)\n\n lr.make_light_ray(seed=1234567, left_edge=left, right_edge=right,\n fields=['temperature', 'density', 'H_number_density'],\n data_filename='lightray.h5')\n\n ds = load('lightray.h5')\n compare_light_ray_solutions(lr, ds)\n\n def test_light_ray_cosmo_nonperiodic(self):\n \"\"\"\n This test generates a cosmological light ray using non-periodic segments\n \"\"\"\n lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.03)\n\n lr.make_light_ray(seed=1234567, periodic=False,\n fields=['temperature', 'density', 'H_number_density'],\n data_filename='lightray.h5')\n\n ds = load('lightray.h5')\n compare_light_ray_solutions(lr, ds)\n\n def test_light_ray_non_cosmo(self):\n \"\"\"\n This test generates a non-cosmological light ray\n \"\"\"\n lr = LightRay(COSMO_PLUS_SINGLE)\n\n ray_start = [0,0,0]\n ray_end = [1,1,1]\n lr.make_light_ray(start_position=ray_start, end_position=ray_end,\n fields=['temperature', 'density', 'H_number_density'],\n data_filename='lightray.h5')\n\n ds = load('lightray.h5')\n compare_light_ray_solutions(lr, ds)\n\n def test_light_ray_redshift_coverage_grid(self):\n \"\"\"\n Tests to assure a light ray covers the full redshift range appropriate\n for that comoving line of sight distance. 
Was not always so!\n \"\"\"\n ds = load(COSMO_PLUS_SINGLE)\n ray = make_simple_ray(ds, start_position=ds.domain_left_edge, end_position=ds.domain_right_edge, lines=['H'])\n assert_almost_equal(ray.r['redshift'][0], 6.99900695e-03, decimal=8)\n assert_almost_equal(ray.r['redshift'][-1], -1.08961751e-02, decimal=8)\n\n def test_light_ray_non_cosmo_from_dataset(self):\n \"\"\"\n This test generates a non-cosmological light ray created from an already\n loaded dataset\n \"\"\"\n ds = load(COSMO_PLUS_SINGLE)\n lr = LightRay(ds)\n\n ray_start = [0,0,0]\n ray_end = [1,1,1]\n lr.make_light_ray(start_position=ray_start, end_position=ray_end,\n fields=['temperature', 'density', 'H_number_density'],\n data_filename='lightray.h5')\n\n ds = load('lightray.h5')\n compare_light_ray_solutions(lr, ds)\n\n def test_light_ray_redshift_coverage(self):\n \"\"\"\n Tests to assure a light ray covers the full redshift range appropriate\n for that comoving line of sight distance. Was not always so!\n \"\"\"\n ds = load(GIZMO_COSMO_SINGLE)\n ray = make_simple_ray(ds, start_position=ds.domain_left_edge, end_position=ds.domain_right_edge, lines=['H'])\n assert_almost_equal(ray.r['redshift'][0], 0.00489571, decimal=8)\n assert_almost_equal(ray.r['redshift'][-1], -0.00416831, decimal=8)\n\n def test_light_ray_redshift_monotonic(self):\n \"\"\"\n Tests to assure a light ray redshift decreases monotonically\n when ray extends outside the domain.\n \"\"\"\n ds = load(COSMO_PLUS_SINGLE)\n ray = make_simple_ray(ds, start_position=ds.domain_center,\n end_position=ds.domain_center+ds.domain_width)\n assert((np.diff(ray.data['redshift']) < 0).all())\n"
]
| [
[
"numpy.ones",
"numpy.diff"
]
]
|
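A minimal hypothetical reproduction of the pattern these tests exercise, outside the test harness (the dataset path is an assumption and must point at a real Enzo output):

```python
from yt.convenience import load
from trident import make_simple_ray

ds = load("enzo_cosmology_plus/RD0009/RD0009")  # hypothetical local path
ray = make_simple_ray(ds,
                      start_position=ds.domain_left_edge,
                      end_position=ds.domain_right_edge,
                      lines=['H'])
print(ray.r['redshift'][0], ray.r['redshift'][-1])  # should span the full range
```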
mjsiers/practice-data-cpcapaper | [
"5f5c004c3c478195510cfd0d90e5470f85525b74"
]
| [
"notebooks/generators/workbooks/01-signal-workbook.py"
]
| [
"#%% [markdown]\n# ## Simulated Signal Data Generation\n# The simulated signal for this example will be constructed by the combination of two Gaussian peaks with some\n# overlap. The height of each peak will be dependent on a concentration value. The target value for each generated\n# signal will be the concentration value. The goal of the project will be correctly classify each sample based on\n# the concentration value. \n\n#%%\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport numpy as np\nfrom scipy.stats import norm \n\nmpl.style.use('seaborn-notebook')\nplt.rcParams[\"figure.figsize\"] = (12, 5)\n\n#%% [markdown]\n# The following plot shows the separate Gaussian peaks used to generate the project test signal. This sample signal\n# was derived from an example in the \n# [**Rampy**](https://github.com/charlesll/rampy/blob/master/examples/Baseline_and_Centroid_determination.ipynb) project.\n\n#%%\nxnum = 600\nnp.random.seed(42)\nx = np.arange(0, xnum, 1.0)\nS_1 = norm.pdf(x, loc=310.0, scale=40.0)\nS_2 = norm.pdf(x, loc=390.0, scale=20.0)\nS_true = np.vstack((S_1, S_2))\n\nfig, axs = plt.subplots()\naxs.plot(x, S_1)\naxs.plot(x, S_2)\nfig.suptitle('Signal Base Gaussian Shapes')\n\n#%% [markdown]\n# The following plots will show the combined Gaussian peaks at varying concentration levels. The first\n# plot is generated using a fixed set of concentrations values. The second plot generates a random set of\n# curves with the line color determined from the concentration level.\n\n#%%\nC_true = np.array([[0.0, 1.0], [0.25, 0.75], [0.5, 0.5], [0.75, 0.25], [1.0, 0.0]])\nsignal = np.dot(C_true, S_true)\n\nfig, axs = plt.subplots()\nfor i, level in enumerate(C_true):\n axs.plot(x, signal[i], label='{0:.2f}-concentration'.format(C_true[i, 0]))\nfig.suptitle('Combined Signal at Varying Concentration Levels')\nplt.legend()\n\n#%%\nnsamples = 100\nC_levels = np.random.rand(nsamples)\nC_matrix = np.vstack((C_levels, (1-C_levels))).T\nsamples = np.dot(C_matrix, S_true)\n\n# norm is a class which, when called, can normalize data into the\n# [0.0, 1.0] interval.\nnorm = mpl.colors.Normalize(vmin=np.min(C_levels), vmax=np.max(C_levels))\n\n# create a ScalarMappable and initialize a data structure\njet = plt.get_cmap('jet')\ns_m = mpl.cm.ScalarMappable(norm=norm, cmap=jet)\ns_m.set_array([])\n\n# plotting spectra\n# calling the ScalarMappable that was initialised with c_m and norm\nfig, axs = plt.subplots()\nfor i in range(nsamples):\n color = s_m.to_rgba(C_levels[i])\n axs.plot(x, samples[i, :].T, color=color)\n\n# we plot the colorbar, using again our\n# ScalarMappable\nc_bar = plt.colorbar(s_m)\nc_bar.set_label(r\"C_\")\n\nfig.suptitle('Combined Signals Colored By Concentration Levels')\nplt.xlabel('X')\nplt.ylabel('Y')\nplt.show()\n"
]
| [
[
"numpy.max",
"matplotlib.cm.ScalarMappable",
"numpy.array",
"matplotlib.style.use",
"numpy.dot",
"numpy.random.rand",
"matplotlib.pyplot.colorbar",
"numpy.random.seed",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.get_cmap",
"matplotlib.pyplot.legend",
"numpy.min",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show",
"numpy.vstack"
]
]
|
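The core step of the workbook above is a linear mixing model: each simulated sample is a concentration-weighted sum of the two pure Gaussian spectra, i.e. samples = C @ S. A tiny self-contained sketch of just that step, with shapes annotated (values are illustrative):

```python
import numpy as np
from scipy.stats import norm

x = np.arange(0, 600, 1.0)
S = np.vstack((norm.pdf(x, loc=310.0, scale=40.0),   # pure component 1
               norm.pdf(x, loc=390.0, scale=20.0)))  # pure component 2 -> S: (2, 600)
c = np.array([0.25, 0.8])                            # component-1 concentrations
C = np.vstack((c, 1 - c)).T                          # C: (2, 2), rows sum to 1
samples = C @ S                                      # samples: (2, 600) mixed signals
print(samples.shape)
```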
Kuro96/vedasal | [
"3c5588bf12059af5bd7bc779fd5f9dc0b2901cb2"
]
| [
"tests/test_convlstm.py"
]
| [
"import torch\n\nfrom kurosal.models.sequencer import ConvLSTMCell\n\n\nif __name__ == '__main__':\n import os\n os.environ['CUDA_VISIBLE_DEVICES'] = '7'\n cell = ConvLSTMCell(3, 3, 3, 0.1).cuda()\n x = torch.Tensor(4, 3, 5, 5).cuda()\n\n out, state = cell(x, None)\n print(out, out.size())\n out, state = cell(x, state)\n print(out, out.size())\n\n out, state = cell(x, None)\n print(out.size())\n out, state = cell(x, state)\n print(out.size())\n import pdb\n pdb.set_trace()\n"
]
| [
[
"torch.Tensor"
]
]
|
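The smoke test above exercises the two calling modes a recurrent cell typically supports: cell(x, None) to start a fresh state and cell(x, state) to continue a sequence. A hedged sketch of the corresponding driver loop (the meaning of ConvLSTMCell's constructor arguments is not documented in the record, so no constructor call is shown):

```python
import torch

def run_sequence(cell, frames, state=None):
    """Feed a sequence of (N, C, H, W) frames through a stateful cell.

    Assumes the (input, state) -> (output, state) convention the test uses.
    """
    outputs = []
    for frame in frames:
        out, state = cell(frame, state)  # None initializes the hidden state
        outputs.append(out)
    return torch.stack(outputs), state
```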
jaschau/21datalab | [
"5f9ad831273197f4fffc35b3d55a311d24a54307"
]
| [
"model.py"
]
| [
"import glob\nimport json\nimport copy\nimport importlib\nimport threading\nimport logging\nimport pytz\n\n#for tables\nimport numpy\nimport numpy as np\nimport datetime\nimport dateutil.parser\nimport sys\nimport os\nimport time\nimport uuid\nimport hashlib\nimport random\nimport traceback\nfrom dates import *\n\n# type hints\nfrom typing import List\n\nimport modeltemplates\n\n# for Observer\nfrom queue import Queue\nfrom queue import Empty\nimport utils\n\nfrom timeseries import TimeSeriesTable\nfrom dates import *\n\nimport inspect\nfrom utils import str_lim\n\"\"\"\nnext Todo\n- \n- execute: problem im thread mit der Ausführung\n- code documentation\n- google document\n- \n\"\"\"\n\nsys.path.append(\"./plugins\") #for the importlib loader, doesn't understand relative paths\n#sys.path.append(\"./private\") #for the importlib loader, doesn't understand relative paths\n\nmyGlobalDir = os.path.dirname(os.path.realpath(__file__)) # holds the directory of this script\n\n\n\ndef getRandomId():\n return '%08x' % random.randrange(16 ** 8)\n\n\n#used as an OOP wrapper for the flat and procedural style of the model class\nclass Node():\n \"\"\" used as an OOP wrapper for the flat and procedural style of the model class\n it is a convenient way to access nodes and their hierarchy and internals\n \"\"\"\n def __init__(self,myModel,myId):\n \"\"\" a node can be created by calling the\n mynode = model.get_node(\"root.mynode\") or\n mynode = Node(mymodel,\"123\")\n Returns:\n a node object for further access to values, hierarchy etc.\n \"\"\"\n self.model = myModel # this is not a copy!!\n self.id = myId\n\n def __repr__(self):\n return 'Node(id={:}, value={:})'.format(self.id, self.get_value())\n\n def get_value(self):\n \"\"\" Returns:\n the \"value\" property of the node\n None if node has no \"value\"\n \"\"\"\n return self.model.get_value(self.id)\n\n #####################\n # time series node API\n\n def get_time_series(self, start=None,\n end=None,\n noBins=None,\n includeIntervalLimits=False,\n resampleTimes=None,\n format=\"default\",\n toList = False,\n resampleMethod = None):\n \"\"\"\n Returns\n dict with [\"__time\":[...],\"values\":[...]\n \"\"\"\n\n browsePath = self.model.get_browse_path(self.id)\n\n data = self.model.time_series_get_table(variables = [browsePath],\n tableDescriptor=None,\n start=start,\n end=end,\n noBins=noBins,\n includeIntervalLimits=includeIntervalLimits,\n resampleTimes=resampleTimes,\n format=format,\n toList=toList,\n resampleMethod=resampleMethod)\n if data !={} :\n return data[browsePath]\n else:\n return None\n\n def get_raw_time_series(self,start=None,end=None):\n return self.model.time_series_get_raw(self.id,start=start,end=end)\n\n def add_references(self,targetNodes,deleteAll=False):\n \"\"\"\n add references from the node to the targets\n Args:\n targetNodes: node or list of nodes to reference to\n deleteAll: if set true, we delete all existing references before creating the new\n Returns\n True/False for success/error\n \"\"\"\n if deleteAll:\n self.model.remove_forward_refs(self.id)#this deletes all existing\n if type(targetNodes) is not list:\n targetNodes = [targetNodes]\n targetIds = [node.get_id() for node in targetNodes]\n return self.model.add_forward_refs(self.id,targetIds)\n\n def set_value(self,value):\n \"\"\"\n special support for \"column\" types: if a scalar is given, we make a \"full\" array\n \"\"\"\n if self.get_properties()[\"type\"] == \"column\":\n if type(value) != numpy.ndarray and type(value) != list:\n #we have a scalar, so we 
set it\n #get the len of the table\n timeNode = self.get_table_time_node()\n length = len(timeNode.get_value())\n value = numpy.full(length,value,dtype=numpy.float64)\n\n return self.model.set_value(self.id,value)\n\n def set_time_series(self,values=None,times=None):\n \"\"\"\n replaces the time series with value and times, it deletes the existing\n\n \"\"\"\n return self.model.time_series_set(self.id,values=values,times=times)\n\n def insert_time_series(self,values=None,times=None,allowDuplicates = False):\n \"\"\"\n insert data, if the time stamp exists already, we replace it\n \"\"\"\n return self.model.time_series_insert(self.id,values=values, times=times, allowDuplicates=allowDuplicates)\n\n def merge_time_series(self,values=None, times=None):\n \"\"\" merge the times series of mergeNode into this node\"\"\"\n return self.model.time_series_merge(self.id,values = values,times=times)\n\n def delete_time_series(self,start=None,end=None):\n return self.model.time_series_delete_area(self.id, start=start, end=end)\n\n #####################\n # event series node API\n def get_event_series(self, start=None, end=None, format=\"default\",eventFilter = None):\n return self.model.event_series_get(self.id,start=start,end=end,format=format,eventFilter=eventFilter)\n\n def set_event_series(self, values=None, times=None):\n \"\"\"\n replaces the event series with value and times, it deletes the existing\n \"\"\"\n return self.model.event_series_set(self.id,values=values,times=times)\n\n def insert_event_series(self,values=None,times=None,allowEventDuplicates = False):\n return self.model.event_series_insert(self.id,values,times,allowEventDuplicates=allowEventDuplicates)\n\n def delete_event_series(self,start=None, end = None, eventsToDelete=[]):\n return self.model.event_series_delete(desc=self.id,start=start,end=end,eventsToDelete=eventsToDelete)\n\n\n def get_parent(self):\n \"\"\" Returns:\n a Node()-instance of the parent of the current node,\n None if no parent available\n \"\"\"\n nodeInfo = self.model.get_node_info(self.id)\n if nodeInfo:\n return self.model.get_node(nodeInfo[\"parent\"])\n else:\n return None\n\n def get_child(self,childName):\n \"\"\"\n Args:\n childName(nodedescription):\n Returns:\n a Node() instance of the child holding the childName\n None if the current node does not have a child with the name childName\n \"\"\"\n nodeInfo = self.model.get_node_info(self.id)\n if nodeInfo:\n for childId in nodeInfo['children']:\n childInfo = self.model.get_node_info(childId)\n if childInfo[\"name\"] == childName:\n return self.model.get_node(childId)\n return None\n\n\n def delete(self):\n \"\"\"\n delete this node from the model, note that the object itself it not destroyed, but it is disconnected from the model\n so should not be used anymore afterwards\n :return:\n \"\"\"\n return self.model.delete_node(self.id)\n\n def create_child(self,name=None,type=\"folder\",value=None,properties={}):\n \"\"\"\n create a node under the current node, if the node exists already, we get the node\n Args:\n name [string] the child name\n type [string] the type of the node\n value [any] direct assignment of values\n properies [dict] a dict with further settings of properies like value, type etc\n Returns:\n the node objects or none if not available\n \"\"\"\n\n if name == None:\n name = '%08x' % random.randrange(16 ** 8)\n id = self.model.create_node(parent=self.id,name=name,type=type,value=value,properties=properties)\n if id:\n return self.model.get_node(id)\n else:\n #we try to get it anyways\n 
return self.get_child(name)\n\n\n def get_children(self, deepLevel=1):\n \"\"\" Returns:\n a list of Node()-objects which are the children of the current node\n args:\n deepLevel: set >1 to get children and children's children\n \"\"\"\n nodeInfo = self.model.get_node_info(self.id)\n children = []\n if nodeInfo[\"children\"]:\n children=[self.model.get_node(id) for id in nodeInfo['children'] ]\n\n while deepLevel>1:\n deepLevel -=1\n childrenOld = children.copy()\n for child in childrenOld:\n children.extend(child.get_children())\n #remove duplicates via id:\n childDict = {child.get_id():child for child in children} # same keys(id) will only be there once\n children = list(childDict.values())\n return children\n\n\n def get_properties(self):\n \"\"\" Returns:\n a dictionary holding the properties of the node like {\"value\":123,\"name\":\"myVariable\",\"children\":...}\n \"\"\"\n nodeInfo = self.model.get_node_info(self.id)\n return copy.deepcopy(nodeInfo)\n\n def get_type(self):\n \"\"\"\n Returns:\n the type of the node\n \"\"\"\n return self.get_property(\"type\")\n\n def get_property(self,property):\n \"\"\"\n Args:\n property: the property name asked for\n Returns:\n the value of the given property\n None if the property does not exist\n \"\"\"\n nodeDict =self.get_properties()\n if property in nodeDict:\n return self.get_properties()[property]\n else:\n return None\n\n def set_properties(self,properties):\n \"\"\"\n add or modify properties of a node\n Args:\n properties [dict] holding key,value for the properties\n Returns\n True for ok, False for not done\n \"\"\"\n return self.model.set_properties(properties,nodeDesc=self.id)\n\n\n def get_model(self):\n \"\"\" this function should only be used for testing, we should never be in the need to access the model inside\n Returns:\n the underlying model of type Model() class\n \"\"\"\n return self.model\n\n def get_target_ids(self):\n \"\"\" this function returns the target ids of a referencer as a list, not resolving the leaves\"\"\"\n\n if self.get_properties()[\"type\"] != \"referencer\":\n return None\n return self.get_properties()[\"forwardRefs\"]\n\n def get_target(self):\n \"\"\" this function returns the first direct target node of a referencer not resolving the leaves\"\"\"\n if self.get_properties()[\"type\"] == \"referencer\":\n targets = self.get_properties()[\"forwardRefs\"]\n if targets:\n return Node(self.model,targets[0])\n return None\n\n def get_targets(self):\n \"\"\" this function returns the target Nodes of a referencer as a list, not resolving the leaves\"\"\"\n if self.get_properties()[\"type\"] != \"referencer\":\n return None\n targets = []\n for nodeid in self.get_properties()[\"forwardRefs\"]:\n targets.append(Node(self.model,nodeid))\n return targets\n\n def get_leaves(self):\n \"\"\" this function returns a list of Nodes containing the leaves where this referencer points to\n this function works only for nodes of type \"referencer\", as we are following the forward references\n leaves are defined as follows:\n 1) all nodes that are listed under the forward references and which are not of type referencer or folder\n 2) if nodes pointed to are referencer, the targets are again analyzed\n 3) if a node pointed to is a folder, all children of the folder are taken which are not referencer or folder themselves\n folders and referencers inside the folder are not taken into account\n doing so, hierarchies of referencers are unlimited, hierarchies of folders are only of depth 1\n Returns:\n all 
nodes which are considered leaves as a list of Node() objects\n \"\"\"\n leaves = self.model.get_leaves(self.id) # a list of node dicts\n leaveNodes = []\n for leave in leaves:\n leaveNodes.append(Node(self.model,leave[\"id\"]))\n return leaveNodes\n\n def get_leaves_ids(self):\n \"\"\"\n get the list of ids of the leaves, see get_leaves()\n Returns:\n a list of ids of the leaves\n \"\"\"\n return self.model.get_leaves_ids(self.id)\n\n\n\n def get_id(self):\n \"\"\" Returns: the nodeid (which is generated by the system) \"\"\"\n return self.id\n\n def get_browse_path(self):\n \"\"\" Returns: the browsepath along the style \"root.myfolder.myvariable...\" \"\"\"\n return self.model.get_browse_path(self.id)\n\n def get_name(self):\n \"\"\" Returns: the name of the node without the path \"\"\"\n return self.model.get_node_info(self.id)[\"name\"]\n\n def get_node(self,desc):\n return self.model.get_node(desc)\n\n def get_table_time_node(self):\n \"\"\" if the current node belongs to a table, then we can get the time node\n a node \n Returns:\n (obj Node()) the node of type\n \"\"\"\n timeNode = self.model.find_table_time_node(self.id)\n if timeNode:\n return Node(self.model,timeNode)\n else:\n return None\n\n def get_table_len(self):\n \"\"\"\n if the current node is a type \"table\", we get the current len\n Return:\n the len of the columns of the table\n \"\"\"\n return self.model.get_table_len(self.id)\n\n def get_table_node(self):\n \"\"\"\n if the current node is a column of a time series table, we get the according table node of type \"table\"\n Return:\n a Node() of type \"table\" which is the table of the current node\n \"\"\"\n tableId = self.model.find_table_node(self.id)\n if tableId:\n return self.model.get_node(tableId)\n else:\n return None\n\n\n\n def get_time_indices(self,startTime,endTime):\n \"\"\" works only for the time node, it looks to find the timeField node of the table to which the node belongs\n then tries to find start and end time inside the timeField column and returns the index (rownumber) which are\n INSIDE the given startTime, endTime\n Args:\n startTime: the startTime to look up ,supported formats: epoch seconds, datetime object, iso string\n endTime: the startTime to look up ,supported formats: epoch seconds, datetime object, iso string\n Returns:\n (numpy array) indexnumbers containing the rows of the table that fall inside the given [startTime, endTime] intervall\n None for not finding table, timeField, start-endTimes whatsoever\n\n \"\"\"\n try:\n startTime = date2secs(startTime)\n endTime = date2secs(endTime)\n times = numpy.asarray(self.get_value())\n indices = numpy.where((times >= startTime) & (times <= endTime))[0]\n return indices\n except:\n return None\n\n def execute(self):\n return self.model.execute_function(self.id)\n \n def execute_synchronous(self):\n return self.model.execute_synchronous(self.id)\n\n def instantiate(self):\n return self.model.instantiate_object(self.id)\n\n def get_object(self):\n return self.model.get_object(self.id)\n\n def get_logger(self):\n return self.model.logger\n\n def connect_to_table(self,tableNode):\n \"\"\"\n connect a node to a table, it must be a column type\n the node itself will be reset and filled with numpy.inf and prepared to work with the table:\n an array will be created with np.inf of the current table size\n and the column will be hooked to the table referencer\n\n Returns:\n True on success\n \"\"\"\n if self.get_property(\"type\") != \"column\":\n return False\n\n #now make an array of np.inf of the 
current table size and apply the value\n timeNode = tableNode.get_table_time_node()\n if not timeNode:\n return False\n tableLen = len(timeNode.get_value())\n self.set_value(numpy.full(tableLen,numpy.inf,dtype=numpy.float64))\n\n #now hook it as column to the table\n #check if we are part of it already\n for column in tableNode.get_child(\"columns\").get_leaves():\n if column.get_id() == self.get_id():\n return True\n #now connect it to the table\n return self.model.add_forward_refs(tableNode.get_child(\"columns\").get_id(), [self.id],allowDuplicates=False)\n\n def get_columns(self):\n \"\"\"\n get the columns nodes of a table without the time node\n can be executed on the table node\n\n Returns:\n list of node objects which are the columns of the table without the time node\n \"\"\"\n if self.get_properties()[\"type\"] != \"table\":\n return None\n nodes = self.get_child(\"columns\").get_leaves()\n timeNode = self.get_table_time_node()\n return [node for node in self.get_child(\"columns\").get_leaves() if node.get_id() != timeNode.get_id()]\n\nclass Observer:\n # The observer needs a reference to the model, because the rest service is not able to detect\n # when the client connection is closed, but the observer message handling loop can detect it\n # this way the observer can detach itself from the model, when the client is disconnected\n # there are two queues involved: the updateQueue holding events pushed by the observers from the model\n # and the eventQueues which is the filtered updateQueue (filtering avoids sending multiple identical events in short time\n def __init__(self, model):\n self.model = model\n\n # Message queues to store the new events and last time stamps\n self.updateQueue = Queue()\n self.eventQueues = {} # k,v = event:{\"lasttimestamp\":datetime,\"queue\":Queue()\n self.minWaitTime = 0.500 #in seconds float\n\n # use the logger of th model\n self.logger = self.model.logger\n self.lock = threading.RLock()\n\n\n #preload queue: this is a workaround as the browser does not get the first 2 events immideately\n # it actually doesn't help ..?\n for i in range(2):\n self.updateQueue.put({\"event\":\"_preload\",\"id\":\"\",\"data\":{\"xy\":str(i)}})\n\n def update(self, event):\n \"\"\"\n inform about the occurrence of an event,\n Args:\n event \"string\": the\n :param event:\n :return:\n \"\"\"\n\n defaultEvent = {\"data\":\"\",\"id\":\"\",\"event\":\"\"}\n defaultEvent.update(event)\n self.updateQueue.put(defaultEvent)\n #self.logger.debug(f\"Qup {id(self)} {defaultEvent['event']}, {defaultEvent['id']}\")\n\n\n\n def get_event(self):\n \"\"\"\n get the next event from the observerclass, this is used a generator for the webserver\n we also filter out events to avoid a train of identical events\n the filtering uses the self.minWaitTime, within that period we don't sent identical event;\n events are \"identical\", if they have the same \"event\" and \"data\"\n \"\"\"\n\n self.logger.debug(f\"Observer {id(self)} get_event()\")\n stop_event_processing = False # This flag shows when to stop the event processing\n\n while not stop_event_processing:\n try:\n # Try to retrieve an item from the update queue\n event = self.updateQueue.get(block=True,timeout=self.minWaitTime)\n #self.logger.debug(f\"event pick {event}\")\n #create an eventIdentification, this is used to filter out repeated events\n # we select the eventIdentificton in a way that events that have unique information keeps them\n # we take all information from the event.data field, so only the events WITHOUT unique data 
will be removed\n # those are typically the tree.update events\n eventIdentification = event[\"event\"] #the event name itself\n for key in event[\"data\"]:\n eventIdentification = eventIdentification+str(key)+str(event[\"data\"][key])\n #now sort this event into the queues of eventids\n if eventIdentification not in self.eventQueues:\n # this is a new type/identificatin of event, create an entry in the event queue\n # put the event in the queue and make the last timestamp so that we send it out now\n self.eventQueues[eventIdentification]={\"lastTimeStamp\":0,\"queue\":Queue()}\n self.eventQueues[eventIdentification][\"queue\"].put(event)\n\n except Exception as ex:\n # this happens if we time out the queue get, no problem, just continue\n #self.logger.error(f\"Exception observer {id(self)} thread self.updateQueue.get: {ex},{str(sys.exc_info()[0])}\")\n pass\n\n #now go over all the sorted event queues and check what to send out:\n if 0:\n #show the queues\n for k,v in self.eventQueues.items():\n q = v[\"queue\"]\n qLen = q.qsize()\n #self.logger.debug(f\"Queue {k}: len {qLen} {[q.queue[id] for id in range(qLen)]}\")\n try:\n now = time.time()\n for eventIdentification,entry in self.eventQueues.items(): # entry is {\"lasttimestampe\": \"queue\":\n #self.logger.debug(f\"observer {id(self)} check queue of {eventIdentification} size: {entry['queue'].qsize()},last:{entry['lastTimeStamp']}, now:{now}, ready: {now > (entry['lastTimeStamp']+self.minWaitTime)}\")\n if (not entry[\"queue\"].empty()) and (now > (entry[\"lastTimeStamp\"]+self.minWaitTime)):\n #send this event, the timeout was met, we pull the first event from the queue, trash the remaining ones\n \"\"\"\n old code\n \n self.eventQueues[eventIdentification][\"lastTimeStamp\"]=now\n #send out this event\n myEvent = self.eventQueues[eventIdentification][\"queue\"].get()\n event_string = f'id:{myEvent[\"id\"]}\\nevent: {myEvent[\"event\"]}\\ndata: {myEvent[\"data\"]}\\n\\n'\n self.logger.debug(f'Observer {id(self)} sending event: {event_string}')\n\n #pull empty the queue\n if self.eventQueues[eventIdentification]['queue'].qsize():\n self.logger.debug(f\"Qtrash observerinstance{id(self)} eventident {eventIdentification} size {self.eventQueues[eventIdentification]['queue'].qsize()}\")\n while not self.eventQueues[eventIdentification][\"queue\"].empty():\n self.eventQueues[eventIdentification][\"queue\"].get(False)\n self.logger.debug(f\"Qyield {id(self)} : {myEvent}\")\n yield event_string\n \"\"\"\n self.eventQueues[eventIdentification][\"lastTimeStamp\"]=now\n #send out this event\n\n #pull empty the queue\n if self.eventQueues[eventIdentification]['queue'].qsize():\n #self.logger.debug(f\"Qtrash observerinstance{id(self)} eventident {eventIdentification} size {self.eventQueues[eventIdentification]['queue'].qsize()}\")\n while not self.eventQueues[eventIdentification][\"queue\"].empty():\n myEvent = self.eventQueues[eventIdentification][\"queue\"].get(False)\n\n event_string = f'id:{myEvent[\"id\"]}\\nevent: {myEvent[\"event\"]}\\ndata: {json.dumps(myEvent[\"data\"])}\\n\\n'\n #self.logger.debug(f\"Qyield {id(self)} : {myEvent}\")\n yield event_string\n\n\n # This exception is raised when the generator function is exited, which means that the\n # client side connection to the SSE stream was close, thus the observer could be removed\n except GeneratorExit:\n self.logger.warning(f\"Observer {id(self)} connection closed.\")\n stop_event_processing = True\n\n self.logger.warning(f\"Observer {id(self)} exiting event processing.\")\n\n # 
Detach this observer from the model\n self.model.detach_observer(self)\n\n\nclass Model:\n nodeTemplate = {\"id\": None, \"name\": None, \"type\": \"folder\", \"parent\": None, \"children\": [], \"backRefs\": [],\"forwardRefs\":[],\"value\":None}\n\n\n def __init__(self):\n \"\"\"\n initialize an empty Model object, it will contain the root Node as folder with Id \"0\"\n during the initialization, also the plug-ins (all files in the ./plugin) are loaded:\n all templates and functions are imported\n a model holds all modelling information and data to work on\n \"\"\"\n self.version = 0.1\n self.model = {\"1\":{\n \"name\":\"root\",\n \"type\":\"folder\",\n \"children\":[],\n \"parent\":\"0\",\n \"id\":\"1\",\n \"backRefs\":[],\n \"forwardRefs\":[],\n \"version\":self.version\n }}\n self.disableObserverCounter = 0 # a counting sema (under manual lock) for the disabling: if zero the notify_observers is active otherwise not\n self.__init_logger(logging.DEBUG)\n self.globalIdCounter=1 # increased on every creation of a node, it holds the last inserted node id\n self.idCreationHash = True # if this is true, we create the id per hash, not per counter\n self.ts = TimeSeriesTable()\n self.functions={} # a dictionary holding all functions from ./plugins\n self.templates={} # holding all templates from ./plugins\n self.lock = threading.RLock()\n self.executeFunctionRunning = False # set to true, makes sure only one functions runs at a time\n\n self.objectClasses = {} # a dictionaryholding all object clases from the /plugins\n self.import_default_plugins()\n self.differentialHandles ={} # containing model_copy entries to support differential queries\n self.diffHandleCounter = 0 # used only for debugging\n self.differentialHandlesMaxPerUser = 10\n self.currentModelName = \"emptyModel\" # the current name of the model\n self.modelUpdateCounter = 0 #this is for the tree observer, on any change, we update the counter\n self.observerStatus = {} # a dict holding the key = observerid and value : the needed status of an observer processing\n self.executionQueue = Queue()\n\n self.observers = []\n self.sse_event_id = 1\n\n self.start_function_execution_thread()\n\n def __del__(self):\n self.functionExecutionRunning = False # stop the execution thread of functions\n\n def __init_logger(self, level):\n \"\"\"setup the logger object\"\"\"\n self.logger = logging.getLogger(\"Model-\"+'%08x' % random.randrange(16 ** 8))\n handler = logging.StreamHandler()\n formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')\n handler.setFormatter(formatter)\n self.logger.addHandler(handler)\n\n logfile = logging.FileHandler(\"./log/model.log\")\n logfile.setFormatter(formatter)\n self.logger.addHandler(logfile)\n\n\n self.logger.setLevel(level)\n\n def __get_id(self, id):\n \"\"\"\n Args:\n id (string): give a browsepath (\"root.myfolder.myvariable\") or a nodeId (\"10\")\n or a \"fancy\" path mixed like \"1000.min\" where 1000 is a node id, only the first is allowed as Nodeid, the followings are names\n Returns:\n (string): the node id as string\n None if not found\n \"\"\"\n if id in self.model:\n return id\n #maybe a browsepath?\n try:\n names = id.split('.')\n if names[0]==\"root\":\n names = names[1:]\n actualSearchId = \"1\"\n\n elif names[0] in self.model:\n #self.logger.debug(f\"fancy browsepath {names}\")\n actualSearchId = names[0]\n names = names[1:]\n else:\n return None\n except:\n return None\n\n #now we start at root\n for name in names:\n nextSearchId = None\n for childId in 
self.model[actualSearchId][\"children\"]:\n if self.model[childId][\"name\"] == name:\n #this is a match\n nextSearchId = childId\n break\n if not nextSearchId:\n return None\n #we found it, go deeper now\n actualSearchId = nextSearchId\n return actualSearchId\n\n def get_node(self,desc):\n \"\"\" instantiate a Node() object on the node given as desc\n Args:\n desc (string): give a browsepath (\"root.myfolder.myvariable\") or a nodeId (\"10\")\n Returns:\n (Node()): a node object of the given node\n None if not found\n \"\"\"\n with self.lock:\n id = self.__get_id(desc)\n if id:\n return Node(self,id)\n\n\n def find_node(self,search,matchProperty={}):\n \"\"\"\n the search is a match pattern for the path, we return the first match\n with\n \"\"\"\n with self.lock:\n for id in self.model:\n if search in self.get_browse_path(id):\n if matchProperty!={}:\n for k,v in matchProperty.items():\n if k not in self.model[id]:\n continue\n if self.model[id][k]!=v:\n continue\n return Node(self,id)\n return Node(self,id)\n return None\n\n def find_nodes(self,search,matchProperty={}):\n \"\"\"\n the search is a match pattern for the path, we return all matches as nodes\n \"\"\"\n found = []\n with self.lock:\n for id in self.model:\n if search in self.get_browse_path(id):\n if matchProperty!={}:\n for k,v in matchProperty.items():\n if k not in self.model[id]:\n break\n if self.model[id][k]!=v:\n break\n found.append(Node(self,id))\n return found\n\n def get_node_info(self,desc,includeLongValues=True):\n \"\"\"\n Args:\n desc (string): give a browsepath (\"root.myfolder.myvariable\") or a nodeId (\"10\")\n includeLongValue if true, we include values for columns and files\n Returns:\n (dict): a dictionary holding all properties of the node includin references and children\n \"\"\"\n with self.lock:\n id = self.__get_id(desc)\n if not id: return None\n\n #we do not include values of columns and files\n if self.model[id][\"type\"] in [\"column\",\"file\",\"timeseries\"]:\n if includeLongValues:\n return copy.deepcopy(self.model[id])\n else:\n return {k:v for k,v in self.model[id].items() if k!=\"value\"}\n elif self.model[id][\"type\"]== \"object\":\n return {k: v for k, v in self.model[id].items() if k != \"object\"} # don't take the \"object\" key\n else:\n #take all\n return copy.deepcopy(self.model[id])\n\n\n\n\n def __get_node_with_children(self,id,nodes,includeForwardRefs=True):\n \"\"\"\n recursive helper for get_branch\n\n \"\"\"\n if self.model[id][\"type\"] in [\"file\",\"column\",\"timeseries\"]:\n #we do not take these values\n nodes[id]={k:v for k,v in self.model[id].items() if k!=\"value\"} # copy the whole but leave out the value\n elif self.model[id][\"type\"] == \"referencer\":\n nodes[id] = self.model[id]\n if includeForwardRefs:\n #for referencers, we take the direct targets\n for targetId in self.model[id][\"forwardRefs\"]:\n if self.model[targetId][\"type\"] in [\"file\", \"column\",\"timeseries\"]:\n # we do not take these values\n target = {k: v for k, v in self.model[id].items() if k != \"value\"} # copy the whole but leave out the value\n else:\n target = copy.deepcopy(self.model[targetId])\n #xxx todo, we might take the wrong backrefs with us, also these target nodes might not have their parent here\n nodes[targetId]=target\n else:\n nodes[id]=self.model[id]\n\n for child in self.model[id][\"children\"]:\n nodes.update(self.__get_node_with_children(child,nodes,includeForwardRefs))\n\n\n return nodes\n\n\n def get_branch(self,desc,includeRoot=True,includeForwardRefs=True):\n 
\"\"\"\n get a branch of the model starting from desc including all children excluding:\n columns\n files\n for referencers, we do not follow deep search for leaves, we just include the first level referenced nodes\n referencers poiting to nodes that are not part of the branch will also be included\n\n\n Returns:\n a list of nodedicts that can be used as a full valid model again\n\n \"\"\"\n with self.lock:\n id = self.__get_id(desc)\n if not id: return None\n nodes = {}\n nodes.update(self.__get_node_with_children(id,nodes,includeForwardRefs))\n #now we also need all nodes to the desc\n if includeRoot:\n while self.model[id][\"parent\"]!=\"0\":\n #the parent is not invalid so take the parent, we don't make further check for files and otheres\n parentId = self.model[id][\"parent\"]\n parentNode = copy.deepcopy(self.model[parentId])\n parentNode[\"children\"]=[id] # the other side-children are not of interest\n nodes.update({parentId:parentNode})\n id = self.model[id][\"parent\"]\n\n return copy.deepcopy(nodes)\n\n def __get_node_with_children_pretty(self,id,depth = None,ignore = []):\n \"\"\"\n recursive helper for get_branch_pretty\n args:\n nodes: the nodes so far\n\n \"\"\"\n\n #t=utils.Profiling(f\"id {self.get_browse_path(id)}, ignore = {ignore}\")\n\n result = {}\n\n node = self.model[id]\n #create my properties\n props = {k: copy.deepcopy(v) for k, v in node.items() if k not in [\"value\", \"backRefs\", \"children\"]}\n if node[\"type\"] not in [\"file\", \"column\",\"timeseries\"]:\n # we also take the value then\n props[\"value\"] = copy.deepcopy(node[\"value\"])\n if node[\"type\"] == \"referencer\" and (depth is None or depth>0):\n #tt = utils.Profiling(\"get leaves\")\n leaves = self.get_leaves_ids(id)\n #print(tt)\n #tt.start(\"get leaves data\")\n forwards = [self.get_browse_path(leaf) for leaf in leaves]\n props[\"leaves\"]=forwards\n #tt.lap(\"1\")\n props[\"targets\"] = [self.get_browse_path(id) for id in self.model[id][\"forwardRefs\"]]\n props[\"leavesIds\"]=leaves\n props[\"leavesValues\"] = [self.get_value(id) if self.model[id][\"type\"] not in [\"file\",\"column\",\"timeseries\"] else None for id in leaves]\n #tt.lap(\"2\")\n validation = []\n props[\"leavesProperties\"]={}\n for id in leaves:\n prop = self.get_node_info(id,includeLongValues=False)\n if \"validation\" in prop:\n validation.append(prop[\"validation\"])\n else:\n validation.append(None)\n props[\"leavesProperties\"][id]=prop\n props[\"leavesProperties\"][id][\"browsePath\"]=self.get_browse_path(id)\n #tt.lap(\"3\")\n props[\"leavesValidation\"] = validation\n #print(tt)\n #make sure we have the browsepath on board\n if \"browsePath\" not in props:\n props[\"browsePath\"]=self.get_browse_path(id)\n result[\".properties\"]=props\n\n if depth is None or depth>0:\n #now the children\n nextDepth = None\n if depth is not None:\n nextDepth = depth -1\n for childId in node[\"children\"]:\n childPath = self.get_browse_path(childId)\n if any([ignoreName in childPath for ignoreName in ignore]):\n #self.logger.debug(f\"ignore {childPath}\")\n pass\n else:\n result[self.model[childId][\"name\"]]=self.__get_node_with_children_pretty(childId,nextDepth,ignore)\n #print(t)\n return result\n\n\n\n def get_branch_pretty(self,desc,depth=None,ignore = []):\n \"\"\"\n get a branch in the form\n \"child1\":{\"child3\":... 
\".type\":, \".value\"\n \"child2\":{\n the properties occurr in \".property\" style, the children are direct entries\n we only use names\n for the referencers, the \".forwardRefs\" are the leaves with full path: [\"root.folder1.tzarget2\",\"root.varibale.bare\"..]\n Args:\n desc [string] the root node to start from\n depth [int] the depth to look into\n \"\"\"\n with self.lock:\n #p=utils.Profiling(\"get_branch_pretty\")\n id = self.__get_id(desc)\n if not id: return None\n res = self.__get_node_with_children_pretty(id,depth,ignore)\n #self.logger.debug(p)\n return res\n\n\n\n\n\n\n\n\n\n def get_node_with_children(self,desc):\n \"\"\" retrieve node information including children of the first level\n Args:\n desc (string): give a browsepath (\"root.myfolder.myvariable\") or a nodeId (\"10\")\n Returns:\n (Node()): a node object of the given node including the browsepath\n None if not found\n \"\"\"\n with self.lock:\n id = self.__get_id(desc)\n if not id: return None\n\n response = copy.deepcopy(self.model[id])\n response[\"browsePath\"]=self.get_browse_path(id)\n if response[\"children\"]!=[]:\n children =[]\n for childId in response[\"children\"]:\n childInfo = copy.deepcopy(self.model[childId])\n childInfo[\"browsePath\"]=self.get_browse_path(childId)\n children.append(childInfo)\n response[\"children\"]=children\n return response\n\n\n def get_models(self):\n \"\"\"\n get the available model files from the disk under /models\n : Returns: a list of strings\n \"\"\"\n try:\n mydir = myGlobalDir\n os.chdir(mydir) # to enable import easily\n files = os.listdir(mydir + '/models')\n # take only the ones with '.json, but cut the '.model.json' extension\n models = [f.split('.model')[0] for f in files if f.endswith(\".json\")]\n\n return models\n except Exception as ex:\n self.logger.error(\"Model.get_models() failed \"+str(ex))\n return []\n\n def get_info(self):\n \"\"\"\n get some information about the model\n Returns: (dict) key value pairs on information of the model,\n \"\"\"\n return {\"name\":self.currentModelName}\n\n def import_plugins_from_directory(self, plugin_directory: str, check_file_marker = True):\n \"\"\" find all plugins from plugin_directory.\n take from there the templates from the files and the functions\n Args:\n check_file_marker: if set to True, we expect a \"#21datalabplugin\" string in the first line\n \"\"\"\n if plugin_directory not in sys.path:\n sys.path.append(plugin_directory) # for the importlib to find the stuff\n\n plugin_filenames = glob.glob(os.path.join(plugin_directory, '**/*.py'), recursive=True)\n for fileName in plugin_filenames:\n if fileName.startswith('__'):\n continue # avoid __pycache__ things\n #we need to check if extra plugins have the \"#21datalabplugin\n if check_file_marker:\n absolutePath = os.path.join(myGlobalDir,fileName)\n f = open(absolutePath,\"r\")\n firstLine = f.readline()\n f.close()\n if firstLine != \"#21datalabplugin\\n\":\n continue\n\n filename_relative = os.path.relpath(fileName, plugin_directory)\n moduleName = os.path.splitext(filename_relative)[0].replace(os.path.sep, '.')\n self.logger.info(f\"import plugin lib {moduleName}\")\n module = importlib.import_module(moduleName)\n module = importlib.reload(module) # if we change an already imported, python uses the cache, so to make sure we always get the latest, reimport here\n #now analyze all objects in the module\n for objName in dir(module):\n if objName.startswith('__'):\n continue # these are python generated info objects, we don't want them\n element = 
element = getattr(module,objName)\n if type(element) is dict:\n #this is a template information\n self.templates[moduleName+\".\"+objName]=copy.deepcopy(element)\n elif (inspect.isclass(element)):\n newClass = {\"module\":module,\"class\":element}\n self.objectClasses[moduleName + \".\" + objName] = newClass\n elif callable(element):\n #this is a function, get more info\n newFunction = {\"module\":module, \"function\":element}\n self.functions[moduleName+\".\"+objName]=newFunction\n def import_default_plugins(self):\n \"\"\" find all plugins (= all .py files in the ./plugins folder)\n take from there the templates from the files and the functions\n don't check them for the #21datalabplugin marker\n\n this function is executed on startup of the model\n\n \"\"\"\n self.import_plugins_from_directory(os.path.join(myGlobalDir, 'plugins'),check_file_marker=False)\n\n def get_id(self,ids):\n \"\"\" convert a descriptor or a list of descriptors into ids (which can be used as entries to the model dictionary)\n Args:\n ids (string, list(string)): a single or list of strings containing either an id (\"101\") or browsepath (\"root.myfolder.myvar\")\n Returns:\n a list(id) or id as string\n \"\"\"\n with self.lock:\n if type(ids) == type(list()):\n newList = []\n for id in ids:\n newList.append(self.__get_id(id))\n return newList\n elif type(ids) == type(dict()):\n newDict = {}\n for oldId in ids:\n id = self.__get_id(oldId)\n newDict[id]=ids[oldId] #also copy the value\n return newDict\n else:\n #assume its scalar\n return self.__get_id(ids)\n\n def get_browse_path(self,desc):\n \"\"\"\n Args:\n desc(string): a node id or browsepath\n Returns:\n (string) a browsepath\n \"\"\"\n with self.lock:\n id = self.get_id(desc)\n if not id in self.model:\n return None\n path = self.model[id][\"name\"]\n while 1:\n id = self.model[id][\"parent\"]\n if id ==\"0\":\n break\n else:\n path = self.model[id][\"name\"]+\".\"+path\n return path\n\n\n def push_nodes(self,nodeDicts):\n \"\"\"\n push ready nodedicts into the model\n this is a dangerous function as it does not adjust references, parent/child relations whatsoever\n you must take care of that yourself\n \"\"\"\n for nodeDict in nodeDicts:\n self.logger.warning(f\"pushing node {nodeDict['id'], nodeDict['name']}\")\n self.model[nodeDict[\"id\"]]=copy.deepcopy(nodeDict)\n self.__notify_observers([],None) # just trigger the treeupdate for now\n #xxx todo notify!\n\n\n\n def create_node(self,parent=\"root\",type=\"folder\",value=None,name=\"newNode\",properties={}):\n \"\"\"\n create a node inside the model by giving several infos\n Args:\n parent: a descriptor (browsepath or id) of the parent\n type: the type of the node\n value: (optional) give a value for the node\n name(string): a name of the node, must be unique under the parent\n properties (dict): a dictionary containing further key-values to be placed into the node as properties\n Returns:\n (string) nodeid,\n None for problem during creation\n \"\"\"\n #check if parent exists\n with self.lock:\n parentId = self.get_id(parent)\n if not parentId:\n return None\n #check if the same name exists already\n newpath = self.get_browse_path(parent)+\".\"+name\n if self.get_id(newpath):\n #we found it, it exists already, so we can't create it\n return None\n # we can create this node\n if self.idCreationHash == True:\n newId = str((random.randrange(2**64))) # a 64 bit random value\n else:\n self.globalIdCounter += 1\n newId = str(self.globalIdCounter)\n newNode = copy.deepcopy(self.nodeTemplate)\n 
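# the nodeTemplate presumably carries the default node keys (e.g. children, parent, forwardRefs, backRefs); we overwrite the specifics next\n 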
newNode.update({\"id\":newId,\"name\":name,\"type\":type,\"parent\":parentId})\n if properties !={}:\n newNode.update(properties)\n if value != None:\n newNode[\"value\"]=value\n self.model[parentId][\"children\"].append(newId)\n self.model[newId] = newNode\n if newNode[\"type\"] == \"timeseries\":\n self.time_series_create(newId)\n if newNode[\"type\"] == \"eventseries\":\n self.event_series_create(newId)\n if newNode[\"type\"] == \"object\":\n if \"class\" not in newNode:\n newNode[\"class\"]=None\n if \"autoReload\" not in newNode:\n newNode[\"autoReload\"] = False # set this to true means: on a \"instantiate object, we reload the module\n self.__notify_observers(parentId,\"children\")\n return newNode[\"id\"]\n\n def create_node_from_path(self,path,properties={\"type\":\"variable\"}):\n \"\"\"\n create a node from a path given, all intermediate nodes of th path given that do not yet exist are also created as folder type\n Args:\n path(string): the path to the node to be creates\n properties(dict): the properties of the node\n example:\n create_node_from_path(\"root.myfolder.something.thisvar\")\n this will create myfolder as folder, something as folder, thisvar as variable and will also\n set all hierarchies correctly\n Returns:\n (string) the nodeid created or\n None if problem during creation\n \"\"\"\n currentNode = \"root\" #root\n with self.lock:\n for node in path.split('.')[1:-1]:\n if not self.__get_id(currentNode+'.'+node):\n #this one does not exist, so make it\n self.create_node(currentNode,name=node)\n currentNode += '.'+node\n\n return self.create_node(parent=currentNode,name=path.split('.')[-1],properties=properties)\n\n def create_nodes_from_template(self,parent=\"root\",template=[]):\n \"\"\"\n deprecated!! this is the old style of templates as lists, now it's a dict\n Create a node from a template; a template is a list of node-dicts,\n Args:\n parent(string): descriptor of the parent node under which the nodes of the template should be created\n template: a list of node dicts of the nodes to be creates, children are allowed as dict\n Returns:\n (boolenan) True for created, False for error\n Example:\n create_nodes_from_template(parent=\"root.myfolder\",[{\"name\":\"myvariable1\",\"type\":\"variable\"},\n {\"name\":\"myfolder\",\"type\":\"folder\",\"children\":[\n {\"name\":\"mysubvar\",\"type\":\"variable\"}]])\n \"\"\"\n\n with self.lock:\n parentId = self.get_id(parent)\n if not parentId:\n return False\n\n newNodeIds = [] #these must be corrected later\n for node in template:\n #we take all info from the nodes and insert it into the tree\n nodeName = node[\"name\"]\n newNodeId = self.create_node(parentId,name=nodeName,properties=node)\n newNodeIds.append(newNodeId)\n #do we have \"children per template syntax\"?, then remove that property from the nodes and make more nodes\n if \"children\" in self.model[newNodeId]:\n savedChildren = copy.deepcopy(self.model[newNodeId][\"children\"])\n self.model[newNodeId][\"children\"]=[] # empty out\n for child in savedChildren:\n newChildId = self.create_node(newNodeId,name=child[\"name\"],properties=child)\n newNodeIds.append(newChildId)\n\n #now correct missing stuff\n for nodeId in newNodeIds:\n if self.model[nodeId][\"type\"]== \"referencer\":\n # convert the path of references into an id: get the parent path, add the tail, convert to id\n forwardReferences =self.model[nodeId][\"forwardRefs\"] #make a copy, we'll delete this\n self.model[nodeId][\"forwardRefs\"]=[]\n parentPath = 
parentPath = self.get_browse_path(self.model[nodeId][\"parent\"])\n for forwardRef in forwardReferences:\n forwardPath = parentPath+forwardRef\n self.add_forward_refs(nodeId,[forwardPath])\n return True\n\n\n def __create_nodes_from_path_with_children(self,parentPath,nodes):\n \"\"\"\n recursive helper function for create_template_from_path\n we build all nodes under the parentPath on this level and then the children\n we return a list of all created node ids\n \"\"\"\n createdNodes = []\n\n for node in nodes:\n newModelNode = {}\n for k, v in node.items():\n if k not in [\"children\", \"parent\", \"id\", \"browsePath\"]: # avoid stupid things\n newModelNode[k] = v\n newId = self.create_node_from_path(parentPath+'.'+newModelNode[\"name\"],newModelNode)\n if newId:\n createdNodes.append(newId)\n if \"children\" in node:\n createdNodes.extend(self.__create_nodes_from_path_with_children(parentPath+'.'+newModelNode[\"name\"],node[\"children\"]))\n return createdNodes\n\n\n\n def create_template_from_path(self,path,template):\n \"\"\"\n Create a template from a path given, the template contains one or more nodes\n the path must not yet exist!\n Args:\n path(string): the path under which the template will be placed. the template always contains\n a root node, this will be renamed according to the path\n Returns:\n (boolean) True for created, False for error\n \"\"\"\n\n with self.lock:\n #first create the template root node\n #we rename the template according to the path requested\n template[\"name\"]=path.split('.')[-1]\n parentPath = '.'.join(path.split('.')[:-1])\n newNodeIds = self.__create_nodes_from_path_with_children(parentPath,[template])\n self.logger.debug(f\"create_template_from_path, new nodeids: {newNodeIds}\")\n\n #now adjust the references of the new nodes and of the ones that were there\n for newNodeId in newNodeIds:\n if \"references\" in self.model[newNodeId]:\n #we must create forward references\n for ref in self.model[newNodeId][\"references\"]:\n # now there are two options:\n # the given path is of the form templatename.levelone.leveltwo inside the template\n # we replace the \"templatename\" with the path name the template was given\n # or the path is an absolute id or browsepath, then we don't modify\n splitted = ref.split('.')\n if len(splitted) == 1 or splitted[0]==\"root\":\n targetPath = ref\n else:\n targetPath = parentPath+'.'+template['name']+'.'+'.'.join(ref.split('.')[1:])\n self.add_forward_refs(newNodeId,[targetPath])\n del self.model[newNodeId][\"references\"] # we remove the reference information from the template\n\n def get_templates(self):\n \"\"\"\n give all templates loaded\n Returns: a dict with entries containing the full templates\n \"\"\"\n with self.lock:\n return copy.deepcopy(self.templates)\n\n def add_forward_refs(self,referencerDesc,targets,allowDuplicates = True):\n \"\"\"\n adding forward references from a referencer to other nodes, the forward references are appended to the list\n of forward references of the referencer node\n references to oneself are not allowed\n\n Args:\n referencerDesc (string): descriptor of the referencer node from which we want to add forward references\n targets (list(descriptors)): list of node descriptors to which we want to add forward refs\n Returns:\n True/False for success\n \"\"\"\n with self.lock:\n fromId = self.get_id(referencerDesc)\n if not fromId:\n self.logger.error(\"can't set forward ref on \"+str(referencerDesc))\n return False\n\n if type(targets) is not list:\n targets = [targets]\n\n if targets==[]:\n return True\n\n 
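# usage sketch (hypothetical paths): model.add_forward_refs(\"root.myreferencer\", [\"root.folder.var1\", \"root.folder.var2\"])\n # each target also receives a backRefs entry, which the model keeps for fast reverse lookups\n 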
if not self.model[fromId][\"type\"]==\"referencer\":\n self.logger.error(\"can't set forward ref on \"+str(referencerDesc)+\", is not of type referencer, is type \"+self.model[fromId][\"type\"])\n return False\n for target in targets:\n toId = self.get_id(target)\n if not toId:\n continue\n if toId == fromId:\n continue\n if not allowDuplicates:\n if toId in self.model[fromId][\"forwardRefs\"]:\n continue # ignore this forward ref, we have it already\n self.model[toId][\"backRefs\"].append(fromId)\n self.model[fromId][\"forwardRefs\"].append(toId)\n self.__notify_observers(fromId,\"forwardRefs\")\n return True\n\n def lock_model(self):\n self.lock.acquire()\n\n def release_model(self):\n self.lock.release()\n\n def get_model(self):\n \"\"\"\n Returns: the full deepcopy of the internal model object (a dict of the node dictionaries)\n \"\"\"\n with self.lock:\n #also add the browsepath to all nodes\n for nodeid in self.model:\n self.model[nodeid][\"browsePath\"]=self.get_browse_path(nodeid)\n return copy.deepcopy(self.model)\n\n def get_model_for_web(self,getHash=False):\n \"\"\"\n Returns: the full deepcopy of the internal model object (a dict of the node dictionaries)\n but leaving out the column values (this can be a lot of data)\n and the file values (files are binary or strings with big size, typically serialized ML-models)\n for files and columns, we either return a string like \"len 12344\" or a sha1 hash value instead\n \"\"\"\n\n model = {}\n p=utils.Profiling(\"get_model_for_web\")\n with self.lock:\n for nodeId, nodeDict in self.model.items():\n if nodeDict[\"type\"] in [\"column\",\"file\",\"timeseries\",\"eventseries\"]:\n # with columns we filter out the values\n node = {}\n for nk, nv in nodeDict.items():\n if nk == \"value\":\n try:\n if not getHash:\n node[nk] = \"len \" + str(len(nv))\n else:\n start = datetime.datetime.now()\n hash = hashlib.sha1(nv.tobytes())\n node[nk] = hash.hexdigest()\n self.logger.debug(f\"hashed {nodeDict['name']} in {(datetime.datetime.now()-start).total_seconds()} hash:{node[nk]}\")\n except:\n node[nk] = \"None\"\n else:\n node[nk] = copy.deepcopy(nv) # values can be list, dict and deeper objects\n model[nodeId] = node\n elif nodeDict[\"type\"]==\"object\":\n node={k:v for k,v in nodeDict.items() if k!=\"object\"}\n model[nodeId]=node\n\n else:\n #this node is not a column, but can still hold huge data\n model[nodeId] = copy.deepcopy(nodeDict) # values can be list, dict and deeper objects\n model[nodeId][\"browsePath\"] = self.get_browse_path(nodeId) #also add the browsepath\n self.logger.debug(f\"{p}\")\n return model\n\n\n def remove_forward_refs(self,sourceDesc,targetDescriptors = [], deleteDuplicates=False):\n \"\"\"\n remove forward references from a referencer, this also removes the backreference from the target\n Args:\n sourceDesc: the descriptor of the referencer node\n targetDescriptors: a list of descriptors, if missing we delete all\n deleteDuplicates: if set true, we delete all references to a target if we have more than one reference\n\n Returns:\n True/False for success\n \"\"\"\n with self.lock:\n fromId = self.get_id(sourceDesc)\n if not fromId:\n return False\n if not self.model[fromId][\"type\"] == \"referencer\":\n return False # only for referencers\n if targetDescriptors == []:\n targets = self.model[fromId][\"forwardRefs\"].copy()\n else:\n targets = self.get_id(targetDescriptors)\n\n if targets == []:\n return True # nothing to do\n\n for toId in targets:\n if not toId:\n continue # we skip Nones coming from the get_id\n 
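# duplicates can exist in forwardRefs because add_forward_refs allows them by default\n 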
if deleteDuplicates:\n # maybe multiple entries\n while toId in self.model[fromId][\"forwardRefs\"]: # maybe multiple entries\n self.model[fromId][\"forwardRefs\"].remove(toId)\n self.model[toId][\"backRefs\"].remove(fromId)\n else:\n # we delete only one entry\n self.model[fromId][\"forwardRefs\"].remove(toId)\n self.model[toId][\"backRefs\"].remove(fromId)\n self.__notify_observers(fromId,\"forwardRefs\")\n return True\n\n\n def remove_forward_ref(self,sourceDesc,targetDesc):\n \"\"\"\n remove a forward reference from a referencer, this also removes the backreference from the target\n Args:\n sourceDesc: the descriptor of the referencer node\n targetDesc: the descriptor of the target node\n Returns:\n True/False for success\n \"\"\"\n with self.lock:\n fromId = self.get_id(sourceDesc)\n toId = self.get_id(targetDesc)\n if not fromId or not toId:\n return False\n if not self.model[fromId][\"type\"]==\"referencer\":\n return False # only for referencers\n\n try:\n self.model[fromId][\"forwardRefs\"].remove(toId)\n self.model[toId][\"backRefs\"].remove(fromId)\n self.__notify_observers(fromId, \"forwardRefs\")\n return True\n except:\n return False\n\n def remove_back_ref(self,sourceDesc,targetDesc):\n \"\"\"\n remove a backwards reference from any node to a referencer, this also removes the forward reference from the target\n actually, this function is just a helper. Normally, we only talk about \"forward references\";\n each forward reference also creates a backwards reference in the model, but this is just for internal look up speed\n the reference here is targetDesc -> (forwardRef) -> sourceDesc\n Args:\n sourceDesc: the descriptor of the node that holds a backwards reference\n targetDesc: the descriptor of the node that holds the forward reference\n Returns:\n True/False for success\n \"\"\"\n with self.lock:\n return self.remove_forward_ref(targetDesc,sourceDesc)\n\n def add_property(self,nodeDesc,property,value):\n \"\"\"\n add an arbitrary property entry for a node; a node is a key-value store, a property is a key with a value\n Args:\n nodeDesc: the descriptor of the node\n property: the key to be created on the node\n value: the value to be stored for this property\n Returns:\n True for created\n False for node not found or if the property already exists\n \"\"\"\n with self.lock:\n id = self.get_id(nodeDesc)\n if not id:\n return False\n if property in self.model[id]:\n return False # have this property already\n self.model[id][property]=value\n self.__notify_observers(id, property)\n return True\n\n def set_properties(self,properties={},nodeDesc=None):\n \"\"\"\n changes an arbitrary set of properties given by the dict or adds them if not existent; some properties are not allowed here:\n children, parent, forward and backward refs; allowed are all others including type, name, value\n Args:\n nodeDesc: the descriptor of the node, is optional, can also be given as browsePath or id in the properties dict\n properties: the new or changed properties\n Returns:\n True for done\n False for node not found\n \"\"\"\n with self.lock:\n if nodeDesc:\n id = self.get_id(nodeDesc)\n elif \"id\" in properties:\n id = properties[\"id\"]\n elif \"browsePath\" in properties:\n id = self.get_id(properties[\"browsePath\"])\n else:\n self.logger.error(\"set properties is missing id \")\n return False\n if not id:\n return False\n\n notificationProperties = []\n for k,v in properties.items():\n if k in [\"id\",\"browsePath\",\"children\",\"parent\",\"forwardRefs\",\"backRefs\"]:\n continue # we ignore these entries\n 
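# everything else is written straight into the node dict; usage sketch (hypothetical path): model.set_properties({\"browsePath\": \"root.myvar\", \"value\": 42})\n 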
self.model[id][k]=v # overwrite or set new\n notificationProperties.append(k)\n self.__notify_observers(id,notificationProperties)\n return True\n\n\n def find_all_children_recursive(self,nodeIds):\n \"\"\" find all children recursively; takes a list of node ids and returns the ids of all nodes in those subtrees including the given ones \"\"\"\n with self.lock:\n children = []\n for id in nodeIds:\n if self.model[id][\"children\"]:\n children.extend(self.find_all_children_recursive(self.model[id][\"children\"]))\n children.append(id)\n return children\n\n\n #delete node and all subnodes\n def delete_node(self,desc):\n \"\"\"\n delete a node and all its recursive children;\n flow:\n 1) make a list of all nodes to be deleted\n 2) rip off all references to/from the deleted nodes\n 3) delete all nodes\n 4) notify observers about the children change on the parents of the deleted nodes\n\n desc(string): the descriptor of the node\n Returns:\n True for success\n False for node not found\n \"\"\"\n with self.lock:\n id = self.get_id(desc)\n if not id:\n return False\n\n nodesToDelete = self.find_all_children_recursive([id])\n self.logger.debug(f\"delete nodes {nodesToDelete}\")\n childNotify = []\n #first rip off all references\n for id in nodesToDelete:\n forwards = self.model[id][\"forwardRefs\"].copy()\n backwards = self.model[id][\"backRefs\"].copy()\n for forward in forwards:\n self.remove_forward_ref(id,forward) # this will also trigger observers\n for backward in backwards:\n self.remove_back_ref(id,backward) # this will also trigger observers\n\n #now delete the actual nodes\n for id in nodesToDelete:\n parentId = self.model[id][\"parent\"]\n if parentId in self.model:\n self.model[parentId][\"children\"].remove(id)\n childNotify.append(parentId)\n if self.model[id][\"type\"]==\"timeseries\":\n self.time_series_delete(id)\n del self.model[id]\n\n #now notify only those who still exist\n goodNotify=[]\n for id in childNotify:\n if id in self.model:\n goodNotify.append(id)\n if goodNotify:\n self.__notify_observers(goodNotify, \"children\") # make ONE call for the observers\n\n return True\n\n\n # if desc.type is a var or function then we just set the value\n # if it's a \"timeseries\" then we set a column in a table, padded if needed\n def set_value(self,desc,value):\n \"\"\"\n set the value property of a node, if the node does not have a value property yet, it is created here\n Args:\n desc(string): node descriptor\n value (any): any value to be stored\n \"\"\"\n with self.lock:\n id = self.get_id(desc)\n if not id: return None\n #convert if table:\n if self.model[id][\"type\"] == \"column\":\n value = numpy.asarray(value,dtype=numpy.float64)\n\n self.model[id][\"value\"] = value\n self.__notify_observers(id,\"value\")\n return True\n\n def get_value(self,desc):\n \"\"\"\n read out the \"value\" property of a node\n Args:\n desc(string): the node that holds the value\n Returns:\n the value\n None if the node has no \"value\" property\n \"\"\"\n with self.lock:\n id = self.get_id(desc)\n if not id: return None\n\n if self.model[id][\"type\"] == \"timeseries\":\n values = self.time_series_get_table(id)\n if values:\n return values[id][\"values\"]\n else:\n return None\n\n if \"value\" in self.model[id]:\n return copy.deepcopy(self.model[id][\"value\"])\n else:\n return None\n\n\n\n 
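# usage sketch (hypothetical path): model.set_value(\"root.myvar\", 3.14); v = model.get_value(\"root.myvar\")\n # note that \"column\" values are coerced to numpy float64 arrays and \"timeseries\" values are read from the ts table\n 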
def __copy_node(self,id,resolveChildren=False):\n \"\"\"\n get a copy of a node, we don't create a node in the model here!\n copy the node with all properties; if the node is a \"column\", we don't copy the value\n if resolveChildren is set to true, we also copy the direct children\n the copied node can't be used to create a node, as it is the copy of an existing node!\n Args:\n id (string): the node id to be copied\n resolveChildren (bool): False to not copy the children (the new node has no children)\n True to copy-create also the children\n Return:\n (dict) the node\n \"\"\"\n newNode = {}\n for key in self.model[id]:\n if key == \"value\" and self.model[id][\"type\"] in [\"column\",\"file\",\"timeseries\"]:\n newNode[\"value\"]=None\n elif key == \"children\" and resolveChildren:\n #we also copy the children\n newNode[\"children\"]=[]\n for childId in self.model[id][\"children\"]:\n childNode = self.__copy_node(childId)\n newNode[\"children\"].append(childNode)\n else:\n newNode[key]=copy.deepcopy(self.model[id][key])\n return newNode\n\n\n\n def __get_targets(self,id):\n \"\"\"\n this is a recursive helper function for the get_leaves function\n \"\"\"\n targets=[]\n if self.model[id][\"type\"] == \"referencer\":\n for targetId in self.model[id][\"forwardRefs\"]:\n targets.extend(self.__get_targets(targetId))\n elif self.model[id][\"type\"] == \"folder\":\n for targetId in self.model[id][\"children\"]:\n targets.extend(self.__get_targets(targetId))\n else:\n addNode = self.__copy_node(id,resolveChildren=True)\n addNode[\"browsePath\"]=self.get_browse_path(id)\n targets = [addNode]\n return targets\n\n\n def get_leaves_ids(self,desc):\n \"\"\"\n get the list of ids of the leaves, see get_leaves()\n Returns:\n a list of ids of the leaves\n \"\"\"\n leaves = self.get_leaves(desc) # a list of node dicts\n leafIds = []\n for leaf in leaves:\n leafIds.append(leaf[\"id\"])\n return leafIds\n\n\n def get_leaves(self,desc,allowDuplicates=False):\n \"\"\"\n this function returns a list of dicts containing the leaves this referencer points to\n this function works only for nodes of type \"referencer\", as we are following the forward references\n leaves are defined as follows:\n 1) all nodes that are listed under the forward references and which are not of type referencer or folder\n 2) if nodes pointed to are referencers, the targets are again analyzed\n 3) if a node pointed to is a folder, all children of the folder are taken which are not referencer or folder themselves\n folders and referencers inside the folder are not taken into account\n doing so, hierarchies of referencers are unlimited, hierarchies of folders are only of depth 1\n Returns:\n all node dicts which are considered leaves as a list of node dicts\n \"\"\"\n with self.lock:\n id = self.__get_id(desc)\n if not id:return None\n\n targets=self.__get_targets(id)\n if targets and targets[0][\"id\"] == id:\n #this can happen if the node is neither a folder nor a referencer and had no children\n targets.pop(0)\n\n #before we return, we remove duplicates if wanted\n if targets and allowDuplicates == False:\n reducedTargets = []\n ids = []\n for t in targets:\n if t[\"id\"] in ids:\n continue\n reducedTargets.append(t)\n ids.append(t[\"id\"])\n return reducedTargets\n else:\n return targets\n\n def __get_referencer_parents(self,ids):\n backRefs = []\n #we look back from this node\n\n for id in ids:\n if self.model[id][\"type\"] == \"referencer\":\n #we take this one in\n backRefs.append(id)\n #plus we look further up\n thisBackRefs = self.model[id][\"backRefs\"]\n if thisBackRefs:\n backRefs.extend(self.__get_referencer_parents(thisBackRefs))\n return backRefs\n\n\n def get_referencers_old(self,desc):\n \"\"\"\n find the referencers pointing to a node via the \"leaves algorithm\"\n initially, we take the parent and the backref 
referencers\n Args:\n deep: we support the reverse leaves algorithm including any depth of children levels after the last referencer,\n e.g. a leaves-path of referencer -> referencer -> nodes -> child -> child is a valid match\n \"\"\"\n with self.lock:\n id = self.__get_id(desc)\n if not id:return None\n\n ids = [self.model[id][\"parent\"],id]\n\n if \"0\" in ids:\n ids.remove(\"0\")\n\n referencers = self.__get_referencer_parents(ids)\n return referencers\n\n\n def get_referencers(self,desc,deepLevel = 1):\n \"\"\"\n find the referencers pointing to a node via the \"leaves algorithm\"\n initially, we take the parent and the backref referencers\n Args:\n deepLevel: we support the reverse leaves algorithm including any depth of children levels after the last referencer,\n e.g. a leaves-path of referencer -> referencer -> nodes -> child -> child is a valid match\n we give the number of parent levels to include in the search at the leaves\n default is 1, so the node itself and its parent\n\n \"\"\"\n with self.lock:\n id = self.__get_id(desc)\n if not id:return None\n\n if not deepLevel:\n ids = [self.model[id][\"parent\"],id]\n else:\n ids = self._get_parents(id,deepLevel)\n\n if \"0\" in ids:\n ids.remove(\"0\")\n\n referencers = self.__get_referencer_parents(ids)\n return referencers\n\n def _get_parents(self,id,deepLevel = -1):\n ids = []\n while id != \"1\" and deepLevel >= 0:\n ids.append(id)\n deepLevel -=1\n id = self.model[id][\"parent\"]\n return ids\n\n #get a table with values like in the table stored, start and end times are optional\n # if start, end not given, then we get the full table with no postprocessing at all\n def get_timeseries_table_old(self,variables,startTime=None,endTime=None,noBins=None,agg=\"sample\",includeTimeStamps=None,includeBackGround=None):\n with self.lock:\n variables = self.get_id(variables)\n return self.timeSeriesTables.get_value_table(variables, startTime=startTime, endTime=endTime, noBins=noBins,\n agg=agg,\n includeTimeStamps=includeTimeStamps) # ,startTime,endTime)\n '''\n if startTime == None and endTime ==None:\n #request the full table\n variables = self.get_id(variables) # convert all to ids\n return self.timeSeriesTables.get_value_table(variables,startTime=startTime,endTime=endTime,noBins=noBins,agg=agg,includeTimeStamps=includeTimeStamps)#,startTime,endTime)\n else:\n # this is a more detailed request, we will try to deliver the data in bins and with\n # aggregation postprocessing\n variables = self.get_id(variables) # convert all to ids, not browsepaths\n return self.timeSeriesTables.get_value_table(variables,startTime,endTime,noBins,agg,includeTimeStamps=includeTimeStamps)\n '''\n\n #used in the Node class, give a column variable or the table itself, return the nodeid of the time variable of that table\n def find_table_time_node(self,desc):\n with self.lock:\n table = self.__find_table(self.get_id(desc))\n if not table:\n return None\n\n pathToTimeIndex = self.get_browse_path(table)+\".timeField\"\n timeColumnId = self.get_leaves(pathToTimeIndex)[0]['id'] # this referencer must point to only one node\n return timeColumnId\n\n def find_table_node(self,desc):\n \"\"\"\n get the node id of a table giving a column node of the table as input\n Args:\n desc (string): a node descriptor of a column node belonging to the table\n Returns:\n the node id of the table node\n\n \"\"\"\n with self.lock:\n return self.__find_table(desc)\n\n 
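# usage sketch (hypothetical table): columnsReferencerId = model.get_child(\"root.mytable\", \"columns\")\n 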
def get_child(self,desc,childName):\n \"\"\"\n get a child based on the name given\n Args:\n desc: node descriptor of the node under which we look for children\n childName: the child name to look for\n Returns:\n a nodeid if we find the child with \"name\" under the desc or None if not found\n \"\"\"\n with self.lock:\n nodeInfo = self.get_node_info(desc)\n if nodeInfo:\n for childId in nodeInfo['children']:\n childInfo = self.get_node_info(childId)\n if childInfo[\"name\"] == childName:\n return childId\n return None\n\n def get_children_dict(self,desc):\n \"\"\"\n create a dictionary with key = childName and value = nodedict\n Args:\n desc: the node descriptor\n Returns:\n a dict\n \"\"\"\n with self.lock:\n childrenDic={}\n id = self.get_id(desc)\n if not id:\n return None\n\n for childId in self.model[id][\"children\"]:\n child = self.get_node_info(childId)\n childrenDic[child[\"name\"]]=child\n return childrenDic\n\n\n\n def get_table_len(self,desc):\n \"\"\"\n get the current length of a table\n Args:\n desc: the node descriptor of type table\n Returns:\n the current length of the columns of the table, None if error\n \"\"\"\n with self.lock:\n\n tableId = self.get_id(desc)\n if not tableId: return None\n if not self.model[tableId][\"type\"]==\"table\": return None\n\n try:\n columnid = self.get_child(tableId,\"columns\")\n if not columnid: return None\n columnIds = self.get_leaves_ids(columnid)\n if columnIds:\n return len(self.model[columnIds[0]][\"value\"])\n except:\n return None\n\n\n\n\n def get_timeseries_table(self,variables,startTime=None,endTime=None,noBins=None,agg=\"sample\",includeTimeStamps=None,format=\"array\",includeBackGround=None):\n \"\"\"\n get a time series table from variables. The table is returned as a list[list] object\n all variables requested must be of type \"column\" and must belong to the same table:\n all columns requested here must have a direct backreference to the same node of type \"columns\"\n todo: also allow \"columns\" to point to folders or multiple hierarchies of referencing/folders\n\n Args:\n variables (list(nodedescriptors)): nodes to be part of the data table requested (ordered!)\n startTime, endTime: the start and end time of the table given as seconds since epoch\n #we also allow the special case of endTime = 0 and startTime = -interval\n # we also allow the special case of startTime given and endTime = 0\n noBins(int): the number of samples to be returned inside the table between start and end time,\n if None is given, we return all samples (rows) we have in the table and do not aggregate\n agg(string): the aggregation function to be used when we downsample the data,\n \"sample\": this means we just pick out values (we sample) from the data set, this is actually not an aggregation\n includeTimeStamps (bool): currently ignored\n includeBackGround (bool): currently ignored\n Returns (dict):\n key : value\n \"__time\" : list of timestamps for the returned table in epoch seconds\n \"variable1\": the list of float values of one of the requested variables\n \"\"\"\n 
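# usage sketch (hypothetical columns): model.get_timeseries_table([\"root.t1.pressure\",\"root.t1.temp\"], startTime=0, endTime=100, noBins=50, format=\"dict\")\n # would return the two columns downsampled to ~50 rows plus the \"__time\" timestamps\n 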
with self.lock:\n #first check if all requested timeseries are columns from the same table\n vars = self.get_id(variables)\n table = []\n for var in vars:\n if self.model[var][\"type\"] != \"column\":\n self.logger.warn(\"requested time series but not column type\")\n return False\n table.append(self.__find_table(var))\n if len(set(table)) != 1 or set(table)== {None}:\n self.logger.warning(\"not the same table\")\n return False\n\n #get the time field, and make fancy indexing via numpy arrays\n pathToTimeIndex = self.get_browse_path(table[0])+\".timeField\"\n timeColumnId = self.get_leaves(pathToTimeIndex)[0]['id']\n if startTime and endTime:\n times = numpy.asarray(self.model[timeColumnId][\"value\"])\n indices = numpy.where((times>=startTime) & (times<=endTime))[0]\n #xxx todo find the right index\n elif startTime and not endTime:\n #special cases for [-startTime:] and [startTime:] requests\n if startTime < 0:\n #this is the special case that we take an interval from the end\n endTime = self.model[timeColumnId][\"value\"][-1] # the last\n startTime = endTime + startTime # as startTime is negative this is actually a subtraction\n else:\n #starttime is positive\n pass\n times = numpy.asarray(self.model[timeColumnId][\"value\"])\n indices = numpy.where(times >= startTime)[0]\n else:\n indices = numpy.arange(0,len(self.model[timeColumnId][\"value\"])) ## all indices\n\n #now resample the indices to have the right number of bins\n if noBins:\n varIndices = numpy.linspace(indices[0], indices[-1], noBins, endpoint=False, dtype=int)\n else:\n varIndices = indices\n\n if format==\"array\":\n result = []\n for var in variables:\n original = numpy.asarray(self.model[self.get_id(var)][\"value\"])[varIndices] # fancy indexing\n data=original.tolist() # apply the selection with the indices list\n result.append(data)\n else:\n result = {}\n for var in variables:\n original = numpy.asarray(self.model[self.get_id(var)][\"value\"])[varIndices] # fancy indexing\n data = original.tolist() # apply the selection with the indices list\n result[var]=data\n result[\"__time\"]=numpy.asarray(self.model[timeColumnId][\"value\"])[varIndices].tolist()\n return result\n\n\n\n\n\n def add_timeseries(self,blob,fast=False):\n \"\"\"\n add a dictionary of variables to a table, we check if the variables belong to the same table\n also, times that come in as datetime objects are converted to epoch seconds\n Args:\n blob (dict): a dictionary containing keys (node descriptors) and values (scalars)\n Returns:\n True/False for success\n \"\"\"\n with self.lock:\n table = []\n for key in blob:\n id = self.get_id(key)\n if not id:\n self.logger.warn(\"add_timeseries could not find the variable:\" + str(key))\n return False\n if self.model[id][\"type\"] != \"column\":\n self.logger.warn(\"requested time series but not column type\")\n return False\n table.append(self.__find_table(id))\n\n if len(set(table)) != 1 or set(table) == {None}:\n self.logger.warn(\"not the same table\")\n return False\n #here, the request is parsed as ok, let's put the values\n for key in blob:\n id = self.get_id(key)\n value = blob[key]\n if type(self.model[id][\"value\"]) is not list:\n self.model[id][\"value\"]=[]\n\n #we auto-convert time stamps\n if type(value) is datetime.datetime:\n value = date2secs(value)\n self.model[id][\"value\"].append(value) # finally put the value\n\n\n\n #return the id of the table, give a column variable\n def __find_table(self,desc):\n \"\"\"\n return the node id of the table, give a column variable\n !! 
this has no lock, must be called under lock\n Args:\n desc(string): node descriptor of type column or the table itself\n Returns:\n the node id of the table to which the desc node belongs\n \"\"\"\n id = self.get_id(desc)\n if not id: return False\n\n if self.model[id][\"type\"] == \"table\":\n return id\n\n for ref in self.model[id][\"backRefs\"]:\n if self.model[ref][\"name\"] == \"columns\":\n return self.model[ref][\"parent\"]\n return None\n\n def ts_table_add_blob(self,dataBlob):\n \"\"\"\n this function adds a data blob to an existing table, it accepts multiple values at once to speed up internals\n Args:\n dataBlob (dict or list(dict)): containing key:value pairs with key = a descriptor of a column of one table\n value: a scalar or list or numpy array of values\n \"\"\"\n\n if type(dataBlob) is list:\n self.logger.error(\"currently no support for list blobs\")\n return None\n with self.lock:\n\n #first find the table and decide for the type conversion\n tableId = None\n for key in dataBlob:\n if key != '__time':\n tableId = self.__find_table(key)\n break\n if not tableId:\n self.logger.error(\"can't find the table of \"+str(dataBlob[list(dataBlob.keys())[0]]))\n return False\n tableNode = self.get_node(tableId)\n columnsType = numpy.float64 # this is the default\n\n # make sure the time is there and convert it: we accept datetime objects, iso strings or float seconds\n # plus, the key will be the time node id afterwards\n timeNode = tableNode.get_child(\"timeField\").get_leaves()[0]\n #try to find the time entry in the dataBlob, rename it to the timenode id\n timeKeyOptions = ['__time',timeNode.get_browse_path(),timeNode.get_id()]\n for timeKeyOption in timeKeyOptions:\n if timeKeyOption in dataBlob:\n dataBlob[timeNode.get_id()] = dataBlob.pop(timeKeyOption) # from now on the time field is named by its node id\n break\n if timeNode.get_id() not in dataBlob:\n self.logger.error(\"time field entry missing\")\n return False\n\n #now check if all are on the same table and convert the keys to node ids\n variables = list(dataBlob.keys())\n for var in variables:\n if self.__find_table(var) != tableId:\n self.logger.error(\"variables are not on the same table\")\n return False\n id = self.get_id(var)\n if id != var:\n dataBlob[self.get_id(var)]=dataBlob.pop(var) # make new entry as nodeid\n\n\n\n #now check the sizes of the incoming data and convert them to the requested type\n inputSizes = set()\n for key,value in dataBlob.items():\n if key == timeNode.get_id():\n #if we handle the time node, we might have to convert\n if type(value) is list or type(value) is numpy.ndarray:\n newValues = []\n #newValues = numpy.asarray([],dtype=numpy.float64)\n for val in value:\n newValues.append(date2secs(val))\n dataBlob[key] = numpy.asarray(newValues,dtype=numpy.float64) # write it back to the data\n else:\n #it is a scalar\n dataBlob[key] = numpy.asarray([date2secs(value)],dtype=numpy.float64)\n else:\n if numpy.isscalar(dataBlob[key]):\n dataBlob[key]=numpy.asarray([dataBlob[key]],dtype=columnsType) # make a list if it is scalar\n else:\n dataBlob[key]=numpy.asarray(dataBlob[key],dtype=columnsType) # if it is a numpy array already, numpy makes no copy\n inputSizes.add(dataBlob[key].shape[0])\n if len(inputSizes)!=1:\n self.logger.error(\"incoming data has different len, can't handle as padding is unclear\")\n\n # when we are here, we have converted all incoming data to numpy arrays, all belong to the same table\n # and all have the same length, we are ready to put them inside\n #print(\"through\")\n #now append them\n return 
self.__ts_table_add_row(dataBlob,tableNodeId=tableId)\n\n\n def __ts_table_add_row(self,dataBlob,tableNodeId=None,autoPad=True,pad=numpy.NaN):\n \"\"\"\n must be called under lock !!\n this function accepts a dataBlob which is ready to be inserted, we don't make any more checks here\n it must use variables from one table, it must contain data as numpy arrays\n variables of the table which are missing will be filled with pad if autoPad is true\n \"\"\"\n if not tableNodeId:\n tableNode = self.get_node(self.__find_table(list(dataBlob.keys())[0]))\n else:\n tableNode = self.get_node(tableNodeId)\n\n dataLen = dataBlob[list(dataBlob)[0]].shape[0]\n columnNodes = tableNode.get_child(\"columns\").get_leaves()\n for columnNode in columnNodes:\n id = columnNode.get_id()\n if id in dataBlob:\n #we add that one to the table\n if type(self.model[id]['value']) != numpy.ndarray:\n self.model[id]['value'] = dataBlob[id]\n else:\n self.model[id]['value'] = numpy.append(self.model[id]['value'],dataBlob[id])\n else:\n #we must pad\n self.logger.debug(f\"we are padding {id} with {dataLen} values\")\n if type(self.model[id]['value']) != numpy.ndarray:\n self.model[id]['value'] = numpy.full(dataLen,numpy.nan)\n else:\n self.model[id]['value'] = numpy.append(self.model[id]['value'],numpy.full(dataLen,numpy.nan))\n\n return True\n\n\n def append_table(self,blob,autocreate=True,autopad=True, timeSorted = False):\n \"\"\"\n this function accepts a dictionary containing paths and values and adds them as a row to a table\n if autopad is True: it is allowed to leave out columns, those will be padded with numpy.inf\n if autocreate is True: it is allowed to add unknown columns, those will be added automatically under the given name\n\n Args:\n blob(dict):\n keys: node descriptors,\n values: value to be appended to the table (scalar or list per variable is allowed)\n the times should be given in a variable ending with \".time\"\n if the table exists already and has another node for the time-values, then we take the .time values and put them on the timenode\n autocreate(bool): if set to true and the nodes or table in the dict do not exist yet, we autocreate a table\n autopad(bool): if set to true, we automatically pad values in an existing table if variables of the table are not part of the blob\n doing so, we keep a consistent length for all columns of a table\n\n \"\"\"\n\n #first check if we need to autocreate something, also check if we have multiple tables in play\n with self.lock:\n autocreates = []\n tableId = None\n columnsId = None\n numberOfRows = None\n for key in blob:\n id = self.__get_id(key)\n if not id:\n if not autocreate:\n self.logger.warn(\"appending table with unknown variables\")\n return None\n else:\n #we create this thing later\n autocreates.append(key)\n else:\n #the id was found, let's find the right table\n for ref in self.model[id][\"backRefs\"]:\n if self.model[ref][\"name\"] == \"columns\":\n #this is our table\n if not tableId:\n tableId = self.model[ref][\"parent\"]\n columnsId = ref\n numberOfRows = len(self.model[id][\"value\"])\n else:\n if tableId != self.model[ref][\"parent\"]:\n self.logger.warn(\"mixed tables request\")\n return None\n\n\n self.logger.debug(\"append table \"+str(self.get_browse_path(tableId)))\n if autocreates and autocreate:\n #do we even have to create our table?\n if not tableId:\n #make a table structure based on the names given\n tableName = autocreates[1].split('.')[1]+\"_autotable\"\n tableId = self.create_node(parent=\"root\",name=tableName,properties={\"type\":\"table\"})\n 
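# an autocreated table presumably ends up as root.<name>_autotable with two referencer children, \"columns\" and \"timeField\", which is the layout __find_table and get_table_len expect\n 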
columnsId = self.create_node(parent=tableId,name=\"columns\",properties={\"type\":\"referencer\"})\n timeId = self.create_node(parent=tableId, name=\"timeField\", properties={\"type\": \"referencer\"})\n numberOfRows=0\n else:\n #if we don't create the table, here is our timeId\n timeReferencer = self.get_child(tableId, \"timeField\")\n timeId = self.get_leaves_ids(timeReferencer)[0]\n #we also then don't create any new time-field\n autocreates = [path for path in autocreates if path[-5:]!=\".time\"]\n\n self.logger.debug(f\"table var autocreates: {autocreates}\")\n for path in autocreates:\n id = self.create_node_from_path(path,properties={\"type\":\"column\"})\n self.model[id][\"value\"]=numpy.full(numberOfRows,numpy.inf)\n self.add_forward_refs(columnsId,[id])\n if path.split('.')[-1]==\"time\":\n #we just created the time field, we must also give the table struct the info\n self.add_forward_refs(timeId,[id])\n\n tableColumnIds = self.get_leaves_ids(columnsId) # a list of the ids of the columns\n timeReferencer = self.get_child(tableId,\"timeField\")\n timeId = self.get_leaves_ids(timeReferencer)[0]\n timePath = None\n for path in blob:\n if path[-5:] == \".time\":\n timePath = path\n if not timePath:\n self.logger.error(\"no time path given\")\n return False\n\n #now make arrays of all values\n for k,v in blob.items():\n if type(v) is list or type(v) is numpy.ndarray:\n blob[k]=numpy.asarray(v,dtype=numpy.float64)\n else:\n blob[k] = numpy.asarray([v], dtype=numpy.float64)\n\n\n valuesLen = len(blob[list(blob.keys())[0]])\n tableLen = len(self.get_value(timeId))\n if not timeSorted:\n #just append\n for path in blob:\n if path.split('.')[-1]==\"time\":\n id = timeId # we take the existing time node of the table instead of just the variable named \"time\"\n else:\n id = self.get_id(path) # here\n self.model[id][\"value\"] = numpy.append(self.model[id][\"value\"],blob[path]) #todo: this is a very inefficient copy and reallocate\n if id in tableColumnIds:\n tableColumnIds.remove(id)\n #append this value\n for id in tableColumnIds:\n self.model[id][\"value\"] = numpy.append(self.model[id][\"value\"],numpy.full(valuesLen,numpy.inf,dtype=numpy.float64)) # pad the remaining columns with inf\n\n #now trigger observers\n self.__notify_observers(self.get_leaves_ids(columnsId),\"value\")\n else:\n #time sorted: find a place to insert the data in the times\n currentTimes = numpy.asarray(self.get_value(timeId),dtype=numpy.float64)\n startTime = blob[timePath][0]\n endTime = blob[timePath][-1]\n firstIndexGreaterStart, = numpy.where(currentTimes>startTime) #where returns a tuple\n if len(firstIndexGreaterStart) == 0:\n firstIndexGreaterStart = tableLen\n else:\n firstIndexGreaterStart=firstIndexGreaterStart[0]\n\n firstIndexGreaterEnd, = numpy.where(currentTimes > endTime)\n if len(firstIndexGreaterEnd) == 0:\n firstIndexGreaterEnd = tableLen\n else:\n firstIndexGreaterEnd=firstIndexGreaterEnd[0]\n\n if firstIndexGreaterEnd != firstIndexGreaterStart:\n self.logger.error(\"we can't insert the data in a row-wise time manner, only as block\")\n return False\n\n startIndex = firstIndexGreaterStart # the position to insert the incoming data\n self.logger.debug(f\"insert data @{startIndex} of {tableLen}\")\n for path in blob:\n if path.split('.')[-1]==\"time\":\n id = timeId # we take the existing time node of the table instead of just the variable named \"time\"\n else:\n id = self.get_id(path) # here\n self.model[id][\"value\"] = numpy.insert(self.model[id][\"value\"],startIndex,blob[path]) #todo: this is a 
very inefficient copy and reallocate\n if id in tableColumnIds:\n tableColumnIds.remove(id)\n #append this value\n for id in tableColumnIds:\n self.model[id][\"value\"] = numpy.insert(self.model[id][\"value\"],startIndex,numpy.full(valuesLen,numpy.inf,dtype=numpy.float64)) # pad the remaining columns with inf\n\n\n #\n pass\n return True\n\n\n\n def __show_subtree(self,rootId):\n currentBrowsePath = self.get_browse_path(rootId)\n indentation = \"| \"*(len(currentBrowsePath.split('.'))-1)\n print (indentation+\"-\",self.model[rootId][\"name\"],end=\"\")\n noShowProperties=[\"name\",\"parent\",\"children\"]\n for property in self.model[rootId]:\n try:\n if property==\"value\" and len(self.model[rootId][\"value\"])>10:\n print(\",len:\"+str(len(self.model[rootId][\"value\"])),end=\"\")\n except:\n pass\n if not property in noShowProperties:\n try:\n #if this entry has a len and the len is larger than 10, show only a part of it\n if len(self.model[rootId][property]) > 10:\n print(\",\" + property + \"=\" + str(self.model[rootId][property][0:10])+\"...(\"+str(len(self.model[rootId][property]))+\")\", end=\"\")\n else:\n print(\",\" + property + \"=\" + str(self.model[rootId][property]), end=\"\")\n except:\n print(\",\" + property + \"=\" + str(self.model[rootId][property]), end=\"\")\n\n if self.model[rootId][\"type\"]==\"timeseries\":\n print(\",\"+self.time_series_get_info(rootId), end=\"\")\n\n print(\"\")\n for child in self.model[rootId][\"children\"]:\n self.__show_subtree(child)\n\n def execute_object_function(self,desc,functionName,parameter=None):\n with self.lock:\n id = self.get_id(desc)\n object = self.get_object(id)\n if not object:\n return False\n try:\n functionPointer = getattr(object,functionName)\n self.executionQueue.put({\"functionPointer\":functionPointer,\"parameter\":parameter,\"id\":id})\n return True\n except:\n self.logger.error(f\"function {functionName} is not an attribute of object {desc} {object}\")\n return False\n\n\n def execute_function(self,desc,parameter = None):\n \"\"\"\n create a thread to execute a function there,\n if the function has autoReload, we re-import the external\n file\n Args:\n desc: node descriptor of the node (type \"function\") to be executed\n Returns:\n True if the execution thread was launched\n \"\"\"\n\n with self.lock:\n\n\n id = self.get_id(desc)\n if self.model[id][\"type\"]!= \"function\":\n return False\n\n functionName = self.model[id][\"functionPointer\"]\n if not functionName in self.functions:\n self.logger.error(f\"can't find function {functionName} in global list\")\n return False\n\n functionNode = self.get_node(id)\n\n executionType = functionNode.get_child(\"control\").get_child(\"executionType\").get_value()\n if executionType in [\"async\",\"sync\"]:\n self.executionQueue.put(id)\n self.logger.info(f\"function {desc} queued for execution\")\n return True\n elif executionType ==\"threaded\":\n self.logger.info(f\"function {desc} started in thread\")\n thread = threading.Thread(target=self.__execution_thread, args=[id])\n thread.start()\n return True\n else:\n self.logger.error(f\"function {desc} can't be started, unknown execution type {executionType}\")\n return False\n\n #note: all execution types return above, the legacy code below is no longer reached\n #check if the function is interactive, then we reload it right now\n if self.model[id][\"autoReload\"] == True and self.global_auto_reload_enabled():\n #if self.functions[functionName][\"isInteractive\"]:\n # must reload the module\n module = importlib.reload(self.functions[functionName][\"module\"])\n functionPointer = getattr(module,functionName.split('.',1).pop())\n 
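# sketch of the lookup: for a functionPointer like \"mymodule.myfunc\" (hypothetical name), split('.',1).pop() yields \"myfunc\", which is then fetched from the freshly re-imported module\n 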
#now update our global list\n self.functions[functionName][\"module\"] = module\n self.functions[functionName][\"function\"] = functionPointer\n\n #here, the lock is open again!\n try:\n if executionType == \"async\" or executionType == \"threaded\":\n thread = threading.Thread(target=self.__execution_thread, args=[id])\n thread.start()\n return True\n elif executionType == \"sync\":\n self.__execution_thread(id) # call it sync here\n return True\n else:\n self.logger.error(\"unsupported execution type \"+str(executionType)+\" in function \"+str(id))\n raise(Exception)\n except:\n return False\n\n def start_function_execution_thread(self):\n self.functionExecutionRunning = True\n self.functionExecutionThread = threading.Thread(target=self._function_execution_thread)\n self.functionExecutionThread.start()\n\n\n\n def _function_execution_thread(self):\n while self.functionExecutionRunning:\n try:\n nextId = self.executionQueue.get(timeout=1)\n self.logger.info(f\"now executing function {str_lim(nextId,300)}\")\n self.__execution_thread(nextId)\n except:\n pass\n\n def delete(self):\n self.functionExecutionRunning = False\n\n def exit(self):\n self.delete()\n\n def close(self):\n self.delete()\n\n def __dispatch(self,function,timeout,param):\n thread = threading.Thread(target=self.__dispatch_thread_function, args=[function,timeout,param])\n thread.start()\n\n\n def __dispatch_thread_function(self,function,timeout,param):\n time.sleep(timeout)\n function(param)\n #exit thread\n\n def reset_progress_bar(self,controlNode):\n controlNode.get_child(\"progress\").set_value(0)\n \n\n def __clone_children(self,source,dest):\n \"\"\" see def clone() for more info \"\"\"\n\n sourcePath = self.get_browse_path(source)\n destPath = self.get_browse_path(dest)\n\n for childName,childInfo in self.get_children_dict(source).items():\n childId = childInfo[\"id\"]\n if childInfo[\"type\"] in [\"timeseries\",\"file\",\"column\"]:\n self.logger.debug(f\"clone skip node {childInfo['name']}\")\n continue\n newProps = {k:v for k,v in childInfo.items() if k not in [\"parent\",\"children\",\"backRefs\",\"forwardRefs\",\"browsePath\",\"id\",\"name\"]}\n cloneId = self.create_node_from_path(destPath+\".\"+childInfo[\"name\"],properties=newProps)\n grandChildren = self.get_children_dict(childId)\n if grandChildren != {}:\n self.__clone_children(childId,cloneId)\n\n def __clone_referencer_targets(self,source,dest):\n \"\"\" see def clone() for more info \"\"\"\n sourcePath = self.get_browse_path(source)\n destPath = self.get_browse_path(dest)\n\n childIds = self.get_node_info(sourcePath)[\"children\"]\n while childIds:\n id = childIds.pop()\n info = self.get_node_info(id)\n if info[\"type\"]==\"referencer\":\n newreferencer = self.get_browse_path(id).replace(sourcePath, destPath)\n #now check: if the referencer points to something inside, we do the same but in the target root, else we take it as it is\n for targetId in info[\"forwardRefs\"]:\n targetPath = self.get_browse_path(targetId)\n newTargetPath = targetPath.replace(sourcePath,destPath) # if not found, we get it unchanged\n self.add_forward_refs(newreferencer,[newTargetPath])\n childIds.extend(info[\"children\"])\n\n\n\n\n\n 
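# usage sketch (hypothetical path): model.clone(\"root.myfolder\")  # creates e.g. root.myfolder_<randomId> next to the original\n 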
def clone(self,desc):\n \"\"\"\n clone a node and all its subnodes (a whole branch)\n we will create all nodes which existed in the source branch, for the referencers we use this strategy:\n references pointing to a node under the source branch will be translated to references in the target branch\n pointing to the corresponding new node in the target branch\n references pointing to outside the source branch will also be created in the cloned branch pointing to\n the same target\n\n Args:\n desc: the source node descriptor\n \"\"\"\n\n sourcePath = self.get_browse_path(desc)\n if not sourcePath:\n return False\n targetPath = sourcePath+\"_\"+getRandomId()\n sourceInfo = self.get_node_info(desc)\n transferRoot = self.create_node_from_path(targetPath,properties={\"type\":sourceInfo[\"type\"]})\n\n #now iterate over the nodes and children and create the same nodes\n self.__clone_children(desc,transferRoot)\n self.__clone_referencer_targets(sourcePath,transferRoot)\n return True\n\n def execute_synchronous(self,id):\n \"\"\"\n execute a function synchronously here (this can be useful when executing a function from within another)\n \"\"\"\n return self.__execution_thread(id)\n\n def __execution_thread(self,id):\n \"\"\"\n the thread function to execute functions\n it currently uses the global lock so it will lock out any other work on the model during execution\n all inputs and outputs are found in the model\n we also set the status and result from here, no need to do that in the function\n Args:\n id: the node id of the function to be executed or the dict for an object call\n \"\"\"\n try:\n if type(id) is str:\n if self.model[id][\"type\"] == \"function\":\n isFunction = True\n else:\n isFunction = False\n\n\n with self.lock:\n\n if isFunction:\n if self.model[id][\"autoReload\"] == True and self.global_auto_reload_enabled():\n # must reload the module\n functionName = self.model[id][\"functionPointer\"]\n module = importlib.reload(self.functions[functionName][\"module\"])\n functionPointer = getattr(module, functionName.split('.', 1).pop())\n # now update our global list\n self.functions[functionName][\"module\"] = module\n self.functions[functionName][\"function\"] = functionPointer\n\n\n #self.logger.info(f\"in execution Thread {threading.get_ident()}, executing {id} {functionName}\")\n #check the function\n functionName = self.model[id][\"functionPointer\"]\n functionPointer = self.functions[functionName]['function']\n self.logger.info(f\"in execution Thread {threading.get_ident()}, executing {id} {functionName}\")\n else:\n functionPointer = id[\"functionPointer\"]\n functionName = functionPointer.__name__\n parameter = id[\"parameter\"]\n id = id[\"id\"] #for deeper down\n\n #now set some controls\n try:\n node = self.get_node(id)\n controlNode = node.get_child(\"control\")\n\n targetId = self.get_id(\"root.system.progress.targets\")\n if targetId:\n self.disable_observers()\n self.remove_forward_refs(targetId)\n self.add_forward_refs(targetId,[controlNode.get_child(\"progress\").get_id()])\n self.enable_observers()\n\n # we don't signal these things\n self.disable_observers()\n controlNode.get_child(\"status\").set_value(\"running\")\n controlNode.get_child(\"result\")#.set_value(\"pending\")\n controlNode.get_child(\"progress\").set_value(0)\n #controlNode.get_child(\"signal\").set_value(\"nosignal\")\n startTime = datetime.datetime.now()\n controlNode.get_child(\"lastStartTime\").set_value(startTime.isoformat())\n self.enable_observers()\n\n except:\n self.logger.error(\"error during execution preparation, this can be critical, maybe disabled observers\")\n self.log_error()\n pass\n\n # model lock open: we execute without the model lock\n if isFunction:\n result = functionPointer(node) # this is the actual execution\n else:\n result = functionPointer(parameter)\n\n #now we are back, set the status to finished\n duration = 
 def __execution_thread(self,id):\n \"\"\"\n the thread function to execute functions\n it currently uses the global lock so it will lock out any other work on the model during execution\n all inputs and outputs are found in the model\n we also set the status and result from here, so it is not needed to do that in the function itself\n Args:\n id: the node id of the function to be executed or the dict for an object call\n \"\"\"\n try:\n if type(id) is str:\n if self.model[id][\"type\"] == \"function\":\n isFunction = True\n else:\n isFunction = False\n else:\n isFunction = False # id is a dict describing an object/member call\n\n\n with self.lock:\n\n if isFunction:\n if self.model[id][\"autoReload\"] == True and self.global_auto_reload_enabled():\n # must reload the module\n functionName = self.model[id][\"functionPointer\"]\n module = importlib.reload(self.functions[functionName][\"module\"])\n functionPointer = getattr(module, functionName.split('.', 1).pop())\n # now update our global list\n self.functions[functionName][\"module\"] = module\n self.functions[functionName][\"function\"] = functionPointer\n\n\n #check the function\n functionName = self.model[id][\"functionPointer\"]\n functionPointer = self.functions[functionName]['function']\n self.logger.info(f\"in execution Thread {threading.get_ident()}, executing {id} {functionName}\")\n else:\n functionPointer = id[\"functionPointer\"]\n functionName = functionPointer.__name__\n parameter = id[\"parameter\"]\n id = id[\"id\"] #for deeper down\n\n #now set some controls\n try:\n node = self.get_node(id)\n controlNode = node.get_child(\"control\")\n\n targetId = self.get_id(\"root.system.progress.targets\")\n if targetId:\n self.disable_observers()\n self.remove_forward_refs(targetId)\n self.add_forward_refs(targetId,[controlNode.get_child(\"progress\").get_id()])\n self.enable_observers()\n\n # we don't signal these things\n self.disable_observers()\n controlNode.get_child(\"status\").set_value(\"running\")\n controlNode.get_child(\"result\")#.set_value(\"pending\")\n controlNode.get_child(\"progress\").set_value(0)\n #controlNode.get_child(\"signal\").set_value(\"nosignal\")\n startTime = datetime.datetime.now()\n controlNode.get_child(\"lastStartTime\").set_value(startTime.isoformat())\n self.enable_observers()\n\n except:\n self.logger.error(\"error during execution preparation, this can be critical, observers may remain disabled\")\n self.log_error()\n pass\n\n # model lock open: we execute without model lock\n if isFunction:\n result = functionPointer(node) # this is the actual execution\n else:\n result = functionPointer(parameter)\n\n #now we are back, set the status to finished\n duration = (datetime.datetime.now()-startTime).total_seconds()\n\n with self.lock:\n # this is a bit dangerous: maybe the node is not there anymore, so the\n # inner function calls of node.xx() will return nothing; we use try/except\n try:\n self.logger.debug(f\"function {functionName} execution completed in {duration} \")\n\n self.disable_observers() # we don't signal these\n controlNode.get_child(\"lastExecutionDuration\").set_value(duration)\n controlNode.get_child(\"status\").set_value(\"finished\")\n controlExecutionCounter = controlNode.get_child(\"executionCounter\")\n controlExecutionCounter.set_value(controlExecutionCounter.get_value() + 1)\n controlProgress = controlNode.get_child(\"progress\")\n controlProgress.set_value(0)\n self.enable_observers()\n\n self.notify_observers([controlExecutionCounter.get_id(),controlProgress.get_id()],\"value\")\n\n if not isFunction:\n result = True # for execution of member functions we don't have a general return code\n if result == True:\n controlNode.get_child(\"result\").set_value(\"ok\")\n self.publish_event(\"result of \" + str(functionName) + \": \" + controlNode.get_child(\"result\").get_value())\n else:\n if controlNode.get_child(\"result\").get_value() == \"pending\":\n #if the function hasn't set anything else\n controlNode.get_child(\"result\").set_value(\"error\")\n #also publish this result\n self.publish_event(\"error in \" + str(functionName) + \": \" + controlNode.get_child(\"result\").get_value())\n\n except Exception as ex:\n errorString = str(sys.exc_info()[1])\n self.logger.error(\"error inside execution thread, id \" +str(id)+\" functionname \"+str(functionName)+\" \"+errorString+\" \"+str(ex)+\" \"+str(traceback.format_exc()))\n pass\n\n\n except Exception as ex:\n errorString = str(sys.exc_info()[1])\n self.logger.error(\"error inside execution thread, id \" +str(id)+\" functionname \"+str(functionName)+\" \"+errorString+\" \"+str(ex)+\" \"+str(traceback.format_exc()))\n controlNode.get_child(\"status\").set_value(\"interrupted\")\n controlNode.get_child(\"result\").set_value(\"error:\"+errorString)\n controlNode.get_child(\"progress\").set_value(0)\n self.publish_event(\"error in \"+str(functionName)+\": \"+errorString)\n return\n\n def get_error(self):\n s=f\"{sys.exc_info()[1]}, {traceback.format_exc()}\"\n return s\n\n def log_error(self):\n self.logger.error(self.get_error())\n\n def show(self):\n \"\"\"\n show the current model as an ascii tree on the console\n \"\"\"\n with self.lock:\n self.__show_subtree(\"1\")\n\n\n\n def save_model(self):\n return self.save(self.currentModelName,includeData=False)\n\n
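 # --- usage sketch (editor's addition) -----------------------------------\n # save() below writes <name>.model.json (plus time series data) into\n # ./models unless an absolute path is given; load() is the counterpart.\n # Assuming `m` is a Model instance ('mymodel' is a made-up name):\n #\n # m.save('mymodel') # -> ./models/mymodel.model.json + data\n # m.load('mymodel', includeData=True) # restore model and time series\n\n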
 # save model and data to files\n def save(self, fileName, includeData = True):\n \"\"\"\n save the model to disk, save the tables separately\n the model file will be saved as ./models/fileName.model.json and the tables will be saved under\n ./models/filename.tablePath.npy\n\n Args:\n fileName: the name to store it under, please don't give extensions\n includeData : if set to False, we DON'T store the values of nodes of type table or file to disk\n\n \"\"\"\n self.logger.debug(f\"save model as {fileName} with data {includeData}\")\n self.publish_event(f\"saving model {fileName}...\")\n with self.lock:\n try:\n m = self.get_model_for_web() # leave out the tables\n\n model_directory = None\n model_filename = None\n if os.path.isabs(fileName):\n model_directory = os.path.dirname(fileName)\n model_filename = os.path.basename(fileName)\n else:\n file_directory = os.path.dirname(fileName)\n if len(file_directory) == 0:\n # we are only given a filename, use 21datalab subfolder models as directory\n model_directory = os.path.join(os.path.dirname(__file__), \"models\")\n model_filename = fileName\n else:\n # we are given a relative path + filename\n model_directory = os.path.dirname(fileName)\n model_filename = os.path.basename(fileName)\n\n if includeData:\n self.ts.save(os.path.join(model_directory, model_filename))\n f = open(os.path.join(model_directory, model_filename)+ \".model.json\", \"w\")\n f.write(json.dumps(m, indent=4))\n f.close()\n self.currentModelName = fileName\n self.publish_event(f\"model {fileName} saved.\")\n return True\n except Exception as e:\n self.logger.error(\"problem saving \"+str(e))\n self.publish_event(f\"saving model {fileName} error\")\n return False\n\n def move(self, nodeList, newParent, newIndex=None):\n \"\"\"\n move a list of nodes under a new parent at the child position newIndex\n if the newParent is a referencer, we are creating references instead and keep the nodes where they are\n\n Args:\n nodeList [string]: a list of node descriptors of the nodes to move, a scalar is also allowed\n newParent [string]: a node descriptor for the new parent under which the nodes should appear\n newIndex (int): the position in the children of newParent where the new nodes should appear\n Returns:\n True\n \"\"\"\n with self.lock:\n if not type(nodeList) is list:\n nodeList = [nodeList]\n\n nodeIds = self.get_id(nodeList)\n parentId = self.get_id(newParent)\n if not parentId: return False\n\n #check the special case that the parent is a referencer:\n if self.model[parentId][\"type\"] == \"referencer\":\n self.add_forward_refs(parentId,nodeIds)\n self.logger.info(\"moved nodes as references \"+ parentId + str(nodeIds))\n return True\n\n #for all others, we start moving nodes\n self.logger.debug(f\"model.move():{nodeIds}=>{parentId}\")\n try:\n for id in nodeIds:\n if id == parentId or id == \"1\":\n self.logger.error(\"cant move \" +id + \" to \" + parentId)\n continue\n oldParent = self.model[id][\"parent\"]\n self.model[oldParent][\"children\"].remove(id) # remove the child from the old parent\n self.model[id][\"parent\"]=parentId\n if newIndex is not None:\n self.model[parentId][\"children\"].insert(newIndex,id) # at specific index (index 0 included)\n else:\n self.model[parentId][\"children\"].append(id) # at the end\n self.__notify_observers(oldParent, \"children\")\n self.__notify_observers(parentId, \"children\")\n\n except Exception as ex:\n self.logger.error(f\"problem moving {nodeIds} to new parent {parentId} this is critical, the model can be messed up {ex}\")\n return True\n\n def clean_ts_entries(self):\n \"\"\"\n remove timeseries data that has no node and remove nodes (timeseries) that have no timeseries data\n \"\"\"\n self.logger.debug(\"clean_ts_entries(): check consistency of model and timeseries table..\")\n deleteNodes = []\n for id, node in self.model.items():\n if node[\"type\"] == \"timeseries\":\n info = self.ts.get_info(id)\n if \"not found\" in info:\n self.logger.info(f\" {node['name']}: has no time series data entry in the ts table, remove node\")\n deleteNodes.append(id)\n for id in deleteNodes:\n self.delete_node(id)\n\n for id in self.ts.get_items():\n if id not in self.model:\n self.logger.info(f\" timeseries data {id} has no corresponding node in model .. 
delete the ts-data\")\n self.ts.delete(id)\n\n\n def load(self,fileName,includeData = True, update = False):\n \"\"\"\n replace the current model in memory with the model from disk\n please give only a name without extensions\n the filename must be in ./models\n Args:\n fileName(string) the name of the file without extension, we also accept a dict here: a list of nodes\n includeData bool: if set to false, the values for tables and files will NOT be loaded\n update : if set to true, auto correct missing entries in known templates\n \"\"\"\n result = False\n self.logger.info(f\"load {fileName}, includeData {includeData}\")\n with self.lock:\n self.publish_event(f\"loading model {fileName}...\")\n self.disable_observers()\n try:\n if type(fileName) is str:\n model_directory = None\n model_filename = None\n if os.path.isabs(fileName):\n model_directory = os.path.dirname(fileName)\n model_filename = os.path.basename(fileName)\n else:\n file_directory = os.path.dirname(fileName)\n if len(file_directory) == 0:\n # we are only given a filename, use 21datalab subfolder models as directory\n model_directory = os.path.join(os.path.dirname(__file__), \"models\")\n model_filename = fileName\n else:\n # we are given a relative path + filename\n model_directory = os.path.dirname(fileName)\n model_filename = os.path.basename(fileName)\n\n #if os.path.dirname(fileName)\n\n f = open(os.path.join(model_directory, model_filename) + \".model.json\",\"r\")\n model = json.loads(f.read())\n self.model = model\n f.close()\n self.currentModelName = fileName\n elif type(fileName) is dict:\n self.model = copy.deepcopy(fileName) # take over the nodes\n self.currentModelName = \"fromNodes\"\n #now also load the tables\n self.globalIdCounter = 0 #reset the counter and recover it further down\n for nodeId in self.model:\n if not self.idCreationHash:\n #we only recover the counter if necessary\n if int(nodeId)>self.globalIdCounter:\n self.globalIdCounter = int(nodeId) # here, we recover the global id counter\n if includeData:\n if \"version\" in self.model[\"1\"] and self.model[\"1\"][\"version\"]>=0.1:\n #new loader\n self.ts.load(os.path.join(model_directory, model_filename))\n else:\n self.logger.debug(\"time series compatibility loader\")\n #we assume data in file and use the standard inmemory table storage\n for nodeId in self.model:\n if self.get_node_info(nodeId)[\"type\"] == \"table\":\n table = self.get_browse_path(nodeId)\n data = numpy.load(os.path.join(model_directory, model_filename) + \".\" + table + \".npy\")\n #now find the time data, apply it to all variables\n timeId=self.find_table_time_node(table)\n ids = self.get_leaves_ids(table+\".columns\")\n for id, column in zip(ids, data):\n if id==timeId:\n times = column\n else:\n self.ts.create(id)\n self.set_properties({\"type\":\"timeseries\"},id)\n self.ts.set(id,values=column)\n for id in ids:\n if id == timeId:\n continue\n self.ts.set(id,times=times)\n self.clean_ts_entries() # make sure the model and ts table is consistent\n\n self.instantiate_all_objects()\n self.reset_all_objects()\n\n self.enable_observers()\n self.publish_event(f\"loading model {fileName} done.\")\n self.model[\"1\"][\"version\"]=self.version #update the version\n\n result = True\n except Exception as e:\n self.logger.error(\"problem loading\"+str(e))\n self.publish_event(f\"loading model {fileName} error.\")\n self.enable_observers()\n result = False\n\n if update:\n self.update() # automatically adjust all widgets and other known templates to the latest style\n\n\n return 
result\n\n\n def create_differential_handle(self, user = None):\n \"\"\"\n make a copy of the current model and keep it, create a handle for it and return that handle\n this new handle is at the same time the id of the new \"user\", all the following requests for differential updates\n will be referred to this user id\n\n Returns:\n a hash handle for the current model\n \"\"\"\n with self.lock:\n #newHandle = str(uuid.uuid4().hex) # make a new unique handle\n newHandle = str(self.diffHandleCounter)\n self.diffHandleCounter += 1\n if not user:\n #also create a new user\n user = newHandle\n self.differentialHandles[newHandle]= {\n \"user\":user,\n \"model\":self.get_model_for_web(),\n \"time\": int(time.time()),\n \"updateCounter\": self.modelUpdateCounter\n }# make an entry by copying the whole model\n return newHandle\n\n\n def get_differential_update(self,oldHandle,newHandle=None):\n \"\"\"\n this function takes the copy of the model (hopefully) held under handle and compares it to the current model:\n the differences are analyzed and returned.\n to avoid endless storage of old references, we have the deletion strategy: for every \"user\" we keep a max of\n self.differentialHandlesMaxPerUser, if we have more, we delete the oldest\n\n Args:\n oldHandle (string): the unique id of the old version of the model\n newHandle (string): the unique id of the new version to compare to, if not given, we take the current\n and will automatically make a new entry for the current\n Returns (dict):\n containing information about the changes between an old and a new version of the model\n key values:\n \"handle\":(string): the handle under which we find the new version of the model\n \"newNodes\": (dict) nodes which are new to the tree in the form Nodeid:{properties}\n \"deletedNodeIds\": (list) list of node ids which have been deleted\n \"modifiedNodes\": (dict) nodes which have changed properties: if so, we give the full updated node back\n\n \"\"\"\n with self.lock:\n diff={\"handle\":None,\"newNodes\":{},\"deletedNodeIds\":[],\"modifiedNodes\":{}} # the response for web\n\n if oldHandle not in self.differentialHandles:\n return None # the old handle does not exist, we can't handle this request\n\n if newHandle is None:\n # this is the standard case, we generate the new handle now\n user = self.differentialHandles[oldHandle][\"user\"]\n # we make a quick check if the model has changed at all, if not we simply return the old handle\n if self.differentialHandles[oldHandle][\"updateCounter\"] == self.modelUpdateCounter:\n self.logger.debug(\"get_differential_update: shortcut for no changes\")\n diff[\"handle\"] = oldHandle\n return diff\n newHandle = self.create_differential_handle(user=user) # this function also makes a copy of the current tree and puts it in the self.differentialHandles list\n newModel = self.differentialHandles[newHandle][\"model\"]\n else:\n if newHandle in self.differentialHandles:\n newModel = self.differentialHandles[newHandle][\"model\"]\n else:\n return None # the newHandle did not exist\n oldModel = self.differentialHandles[oldHandle][\"model\"]\n\n # delete strategy: for every \"user\" we track a maximum of self.differentialHandlesMaxPerUser\n users={}\n for handle,entry in self.differentialHandles.items():\n user = entry[\"user\"]\n if user not in users:\n users[user]={}\n users[user][handle] = entry[\"time\"]\n for user,entries in users.items():\n if len(entries)> 
self.differentialHandlesMaxPerUser:\n #must clean up the history of that user, entries is a dict of handle:time\n sortedKeys =[key for key, value in sorted(entries.items(), key=lambda item: item[1])]\n removeKeys = sortedKeys[:-self.differentialHandlesMaxPerUser]\n self.logger.debug(\"remove handles \"+str(removeKeys)+\" of user \"+user)\n for key in removeKeys:\n del self.differentialHandles[key]\n\n\n #find the changes between the models\n for newNodeId in newModel:\n if newNodeId not in oldModel:\n #this node is not found in the old model, so it is new\n diff[\"newNodes\"][newNodeId]=copy.deepcopy(newModel[newNodeId])\n else:\n #this node is in both models, check if there was a change inside the nodes\n #for a deep comparison, serialize them\n newNodeSerialized = json.dumps(newModel[newNodeId],sort_keys=True)\n oldNodeSerialized = json.dumps(oldModel[newNodeId],sort_keys=True)\n if newNodeSerialized != oldNodeSerialized:\n #something is different, so return that node\n diff[\"modifiedNodes\"][newNodeId]=copy.deepcopy(newModel[newNodeId])\n\n #now check for deleted ones, these appear in the old but not in the new\n diff[\"deletedNodeIds\"]=list(set(oldModel.keys())-set(newModel.keys()))\n diff[\"handle\"]=newHandle\n\n return diff\n\n\n def publish_event(self, event):\n \"\"\"\n send out an event e.g. for status information\n an event to send looks like\n event = { \"id\": 1123,\n \"event\": \"system.status\",\n \"data\": {\"nodeId\":xx, \"value\":..., \"function\":...}\n }\n Args:\n event [string or dict]\n \"\"\"\n self.logger.debug(f\"publish_event ({event})\")\n self.modelUpdateCounter += 1\n\n if type(event) is str:\n #make sure the formatting is json compatible\n event = event.replace(\"'\",'\"')# ' => \"\n event={\"event\":\"system.status\",\"data\":{\"text\":event}}\n event[\"id\"]=self.modelUpdateCounter\n\n for observerObject in self.observers:\n observerObject.update(event)\n\n\n def disable_observers(self):\n self.lock_model()\n self.disableObserverCounter += 1\n #self.logger.debug(f\"disable_observers() {self.disableObserverCounter}\")\n\n def enable_observers(self):\n self.release_model()\n\n if self.disableObserverCounter >0:\n self.disableObserverCounter -=1\n else:\n self.logger.error(\"enable_observers without disable_observers\")\n #self.logger.debug(f\"enable_observers() {self.disableObserverCounter}\")\n\n\n\n\n def notify_observers(self, nodeIds, properties, eventInfo={}):\n \"\"\"\n public wrapper for __notify_observers, expert use only!\n \"\"\"\n #self.logger.info(f\"notify_observers(), {str_lim(nodeIds,50)}, {properties}\")\n return self.__notify_observers(nodeIds,properties,eventInfo)\n\n
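 # --- usage sketch (editor's addition): differential updates -------------\n # A UI client typically polls the model like this (`m` is an assumed\n # Model instance):\n #\n # handle = m.create_differential_handle() # snapshot + user id\n # # ... later ...\n # diff = m.get_differential_update(handle)\n # handle = diff['handle'] # continue from the new snapshot\n # # inspect diff['newNodes'], diff['modifiedNodes'], diff['deletedNodeIds']\n\n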
 def get_referencers(self,descList,deepLevel = 0):\n \"\"\"\n get the references to these nodes via back-traversing the leaves algorithm\n we look for parents through deepLevel levels and from there on we look back for referencers\n deepLevel is the number of extra parent levels: 1 means one more level, two means two extra levels\n Returns:\n a list of referencer ids that point to the given descList nodes\n \"\"\"\n #convert all nodes to ids\n\n if type(descList) is not list:\n descList = [descList]\n startList = set([self.__get_id(node) for node in descList])\n startList =set([node for node in startList if node]) #remove None and duplicates\n\n referencers = set() #we collect the referencers here and avoid duplicates\n\n #in this first iteration we take the referencers pointing directly to the nodes or their parents\n workList = startList.copy()\n for level in range(deepLevel+1):\n #from this level we take the backrefs\n for id in workList:\n referencers.update(self.model[id][\"backRefs\"])\n #prepare parents for next round\n parents=set()\n for id in workList:\n myParent=self.model[id][\"parent\"]\n if myParent not in [\"0\",\"1\"]: #root\n parents.update([myParent]) #!use list to avoid break into chars\n #now take the parents as current list\n workList = parents.copy()\n if not workList:\n break #avoid turning cycles for nothing (workList is a set, comparing to [] was always False)\n\n #second step:\n # now we take all final referencers and all referencers to those referencers with no limit\n # (go back the leaves algorithm)\n collectedReferencers = referencers.copy() # we take all we have so far\n while True:\n workList=set()\n for id in referencers:\n workList.update(self.model[id][\"backRefs\"])\n collectedReferencers.update(workList)\n if not workList:\n break\n else:\n #one more round\n referencers = workList.copy()\n return list(collectedReferencers)\n\n def __notify_observers(self, nodeIds, properties, eventInfo={} ):\n \"\"\"\n this function is called internally when nodes or properties have changed. Then, we look if any\n observer has to be triggered\n we also increase the counter and time on the root.observers.modelObserver\n Args:\n nodeIds: the node ids where a change occurred\n properties: the property or list of properties of the node that has changed\n\n \"\"\"\n\n #exception for the progress node\n if type(properties) is not list:\n properties = [properties]\n if type(nodeIds) is not list:\n nodeIds = [nodeIds]\n\n if self.disableObserverCounter>0:\n #only one exception: progress works always\n mustReturn = True\n with self.lock:\n for nodeId in nodeIds:\n if self.model[nodeId][\"name\"] == \"progress\":\n mustReturn = False\n break\n if mustReturn:\n #self.logger.info(f\"__notify_observers disable return {nodeIds} {properties}\")\n return\n\n with self.lock:\n # this is for the tree updates, any change is taken\n self.modelUpdateCounter = self.modelUpdateCounter + 1 #this is used by the diff update function and model copies\n\n collectedEvents=[]\n\n enableTree = self.get_node(\"root.system.enableTreeUpdateEvents\")\n if enableTree and enableTree.get_value()==False:\n pass\n else:\n # Notify all observers about the tree update, this is a standard\n event = {\n \"id\": self.modelUpdateCounter,\n \"event\": \"tree.update\",\n \"data\": \"\"}\n collectedEvents.append(event) # send later\n\n\n names =[self.model[id][\"name\"] for id in nodeIds]\n self.logger.debug(f\"__notify_observers {len(nodeIds)} ids:{str_lim(names,100)}: {properties}\")\n\n triggeredObservers=[] # we use this to suppress multiple triggers of the same observer, the list holds the observerIds which have been triggered\n #p=utils.Profiling(\"__notify.iterate_nodes\")\n\n referencers = self.get_referencers(nodeIds,deepLevel=5)#deeplevel 5: nodes can be organized by the user in a hierarchy\n nodeId = self.__get_id(nodeIds[0])#take the first for the event string,\n #p.lap(f\"get refs for {nodeId}\")\n self.logger.debug(f\"__notify on {len(referencers)} referencers: {str_lim([self.get_browse_path(id) for id in referencers],200)}\")\n for id in referencers:\n if self.model[id][\"name\"] == \"targets\" and self.model[self.model[id][\"parent\"]][\"type\"] == \"observer\":\n # this referencer belongs to an observer\n observerId = self.model[id][\"parent\"]\n observer = self.get_children_dict(observerId)\n # check if trigger\n if observer[\"enabled\"][\"value\"] == True:\n #self.logger.debug(f\"{self.model[nodeId]['name']} is targeted by 
observer {self.get_browse_path(observerId)}\")\n if observerId in triggeredObservers:\n self.logger.debug(f\"we have triggered the observer {self.get_browse_path(observerId)} in this call already, pass\")\n continue\n #self.logger.debug(f\"check properties to triggered the observer {self.get_browse_path(observerId)}\")\n #check if any of the observed properties matches\n propertyMatch = False\n for property in properties:\n if property in observer[\"properties\"][\"value\"]:\n propertyMatch=True\n break\n if not propertyMatch:\n #self.logger.debug(f\"observer trigger on {self.get_browse_path(observerId)} no property match \")\n pass\n else:\n self.logger.debug(f\"observer trigger on {self.get_browse_path(observerId)} for change in {property}\")\n self.model[observer[\"triggerCounter\"][\"id\"]][\"value\"] = self.model[observer[\"triggerCounter\"][\"id\"]][\"value\"]+1\n self.model[observer[\"lastTriggerTime\"][\"id\"]][\"value\"] = datetime.datetime.now().isoformat()\n for funcNodeId in self.get_leaves_ids(observer[\"onTriggerFunction\"][\"id\"]):\n self.logger.debug(f\"execute ontrigger function {funcNodeId}\")\n self.execute_function(funcNodeId)\n if \"triggerSourceId\" in observer:\n self.model[observer[\"triggerSourceId\"][\"id\"]][\"value\"] = nodeId\n if observer[\"hasEvent\"][\"value\"] == True:\n #self.logger.debug(f\"send event {observer['eventString']['value']}\")\n #also send the real event\n #self.modelUpdateCounter = self.modelUpdateCounter+1\n event = {\n \"id\": self.modelUpdateCounter,\n \"event\": observer[\"eventString\"][\"value\"],\n \"data\": {\"nodeId\":observerId,\"sourceId\":nodeId,\"sourcePath\":self.get_browse_path(nodeId)}}\n if self.model[nodeId][\"type\"] not in [\"column\",\"file\",\"timeseries\"]:\n event[\"data\"][\"value\"]=self.model[nodeId][\"value\"]\n #some special handling\n try:\n if event[\"event\"] == \"system.progress\":\n progressNode = self.get_node(self.get_leaves_ids(\"root.system.progress.targets\")[0])\n event[\"data\"][\"value\"] = progressNode.get_value()\n event[\"data\"][\"function\"] = progressNode.get_parent().get_parent().get_browse_path()\n else:\n eventNode = self.get_node(observerId)\n extraInfoNode = eventNode.get_child(\"eventData\")\n if extraInfoNode:\n extraInfo = extraInfoNode.get_value()\n if type(extraInfo) is not dict:\n extraInfo={\"info\":extraInfo}\n event[\"data\"].update(extraInfo)\n if eventInfo:\n event[\"data\"][\"_eventInfo\"]=eventInfo #put this only if we have info\n\n except Exception as ex:\n self.logger.error(f\"error getting extra info for event {ex}, {sys.exc_info()[0]}\")\n #for all other events, take the event data if there is one (as json)\n\n self.logger.debug(f\"generate event {event}\")\n collectedEvents.append(event)\n triggeredObservers.append(observerId)# next time, we don't trigger\n\n #p.lap(\"complete backrefs {nodeId}, {backrefs}\")\n #self.logger.debug(p)\n #self.logger.debug(\"now send the events\")\n #event = copy.deepcopy(event)\n for event in collectedEvents:\n for observerObject in self.observers:\n observerObject.update(event)\n self.logger.debug(f\"done sending {len(collectedEvents)} events\")\n\n\n\n def create_observer(self):\n # Instantiate a new observer\n observer = Observer(self)\n # attach it to the model\n self.attach_observer(observer)\n # return the observer\n return observer\n\n def attach_observer(self, observer):\n # Add a new observer\n self.logger.debug(f\"Adding new observer: {id(observer)}\")\n with self.lock:\n self.observers.append(observer)\n\n def 
detach_observer(self, observer):\n with self.lock:\n try:\n self.observers.remove(observer)\n self.logger.debug(f\"Removing observer: {id(observer)}\")\n\n except ValueError:\n self.logger.exception(\"Trying to remove an observer which doesn't exist in the list of observers.\")\n \n def set_column_len(self,nodeDescriptor,newLen):\n \"\"\"\n adjust the length of a column, extensions are nan-padded\n Args:\n nodeDescriptor: the node\n newLen (int): the new length of the column\n Returns:\n the new length set or None if there was a problem\n \"\"\"\n with self.lock:\n id = self.get_id(nodeDescriptor)\n if not id: return None\n if self.model[id][\"type\"] != \"column\":\n self.logger.error(\"set_column_len: not a column\")\n return None\n #now make the adjustments\n if type(self.model[id]['value']) != numpy.ndarray:\n self.model[id]['value'] = numpy.full(newLen, numpy.nan)\n else:\n #is already an array\n if len(self.model[id]['value']) == newLen:\n #nothing to do\n pass\n elif len(self.model[id]['value']) > newLen:\n self.model[id]['value'] = self.model[id]['value'][0:newLen]\n else:\n #shorter than requested: pad with nan (was a bug: \"dataLen\" was undefined here)\n self.model[id]['value'] = numpy.append(self.model[id]['value'], numpy.full(newLen-len(self.model[id]['value']), numpy.nan))\n return newLen\n\n def get_upload_folder_files(self, matchFilter=None, blackList = []):\n \"\"\"\n Args:\n matchFilter: a string that must be contained in the files to deliver\n blackList: a list of filenames which should not be delivered\n Returns:\n (foundFileNames, absFileNames): the plain file names and their absolute paths\n \"\"\"\n\n\n full_path = os.path.realpath(__file__) # returns a string representing the canonical path, argument file is a file system path\n path, filename = os.path.split(full_path)\n folder = os.path.join(path, 'upload') # use os.path.join for portability (was a Windows-only backslash path)\n\n absFileNames = []\n foundFileNames = []\n #now iterate the uploaded files\n fileNames = os.listdir(folder)\n for idx,fileName in enumerate(fileNames):\n if matchFilter:\n if matchFilter not in fileName:\n continue # this file will be ignored\n if fileName in blackList:\n continue\n foundFileNames.append(fileName)\n\n absFileNames = [os.path.join(folder, fileName) for fileName in foundFileNames]\n return foundFileNames,absFileNames\n\n\n\n\n def update(self):\n \"\"\"\n update all known widgets to the latest template including complex backward compatibility changes\n :return:\n \"\"\"\n self.logger.info(\"update() running...\")\n self.disable_observers()\n try:\n # the ts widgets:\n # now go through the widgets and update all according to the template\n # now find all nodes of type widget\n newNodes = {}\n helperModel = Model()\n helperModel.disable_observers()\n helperModel.create_template_from_path(\"root.widget\", self.get_templates()['templates.timeseriesWidget'])\n\n widgets = []\n for id, props in self.model.items():\n if props[\"type\"] == \"widget\":\n widgetObject = self.get_node(id)\n if widgetObject.get_child(\"widgetType\").get_value() == \"timeSeriesWidget\":\n widgets.append(id)\n self.logger.debug(f\"update():found widget {widgetObject.get_browse_path()}\")\n\n for id in widgets:\n path = self.get_browse_path(id)\n mirrorBefore = self.get_branch_pretty(path)\n self.create_template_from_path(path,self.get_templates()['templates.timeseriesWidget']) # this will create all nodes which are not there yet\n\n # now make specific updates e.g. 
linking of referencers, update of list to dicts etc.\n # if colors is a list: make a dict out of it\n colors = self.get_value(f\"{id}.hasAnnotation.colors\")\n tags = self.get_value(f\"{id}.hasAnnotation.tags\")\n if type(colors) is list:\n colors = {v:{\"color\":colors[idx],\"pattern\":None} for idx,v in enumerate(tags)}\n self.logger.debug(f\"update(): set value{id}.hasAnnotation.colors := {colors} \")\n self.set_value(f\"{id}.hasAnnotation.colors\",colors)\n\n if not \"visibleTags\" in mirrorBefore[\"hasAnnotation\"] or (self.get_value(f\"{id}.hasAnnotation.visibleTags\") != mirrorBefore[\"hasAnnotation\"][\"visibleTags\"][\".properties\"][\"value\"]):\n #it is different or new, so we created it now\n visibleTags = {tag:True for tag in tags}\n #make sure that from the colors, we take them as well\n updateVisibleTags = {tag:True for tag in colors}\n visibleTags.update(updateVisibleTags)\n\n self.set_value(f\"{id}.hasAnnotation.visibleTags\",visibleTags)\n self.logger.debug(f\"update(): set value{id}.visibleTagss := {visibleTags} \")\n\n #make sure the hasAnnotation.annotations referencer points to newannotations as well\n self.add_forward_refs(f\"{id}.hasAnnotation.annotations\",[f\"{id}.hasAnnotation.newAnnotations\"],allowDuplicates=False)\n\n #now make sure the observers have at least the required properties enabled\n widget = self.get_node(id)\n helperRoot = helperModel.get_node(\"root.widget\")\n template = self.get_templates()['templates.timeseriesWidget']\n\n children = helperRoot.get_children(3)\n print(f\"2 level children {[node.get_browse_path() for node in children]}\")\n for child in helperRoot.get_children():\n if child.get_properties()[\"type\"] == \"observer\":\n widgetNode = widget.get_child(child.get_name()).get_child(\"properties\")\n helperNode = child.get_child(\"properties\")\n\n for prop in helperNode.get_value():\n current = widgetNode.get_value()\n if prop not in current:\n current.append(prop)\n widgetNode.set_value(current)\n\n for child in helperRoot.get_children(3):\n if child.get_properties()[\"type\"] == \"referencer\":\n self.logger.debug(f\"found referencer {child.get_name()}\")\n # now adjust the references of new nodes and of the ones that were there\n targets = child.get_properties()[\"forwardRefs\"]\n if targets:\n targets = [helperModel.get_browse_path(ref) for ref in targets]\n requiredTargets = [widget.get_browse_path()+\".\"+\".\".join(ref.split(\".\")[2:]) for ref in targets]\n self.logger.debug(f\"required targets {requiredTargets}\")\n #now check in the model\n widgetNodePath = widget.get_browse_path()+ child.get_browse_path()[len(helperRoot.get_browse_path()):]\n widgetNode = self.get_node(widgetNodePath)\n #now check if we have them\n targetPaths = [tNode.get_browse_path() for tNode in widgetNode.get_targets()]\n for target in requiredTargets:\n if target not in targetPaths:\n self.logger.debug(f\"adding ref {widgetNode.get_browse_path()} => {target}\")\n self.add_forward_refs(widgetNode.get_id(),[target])\n\n\n\n #now the system progress observer\n if not self.get_node(\"root.system.progress\"):\n self.create_template_from_path(\"root.system.progress\",self.get_templates()['system.observer'])\n self.set_value(\"root.system.progress.hasEvent\",True)\n self.set_value(\"root.system.progress.eventString\",\"system.progress\")\n self.set_value(\"root.system.progress.properties\",[\"value\"])\n self.set_value(\"root.system.progress.enabled\",True)\n\n\n except Exception as ex:\n self.logger.error(f\" {ex} , {sys.exc_info()[0]}\")\n 
helperModel.delete()\n\n helperModel.delete() # idempotent, safe to call again on the normal path\n self.enable_observers()\n\n # ########################################\n # time series api\n\n def time_series_create(self,desc):\n id = self.get_id(desc)\n return self.ts.create(id)\n\n def time_series_delete(self,desc):\n id = self.get_id(desc)\n return self.ts.delete(id)\n\n def time_series_insert(self, desc, values=None, times=None, allowDuplicates = False):\n id = self.get_id(desc)\n if not id in self.model:\n return None\n with self.lock:\n result = self.ts.insert(id,values, times,allowDuplicates=allowDuplicates)\n\n self.__notify_observers(id, \"value\")\n return result\n\n def time_series_append(self, desc, values=None, times=None):\n id = self.get_id(desc)\n if not id in self.model:\n return None\n with self.lock:\n result = self.ts.append(id,values, times)\n self.__notify_observers(id, \"value\")\n return result\n\n def time_series_delete_area(self,desc,start=None,end=None):\n id = self.get_id(desc)\n if not id in self.model:\n return None\n with self.lock:\n result = self.ts.delete_area(id,start=start,end=end)\n self.__notify_observers(id, \"value\")\n return result\n\n\n def time_series_merge(self, desc, values = None, times = None):\n id = self.get_id(desc)\n if not id in self.model:\n return False\n return self.ts.merge(id,values=values,times=times)\n\n\n\n def time_series_set(self,desc,values=None,times=None):\n id = self.get_id(desc)\n if not id in self.model:\n return None\n with self.lock: # was \"if self.lock:\", which never acquired the lock\n result = self.ts.set(id,values=values,times=times)\n self.__notify_observers(id, \"value\")\n return result\n\n
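 # --- usage sketch (editor's addition): basic time series calls ----------\n # Values and epoch-second timestamps go in as parallel arrays; node and\n # table names below are made up:\n #\n # m.time_series_create('root.mytable.variables.a')\n # m.time_series_insert('root.mytable.variables.a',\n # values=[1.0, 2.0], times=[1581483065.0, 1581483066.0])\n # m.time_series_append('root.mytable.variables.a',\n # values=[3.0], times=[1581483067.0])\n\n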
 def time_series_get_table(self,\n variables,\n tableDescriptor = None,\n start=None,\n end=None,\n noBins=None,\n includeIntervalLimits=False,\n resampleTimes=None,\n format=\"default\",\n toList = False,\n resampleMethod = None,\n copy=True):\n \"\"\"\n get a time series table from variables (nodes of type \"timeseries\").\n\n\n Args:\n variables [list of node descriptors]: nodes to be part of the data table requested (ordered!)\n\n tableDescriptor : a descriptor for the table where the variables reside\n possible addressing of the requested nodes:\n 1) ids or browsepaths of nodes (no tableDescriptor needed)\n 2) names of nodes and tableDescriptor of the table (names must be unique in the columns of the table)\n\n start, end [float]:\n the start and end time of the table given as seconds since epoch\n we also allow the special case of end = 0 and start = -interval\n we also allow the special case of start given and end = 0\n\n noBins(int): the number of samples to be returned inside the table between start and end time,\n if None is given, we return all samples (rows) we have in the table and do not aggregate\n\n includeIntervalLimits [bool]: if set to true, we will include one more data point each left and right of the requested time\n\n format: [enum] \"default\", \"flat\", see return description\n\n\n resampleMethod [enum]:\n how to resample if we need to; options are:\n None (if not specified): sample and hold\n \"linear\": linear interpolation\n \"linearfill\": linear interpolation and also interpolate \"nan\" or \"inf\" values in the original data\n\n toList: (bool) True: return data as python list, False: return numpy arrays\n\n examples:\n - get all data of the variables\n data = m.time_series_get_table([\"root.mytable.variables.a\",\"root.mytable.variables.b\"]) # get all data\n - request max 300 values of data (this is what the UI does)\n data = m.time_series_get_table([\"a\",\"b\"],\"root.mytable\",start=1581483065.323,end=1581483080.323,noBins=300,includeIntervalLimits=True)\n - request data and resample to equidistant 25 sec spacing, also fill possible nan values with interpolation\n times = list(range(1581483065,1581483065+100,25))\n data = m.time_series_get_table([\"a\",\"b\"],\"root.mytable\",resampleTimes = times,resampleMethod = \"linearfill\")\n\n\n Returns(dict)\n formatting depends on the \"format\" option\n \"default\": return the result as {\"var_a\":{\"values\":[...],\"__time\":[...]}, \"var_b\":{\"values\":[...],\"__time\":[...]}, ...}\n \"flat\": return the result as {\"var_a\":[...], \"var_a__time\":[...], \"var_b\":[...], \"var_b__time\":[...], ...}\n the variable descriptors are the ones given in the request\n \"__time\" : list of timestamps for the returned table in epoch seconds as float64\n \"values\": the list of float values of one of the requested variables\n \"\"\"\n if tableDescriptor:\n tableId = self.get_id(tableDescriptor)\n tableVars = self.get_leaves(tableId+\".columns\")\n else:\n tableId = None\n\n if type(start) is str:\n start = date2secs(start)\n if type(end) is str:\n end = date2secs(end)\n\n with self.lock:\n #first check if all requested variables exist and have type timeseries\n\n if not type(variables) is list:\n variables= [variables]\n\n varIds = {} # NodeId: request descriptor\n for var in variables:\n varId = self.get_id(var)\n if not varId:\n #try to find per columns and table desc\n found = False\n if tableId:\n for tableVar in tableVars:\n if tableVar[\"name\"] == var:\n varId = tableVar[\"id\"]\n found = True\n break\n\n if not found:\n self.logger.error(f\"requested variable {var} does not exist\")\n return False\n if self.model[varId][\"type\"]!=\"timeseries\":\n self.logger.error(f\"requested variable {var} not timeseries, instead {self.model[varId]['type']}\")\n return False\n\n varIds[varId]=var #remember it for later\n\n table = self.ts.get_table(list(varIds.keys()), start=start, end=end, copy=copy, resampleTimes=resampleTimes, noBins = noBins, includeIntervalLimits=includeIntervalLimits,resampleMethod=resampleMethod)\n\n #now map the descriptors back to the query: if it was a browsepath, we return a browsepath; if it was an id, we return an id\n # make some formatting\n def convert(input,toList=toList):\n if toList:\n return list(input)\n else:\n return input\n\n result = {}\n for k,v in table.items():\n if format==\"flat\":\n result[varIds[k]]=convert(v[\"values\"])\n result[varIds[k]+\"__time\"]=convert(v[\"__time\"])\n else:\n result[varIds[k]] = {\"values\":convert(v[\"values\"]),\"__time\":convert(v[\"__time\"])}\n\n return result\n\n def time_series_get_info(self,name=None):\n return self.ts.get_info(name)\n\n\n def time_series_get_raw(self,id,start=None,end=None):\n\n table = self.ts.get_table([id], start=start, end=end, copy=False, resampleTimes=None,\n noBins=None, includeIntervalLimits=False,\n resampleMethod=None)\n\n result = table[id]\n return result\n\n
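 # --- editor's example (added): what time_series_get_table returns -------\n # With format='flat' and toList=True, a request for one variable 'a'\n # (names made up) yields plain lists keyed by descriptor:\n #\n # data = m.time_series_get_table(['a'], 'root.mytable', toList=True, format='flat')\n # # -> {'a': [1.0, 2.0, 3.0],\n # # 'a__time': [1581483065.0, 1581483066.0, 1581483067.0]}\n\n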
 def time_series_insert_blobs(self, tableDesc, blobs=[]):\n \"\"\" a blob is a dict (or list of dicts) of keys and values sharing one time base like\n {\n \"a\": [1.5,1.6,1.7],\n \"b\": [2,3,4],\n \"__time\": [100001,100002,100003]\n }\n the descriptors of the variables can be ids, browsepaths or just names (without dots)\n if the descriptors are names, we try to find them in the model, they must exist there uniquely, otherwise\n they can't be processed\n we also autocreate the table or missing variables\n\n the data will be put in a table:\n - we try to find the table based on one of the variables, if not found, we create the table\n \"\"\"\n if not type(blobs) is list:\n blobs=[blobs]\n\n #first, find the table\n with self.lock:\n tableId = self.get_id(tableDesc)\n if not tableId:\n #table not found, create it\n tableId = self.create_node_from_path(tableDesc,properties={\"type\":\"table\"})\n if tableId:\n columnsId = self.create_node(parent=tableId, name=\"columns\", properties={\"type\": \"referencer\"})\n variablesId = self.create_node(parent=tableId, name=\"variables\", properties={\"type\": \"folder\"})\n else:\n self.logger.error(f\"cant create table {tableDesc}\")\n return False\n else:\n columnsId = self.get_child(tableId,\"columns\")\n variablesId = self.get_child(tableId, \"variables\")\n\n #now we know the tableId, columnsId, variablesId\n\n # iterate over all blobs and find the ids of the names in the blobs, if not found, create them\n # exchange the descriptors for ids\n\n tableVars = self.get_leaves(columnsId)\n desc2Id = {dic[\"name\"]:dic[\"id\"] for dic in tableVars} # key: the descriptor from the input blob, value: the id in the model, preloaded with the names\n\n #convert all to ids\n newBlobs=[]\n idsInBlobs=[]\n for blob in blobs:\n newBlob={}\n for k,v in blob.items():\n\n if k==\"__time\":\n newBlob[k]=v\n else:\n #does this id already exist?\n if k in desc2Id:\n id = desc2Id[k]\n else:\n id = None\n #try to find\n for var in tableVars:\n if var[\"name\"] == k:\n id = var[\"id\"] # was a bug: v[\"id\"] indexed the blob data instead of the column info\n break\n if not id:\n #still not found, we need to create it\n id = self.create_node(parent=variablesId,name=k,properties={\"type\": \"timeseries\"})\n if not id:\n self.logger.error(f\"cant find or create {k}\")\n continue\n else:\n self.add_forward_refs(columnsId,[id])\n desc2Id[k]=id #remember to speed up next time\n\n newBlob[id] = v\n idsInBlobs.append(id)\n newBlobs.append(newBlob)\n self.logger.debug(f\"inserting blobs {len(newBlobs)}\")\n result = self.ts.insert_blobs(newBlobs)\n self.__notify_observers(idsInBlobs, \"value\") # notify after the data is actually in the store\n return result\n\n\n # ########################################\n # event series api\n\n def event_series_create(self,desc,map={}):\n id = self.get_id(desc)\n\n if \"eventMap\" in self.model[id]:\n self.model[id][\"eventMap\"].update(map)\n else:\n self.model[id][\"eventMap\"]=map.copy()\n return self.ts.create(id)\n\n\n def event_series_get_new_number_entry(self,id):\n eventMap = self.model[id][\"eventMap\"]\n numbers = [v for k, v in eventMap.items()]\n newNumber = max(numbers)+1\n while newNumber in numbers:\n newNumber = newNumber+1\n return newNumber\n\n
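 # --- usage sketch (editor's addition): time_series_insert_blobs ---------\n # One blob carries several variables on a shared time base; the table and\n # variables are auto-created if missing (names made up):\n #\n # m.time_series_insert_blobs('root.mytable', {\n # 'a': [1.5, 1.6, 1.7],\n # 'b': [2, 3, 4],\n # '__time': [100001, 100002, 100003]\n # })\n\n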
 def event_series_get_event_number(self, desc, event, autoCreate=True):\n id = self.get_id(desc)\n if not id:\n return None\n with self.lock:\n eventMap = self.model[id][\"eventMap\"] # a dict like {\"starting\":1, \"machineStop\":2,...}\n if type(event) in [str,numpy.str_]:\n if event not in eventMap:\n if not autoCreate:\n return None\n # we must put a new eventString\n if eventMap == {}:\n newEventNumber = 1\n else:\n newEventNumber = self.event_series_get_new_number_entry(id)\n self.model[id][\"eventMap\"][event] = newEventNumber\n return newEventNumber\n else:\n #is a known event string, get the number\n return eventMap[event]\n else:\n #this is a number already, check if it is in the map\n eventNumbers = [v for k,v in eventMap.items()]\n if event in eventNumbers:\n return event\n else:\n if not autoCreate:\n return None\n #must create a new entry\n try:\n #to make sure we have only numbers there\n newEventString = \"event_\"+str(int(event))\n self.model[id][\"eventMap\"][newEventString]=int(event)\n except:\n self.log_error()\n return None\n return event\n\n\n def event_series_insert(self, desc, values=None, times=None, allowEventDuplicates = False):\n \"\"\"\n Args:\n values: list of events, where an event is either an eventString or an event number\n if values is a scalar, we assume that the same event will be inserted for all times\n times: list of timestamps (epoch seconds or iso date strings)\n allowEventDuplicates: setting this to true allows the same event to appear multiple times at the same time\n different events are always allowed at the same time\n \"\"\"\n id = self.get_id(desc)\n if not id in self.model:\n return None\n if values is None or times is None: # avoid truthiness checks, they break for numpy arrays\n return None\n if not(type(values) is list or type(values) is numpy.ndarray):\n values = [values]*len(times)\n\n #convert the values to numbers and create new map entries if needed\n numbers = numpy.asarray([self.event_series_get_event_number(id,event) for event in values],dtype=numpy.int64)\n #convert the times to epoch if not already done\n epochs = numpy.asarray([t if type(t) is not str else date2secs(t) for t in times ],dtype=numpy.float64)\n\n if not allowEventDuplicates:\n # we must delete the events which already exist at the same time with the same event\n data = self.event_series_get(desc)\n takeIndices = numpy.full(len(times),True)\n for idx,tim in enumerate(times):\n duplicates = numpy.where(data[\"__time\"]==tim)[0]\n for pos in duplicates:\n if numbers[idx] == data[\"values\"][pos]:\n takeIndices[idx] = False\n numbers = numbers[takeIndices]\n epochs = epochs[takeIndices]\n\n with self.lock:\n #on the TimeSeries class the allowDuplicates means that the same time can appear multiple times,\n # such that different or the same events can happen at the same time and thus produce the same\n # time stamp in the time series\n result = self.ts.insert(id,numbers, epochs, allowDuplicates=True)# we allow 2 events to appear at the same time!\n\n self.__notify_observers(id, \"value\")\n return result\n\n def event_series_set(self,desc,values=None,times=None):\n id = self.get_id(desc)\n if not id in self.model:\n return None\n with self.lock: # was \"if self.lock:\", which never acquired the lock\n # now \"refresh\" the event map\n numbers = [self.event_series_get_event_number(id, event) for event in values]\n result = self.ts.set(id,values=numbers,times=times)\n self.__notify_observers(id, \"value\")\n return result\n\n
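 # --- usage sketch (editor's addition): inserting events -----------------\n # Event strings are mapped to numbers via the node's eventMap; unknown\n # strings are auto-registered (node name made up):\n #\n # m.event_series_create('root.machine.events', map={'startMachine': 1})\n # m.event_series_insert('root.machine.events',\n # values=['startMachine', 'stopMachine'],\n # times=[1546437120.2, 1546437121.2])\n\n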
 def event_series_get(self,desc, start=None,end=None,format=\"default\",eventFilter=None):\n \"\"\"\n get events from an event series\n Args:\n desc: node descriptor\n start , end [float]:\n the start and end time of the query given as seconds since epoch\n we also allow the special case of end = 0 and start = -interval\n we also allow the special case of start given and end = 0\n\n format: [enum] \"default\", \"iso\", \"events\" (see Returns)\n eventFilter : [string] an eventString or list of eventStrings as positive match filter\n\n examples:\n - get all events of the series\n data = m.event_series_get(\"root.machine.events\")\n - get only the \"startMachine\" events of a time window\n data = m.event_series_get(\"root.machine.events\", start=1581483065, end=1581483080, eventFilter=\"startMachine\")\n\n Returns(dict)\n formatting depends on the \"format\" option\n \"default\": return the result as {\"values\":[...],\"__time\":[...],\"eventStrings\":[...],\"eventMap\":{\"myevent\":1,\"anotherevent\":2}}\n \"iso\": like \"default\", but with the timestamps converted to iso strings\n \"events\": return {\"eventMap\":{...},\"events\":{\"myevent\":[t1,t2,...],...}} with the timestamps grouped per event string\n \"\"\"\n id = self.get_id(desc)\n if not id:\n return None\n\n data = self.ts.get_table([id], start=start, end=end)\n if data == {}:\n #this variable is not in the store\n data = {id:{\"values\":numpy.asarray([]),\"__time\":numpy.asarray([])}}\n\n eventMap = self.model[id][\"eventMap\"].copy()\n reverseMap = {v:k for k,v in eventMap.items()}\n values = data[id][\"values\"].astype(numpy.int64)\n times = data[id][\"__time\"]\n\n #now filter\n if eventFilter:\n filterNumbers = [] # renamed from \"filter\" to avoid shadowing the builtin\n if type(eventFilter) is not list:\n eventFilter = [eventFilter]\n for evString in eventFilter:\n if evString in eventMap:\n filterNumbers.append(eventMap[evString])\n indices = [idx for idx,val in enumerate(values) if val in filterNumbers]\n values = values[indices]\n times = times[indices]\n\n result = {\n \"values\":values,\n \"__time\":times,\n \"eventMap\":eventMap,\n \"eventStrings\":[reverseMap[v] for v in values]\n }\n if format == \"iso\":\n #convert the timestamps to iso\n result[\"__time\"]=[epochToIsoString(t) for t in result[\"__time\"]]\n if format == \"events\":\n existingEvents = set(result[\"values\"])\n events = {reverseMap[ev]:[] for ev in existingEvents}\n for ev,ti in zip(result[\"values\"],result[\"__time\"]):\n events[reverseMap[ev]].append(ti)\n result[\"events\"]=events\n del result[\"values\"]\n del result[\"__time\"]\n del result[\"eventStrings\"]\n\n return result\n\n def event_series_insert_blob(self,blob):\n \"\"\"\n insert events in various blob syntaxes\n\n Args:\n blob: a dictionary in one of these styles\n a) {\n \"node\": nodedescriptor,\n \"events\": \"startMachine\",\n \"__time\": [\"2018.01.01T00:10:08.445+02:00\",1546437120.2,1546437121.2,1546437122.2] # allows iso or epoch\n }\n b) {\n \"node\": nodedescriptor,\n \"events\": [\"startMachine\",\"stopMachine\",\"startMachine\",\"startMachine\"],\n \"__time\": [\"2018.01.01T00:10:08.445+02:00\",1546437120.2,1546437121.2,1546437122.2] # allows iso or epoch\n }\n c) {\n \"node\": nodedescriptor,\n \"events\": [\n {\"event\":\"startMachine\",\n \"__time\":\"2018.01.01T00:10:08.445+02:00\"\n },\n {\"event\":\"stopMachine\",\n \"__time\":\"2018.01.01T00:10:08.445+02:00\"\n }\n ]\n }\n Returns:\n true/false for success\n\n \"\"\"\n\n if type(blob[\"events\"]) is not list:\n #style a)\n events = blob[\"events\"]\n times = blob[\"__time\"]\n else:\n #events is a list\n if type(blob[\"events\"][0]) is dict:\n #style c)\n events = []\n times = []\n for d in blob[\"events\"]:\n events.append(d[\"event\"])\n times.append(d[\"__time\"])\n else:\n #style b)\n events = blob[\"events\"]\n times = blob[\"__time\"]\n return self.event_series_insert(blob[\"node\"],events,times)\n\n
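 # --- editor's example (added): event_series_get output formats ----------\n # format='events' groups the timestamps per event string (values are\n # illustrative only):\n #\n # m.event_series_get('root.machine.events', format='events')\n # # -> {'eventMap': {'startMachine': 1, 'stopMachine': 2},\n # # 'events': {'startMachine': [1546437120.2],\n # # 'stopMachine': [1546437121.2]}}\n\n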
 def event_series_delete(self,desc,start=None, end = None, eventsToDelete=[]):\n id = self.get_id(desc)\n if not id:\n return None\n\n if start == None and end == None and eventsToDelete == []:\n #delete all\n with self.lock:\n self.model[id][\"eventMap\"]={}\n result = self.ts.set(id, values=[], times=[])\n else:\n #delete some events\n with self.lock:\n data = self.ts.get_table([id])\n if not start:\n start = 0\n if not end:\n end = numpy.inf\n\n times = data[id][\"__time\"]\n values = data[id][\"values\"]\n over = times>=start\n under = times<=end\n deleteMaskTime = over & under\n if eventsToDelete == []:\n deleteMaskValues = numpy.full(len(deleteMaskTime),True)\n else:\n deleteMaskValues = numpy.full(len(deleteMaskTime),False)\n for ev in eventsToDelete:\n evNumber = self.model[id][\"eventMap\"][ev]\n mask = values == evNumber\n deleteMaskValues = deleteMaskValues | mask\n deleteMask = deleteMaskTime & deleteMaskValues\n times = times[~deleteMask]\n values = values[~deleteMask]\n self.event_series_set(id,values,times)\n\n def get_object(self,desc):\n id = self.get_id(desc)\n if not id:\n return None\n with self.lock:\n if not self.model[id][\"type\"] == \"object\":\n return None\n if \"object\" not in self.model[id]:\n return None\n return self.model[id][\"object\"]\n\n def instantiate_object(self,desc,writeToModel=True):\n id = self.get_id(desc)\n if not id:\n return False\n with self.lock:\n if not self.model[id][\"type\"] == \"object\":\n return False\n try:\n className = self.model[id][\"class\"]\n if \"autoReload\" in self.model[id] and self.model[id][\"autoReload\"]==True and self.global_auto_reload_enabled():\n # must reload the module\n module = importlib.reload(self.objectClasses[className][\"module\"])\n classDefinition = getattr(module, className.split('.', 1).pop())\n # now update our global list\n self.objectClasses[className][\"module\"] = module\n self.objectClasses[className][\"class\"] = classDefinition\n\n classDefinition = self.objectClasses[className][\"class\"]\n object = classDefinition(self.get_node(id)) #instantiate the object\n if writeToModel:\n self.model[id][\"object\"]=object\n return object\n except:\n self.log_error()\n return None\n\n def instantiate_all_objects(self):\n with self.lock:\n #make a list first for iteration, we can't iterate over the model,\n # as the instantiation of objects might produce new nodes while we iterate\n objects = [k for k,v in self.model.items() if v[\"type\"] == \"object\"]\n\n for id in objects:\n try:\n self.instantiate_object(id)\n except:\n self.log_error()\n\n def reset_all_objects(self):\n with self.lock:\n #make a list first for iteration, we can't iterate over the model,\n # as the instantiation of objects might produce new nodes while we iterate\n objects = [k for k,v in self.model.items() if v[\"type\"] == \"object\"]\n\n for id in objects:\n try:\n self.get_object(id).reset(None)\n except:\n self.log_error()\n\n\n\n def global_auto_reload_enabled(self):\n if self.get_value(\"root.system.enableAutoReload\") == False:\n return False\n else:\n return True # this is also the case if the node is not there, as get_value returns None then\n\n\n\n def create_test(self,testNo=1):\n \"\"\"\n this function creates tests for demonstration purposes\n \"\"\"\n if testNo == 1:\n self.create_node(\"root\",name=\"variables\",type=\"folder\")\n for var in [\"f0\",\"f1\",\"f2\",\"f3\",\"count\",\"time\",\"back\"]:\n self.create_node(\"root.variables\",name=var,type=\"column\")\n self.create_node_from_path('root.folder2.myconst',{\"type\":\"const\",\"value\":\"21data\"})\n self.create_node_from_path('root.folder2.myfkt', {\"type\": \"function\"})\n\n #for the visu\n self.create_node_from_path('root.visualization.pipelines.occupancy.url',{\"type\":\"const\",\"value\":\"http://localhost:5006/bokeh_web\"})\n 
self.create_node_from_path('root.visualization.pipelines.demo2.url',{\"type\":\"const\",\"value\":\"http://21data.io\"})\n\n\n #create an official table\n template = [\n {\n \"name\": \"description\",\n \"type\": \"const\",\n \"value\": \"this is a great table\"\n },\n {\n \"name\": \"columns\",\n \"type\": \"referencer\",\n },\n {\n \"name\": \"timeField\",\n \"type\": \"referencer\",\n },\n {\n \"name\": \"numberOfRows\",\n \"type\": \"variable\",\n \"value\": 0\n }\n\n ]\n self.create_node(\"root\", name=\"mytable\", type=\"table\")\n self.create_nodes_from_template(\"root.mytable\", template=template)\n for var in [\"f0\",\"f1\",\"f2\",\"f3\",\"time\",\"back\"]:\n self.add_forward_refs(\"root.mytable.columns\",[\"root.variables.\"+var])\n self.add_forward_refs(\"root.mytable.timeField\", [\"root.variables.time\"])\n\n #add data\n startTime=datetime.datetime(2018,1,1,0,0,0,tzinfo=pytz.UTC)\n vars={\"f0\":0.01,\"f1\":0.02,\"f2\":0.04,\"f3\":0.1,\"back\":0.01}\n SIZE = 10*60 # in seconds units\n STEP = 0.1\n #!!! we are producing size/step time points\n\n \"\"\" for i in range(SIZE):\n dataDict = {}\n for var in vars:\n value = numpy.cos(2*numpy.pi*vars[var]*i/SIZE*3)\n dataDict[\"root.variables.\"+var]=value\n mytime = startTime + datetime.timedelta(seconds = i)\n dataDict[\"root.variables.time\"] = mytime\n #print(mytime)\n self.add_timeseries(dataDict)\n \"\"\"\n\n startEpoch = date2secs(startTime)\n times = numpy.arange(startEpoch,startEpoch+SIZE,STEP,dtype=numpy.float64)\n print(\"we have time:\",times.shape)\n for var in vars:\n values = numpy.cos(2*numpy.pi*vars[var]*times)\n id=self.get_id(\"root.variables.\"+str(var))\n if var ==\"back\":\n #we make -1,0,1 out of it\n values = numpy.round(values)\n self.model[id][\"value\"]=values.tolist()\n id = self.get_id(\"root.variables.time\")\n self.model[id][\"value\"]=(times).tolist()\n #now correct the background\n\n\n\n #now make some widget stuff\n self.create_node_from_path('root.visualization.widgets.timeseriesOne',{\"type\":\"widget\"})\n self.create_node_from_path('root.visualization.widgets.timeseriesOne.selectableVariables',\n {\"type\":\"referencer\"})\n self.create_node_from_path('root.visualization.widgets.timeseriesOne.selectedVariables',\n {\"type\": \"referencer\"})\n self.create_node_from_path('root.visualization.widgets.timeseriesOne.startTime',\n {\"type\": \"variable\",\"value\":None})\n self.create_node_from_path('root.visualization.widgets.timeseriesOne.endTime',\n {\"type\": \"variable\",\"value\":None})\n self.create_node_from_path('root.visualization.widgets.timeseriesOne.bins',\n {\"type\": \"const\",\"value\":300})\n self.create_node_from_path('root.visualization.widgets.timeseriesOne.hasAnnotation',\n {\"type\": \"const\", \"value\": True})\n self.create_node_from_path('root.visualization.widgets.timeseriesOne.hasSelection',\n {\"type\": \"const\", \"value\": False})\n self.create_node_from_path('root.visualization.widgets.timeseriesOne.hasAnnotation.annotations',\n {\"type\": \"referencer\"})\n self.create_node_from_path('root.visualization.widgets.timeseriesOne.hasAnnotation.newAnnotations',\n {\"type\": \"folder\"})\n\n\n self.create_node_from_path('root.visualization.widgets.timeseriesOne.hasAnnotation.tags',\n {\"type\": \"const\",\"value\":[\"one\",\"two\"]})\n self.create_node_from_path('root.visualization.widgets.timeseriesOne.hasAnnotation.colors',\n {\"type\": \"const\",\"value\":[\"yellow\",\"brown\",\"greay\",\"green\",\"red\"]})\n\n\n 
self.create_node_from_path('root.visualization.widgets.timeseriesOne.table',\n {\"type\": \"referencer\"})\n self.create_node_from_path('root.visualization.widgets.timeseriesOne.lineColors',\n {\"type\": \"const\", \"value\": [\"blue\", \"yellow\", \"brown\", \"grey\", \"red\"]})\n self.add_forward_refs('root.visualization.widgets.timeseriesOne.selectedVariables',['root.variables.f0','root.variables.f1','root.variables.f3'])\n self.add_forward_refs('root.visualization.widgets.timeseriesOne.selectableVariables',['root.variables'])\n self.add_forward_refs('root.visualization.widgets.timeseriesOne.table',['root.mytable'])\n\n self.create_node_from_path('root.visualization.widgets.timeseriesOne.observer',{\"type\":\"referencer\"})\n self.create_node_from_path('root.visualization.widgets.timeseriesOne.observerUpdate', {\"type\": \"const\",\"value\":[\"line\",\"background\",\"annotations\"]})\n\n\n\n #now the annotations\n anno = [\n {\n \"name\": \"tags\",\n \"type\": \"const\",\n \"value\": [\"one\",\"two\"]\n },\n {\n \"name\": \"startTime\",\n \"type\": \"const\",\n \"value\": None\n },\n {\n \"name\": \"endTime\",\n \"type\": \"const\",\n \"value\": None\n },\n {\n \"name\": \"text\",\n \"type\": \"const\",\n \"value\": \"this is a great annotation\"\n }\n\n ]\n\n tags=[\"one\",\"two\",\"one\",\"one\",\"two\",\"two\",\"one\",\"one\",\"one\",\"two\",\"one\",\"one\"]\n self.create_node_from_path(\"root.annotations\",{\"type\":\"folder\"})\n startTime = datetime.datetime(2018, 1, 1, 0, 0, 0, tzinfo=pytz.UTC)\n for i in range(10):\n newAnno = copy.deepcopy(anno)\n newAnno[1][\"value\"] = (startTime + datetime.timedelta(minutes=(i*10))).isoformat()\n newAnno[2][\"value\"] = (startTime + datetime.timedelta(minutes=(i*10+1))).isoformat()\n newAnno[0][\"value\"] = [tags[i],tags[i+1]]\n newAnnoPath = \"root.annotations.anno\"+str(i)\n self.create_node_from_path(newAnnoPath,{\"type\":\"annotation\"})\n self.create_nodes_from_template(newAnnoPath,newAnno)\n\n #also add the annotations to the widget\n self.add_forward_refs(\"root.visualization.widgets.timeseriesOne.hasAnnotation.annotations\",[\"root.annotations\",\"root.visualization.widgets.timeseriesOne.hasAnnotation.newAnnotations\"])\n\n\n #make a real function\n self.create_node_from_path(\"root.functions\",{\"type\":\"folder\"})\n self.create_nodes_from_template(\"root.functions\",[self.templates[\"testfunction.delayFunctionTemplate\"]])\n\n #now make cutom function to trigger something\n self.create_nodes_from_template(\"root.functions\",[self.templates[\"counterfunction.counterFunctionTemplate\"]])\n #now hook the function output to the observer of the plot\n self.add_forward_refs('root.visualization.widgets.timeseriesOne.observer',['root.functions.counterFunction.output'])\n\n #now make custom buttons\n buttons = [\n {\n \"name\":\"button1\",\n \"type\":\"folder\",\n \"children\":[\n {\"name\":\"caption\",\"type\":\"const\",\"value\":\"start learner\"},\n {\"name\":\"counter\", \"type\": \"variable\", \"value\":0},\n {\"name\": \"onClick\", \"type\": \"referencer\"}\n ]\n }\n ]\n self.create_node_from_path(\"root.visualization.widgets.timeseriesOne.buttons\",{\"type\":\"folder\"})\n self.create_nodes_from_template(\"root.visualization.widgets.timeseriesOne.buttons\",buttons)\n self.add_forward_refs(\"root.visualization.widgets.timeseriesOne.buttons.button1.onClick\",[\"root.functions.counterFunction\"])\n\n\n #now the backgrounds\n 
self.create_node_from_path(\"root.visualization.widgets.timeseriesOne.hasBackground\",{\"type\":\"const\",\"value\":True})\n self.create_node_from_path(\"root.visualization.widgets.timeseriesOne.background\",{\"type\":\"referencer\"})\n self.add_forward_refs(\"root.visualization.widgets.timeseriesOne.background\",[\"root.variables.back\"])\n self.create_node_from_path(\"root.visualization.widgets.timeseriesOne.backgroundMap\",{\"type\":\"const\",\"value\":{\"1\":\"red\",\"0\":\"green\",\"-1\":\"blue\",\"default\":\"white\"}})\n\n\n self.show()\n\n elif testNo == 2:\n #we take the full test number 1 and rearrange some things\n self.create_test(1)\n self.currentModelName = \"occupancydemo\"\n\n import data.occupancy_data.occupancy as occ\n occData = occ.read_occupancy(\"./data/occupancy_data/datatest2.txt\")\n\n #create an official table\n template = [\n {\n \"name\": \"description\",\n \"type\": \"const\",\n \"value\": \"this is the occupancy data table\"\n },\n {\n \"name\": \"columns\",\n \"type\": \"referencer\",\n },\n {\n \"name\": \"timeField\",\n \"type\": \"referencer\",\n },\n {\n \"name\": \"variables\",\n \"type\": \"folder\",\n }\n\n ]\n self.create_node(\"root\", name=\"occupancy\", type=\"table\")\n self.create_nodes_from_template(\"root.occupancy\", template=template)\n for var in occData:\n path = \"root.occupancy.variables.\"+var\n self.create_node_from_path(path,{\"type\":\"column\"})\n self.set_value(path,occData[var])\n self.add_forward_refs(\"root.occupancy.columns\",[path])\n self.add_forward_refs(\"root.occupancy.timeField\",[\"root.occupancy.variables.date\"])\n #now create the classification\n self.create_node(\"root.occupancy\", name=\"classification\", type=\"column\")\n self.set_value(\"root.occupancy.classification\", [0]*len(occData[list(occData.keys())[0]]))\n self.add_forward_refs(\"root.occupancy.columns\", [\"root.occupancy.classification\"])\n\n #create another TS-widget\n self.create_node_from_path('root.visualization.widgets.timeseriesOccupancy', {\"type\": \"widget\"})\n self.create_nodes_from_template('root.visualization.widgets.timeseriesOccupancy',modeltemplates.timeseriesWidget)\n self.create_nodes_from_template('root.visualization.widgets.timeseriesOccupancy.buttons.button1',modeltemplates.button)\n self.add_forward_refs('root.visualization.widgets.timeseriesOccupancy.selectedVariables',[\"root.occupancy.variables.Temperature\"])\n self.add_forward_refs('root.visualization.widgets.timeseriesOccupancy.selectableVariables',[\"root.occupancy.variables\"])\n self.add_forward_refs('root.visualization.widgets.timeseriesOccupancy.table',['root.occupancy'])\n self.add_forward_refs('root.visualization.widgets.timeseriesOccupancy.background',['root.occupancy.classification'])\n self.set_value('root.visualization.widgets.timeseriesOccupancy.backgroundMap', {\"0\": \"brown\", \"1\": \"yellow\", \"-1\": \"blue\", \"default\": \"white\"}) #match annotation colors\n #self.set_value('root.visualization.widgets.timeseriesOccupancy.backgroundMap', {\"0\": \"blue\", \"1\": \"black\", \"-1\": \"blue\", \"default\": \"white\"}) #match annotation colors\n self.set_value('root.visualization.widgets.timeseriesOccupancy.hasAnnotation.tags',[\"busy\",\"free\"])\n\n #now create the logistic regression\n self.create_nodes_from_template('root',[self.templates[\"logisticregression.logisticRegressionTemplate\"]])\n self.add_forward_refs('root.logisticRegression.input',['root.occupancy.variables.Temperature', 'root.occupancy.variables.Light','root.occupancy.variables.CO2'])\n 
self.add_forward_refs('root.logisticRegression.output', ['root.occupancy.classification'])\n self.add_forward_refs('root.logisticRegression.annotations',['root.visualization.widgets.timeseriesOccupancy.hasAnnotation.newAnnotations'])\n self.set_value('root.logisticRegression.categoryMap', {\"busy\": 1, \"free\": 0})\n #also hook the button on it\n self.add_forward_refs('root.visualization.widgets.timeseriesOccupancy.buttons.button1.onClick',['root.logisticRegression'])\n\n self.add_forward_refs('root.visualization.widgets.timeseriesOccupancy.observer',['root.logisticRegression.executionCounter']) # observe the execution of the scorer\n\n self.show()\n\n elif testNo == 3:\n # make some nodes\n\n for id in range(10):\n self.create_node_from_path(\"root.add.var\"+str(id), {\"type\": \"variable\", \"value\": id+100})\n for id in range(100):\n self.create_node_from_path(\"root.remove.var\"+str(id), {\"type\": \"variable\", \"value\": id+100})\n\n self.create_node_from_path(\"root.change_name_one\")\n self.create_node_from_path(\"root.change_value\")\n\n self.create_node_from_path(\"root.move.first\")\n self.create_node_from_path(\"root.move.second\")\n\n self.create_node_from_path(\"root.refs\",properties={\"type\":\"referencer\"})\n self.add_forward_refs(\"root.refs\",[\"root.move.first\",\"root.move.second\",\"root.move\"])\n\n\n #now start a thread that changes the tree periodically\n\n def __update_tree():\n while True:\n time.sleep(3.0)\n with self.lock:\n self.logger.debug(\"__update_tree\")\n\n self.create_node_from_path(\"root.add.dyn\"+str(uuid.uuid4()))\n removeFolder = self.get_id(\"root.remove\")\n if self.model[removeFolder][\"children\"]:\n self.delete_node(self.model[removeFolder][\"children\"][0])\n\n id = self.get_id(\"root.change_name_one\")\n if id:\n self.model[id][\"name\"]=\"change_name_two\"\n else:\n id = self.get_id(\"root.change_name_two\")\n self.model[id][\"name\"]=\"change_name_one\"\n\n id = self.get_id(\"root.move\")\n self.model[id][\"children\"].reverse()\n\n id=self.get_id(\"root.refs\")\n self.model[id][\"forwardRefs\"].reverse()\n\n self.set_value(\"root.change_value\",int(uuid.uuid4())%100)\n\n\n self.testThread = threading.Thread(target=__update_tree)\n self.testThread.start()\n\n\n\n\n\n\nif __name__ == '__main__':\n\n\n def test1():\n m=Model()\n m.create_node(\"root\",name=\"folder1\")\n m.create_node(\"root.folder1\",name=\"folder2\")\n m.create_node(\"2\",name=\"second\")\n m.create_node(\"root\",name=\"myreferencer\",type=\"referencer\")\n m.create_node(\"root.folder1\",name=\"myvar\",type=\"variable\")\n m.set_value(\"root.folder1.myvar\",44.5)\n m.add_forward_refs(\"root.myreferencer\",[\"root.folder1\"])\n m.add_property(\"root.folder1.folder2\",\"uasource\",\"192.168.5.6\")\n m.show()\n m.get_model()\n m.delete_node(\"root.myreferencer\")\n\n return m\n\n\n def test_template():\n m=Model()\n template = {\n \"myfunction\": {\n \"type\": \"function\",\n \"value\": \"someValue\",\n \"opcua\":\"opc.tcp://129.160.1.1:4880::n2=2;s=mystrin\"\n },\n \"myreferencer\": {\n \"type\": \"referencer\",\n \"forwardRefs\": ['.myfolder.var1', '.myfolder.var2', '.myfolder.var3']\n },\n \"myfolder\": {\n \"type\": \"folder\",\n \"children\": {\n \"var1\": {\"type\": \"const\", \"value\": \"1\"},\n \"var2\": {\"type\": \"variable\"},\n \"var3\": {\"type\": \"timeseries\"},\n }\n },\n\n }\n m.create_nodes_from_template(template=template)\n m.show()\n\n def save_test():\n print(\"save and load test\")\n m=Model()\n m.create_test()\n\n m.save(\"savetest\")\n\n 
n=Model()\n n.load(\"savetest\")\n\n if len(n.get_model())!= len(m.get_model()):\n print(\"unequal size\")\n return False\n #now compare\n mModel = m.get_model()\n nModel = n.get_model()\n for nodeId in mModel:\n #print(\"check\",nodeId)\n try:\n if nModel[nodeId]!=mModel[nodeId]:\n print(\"unequal before after \",nodeId,mModel[nodeId],nModel[nodeId])\n return False\n except:\n print(\"can't find\",nodeId)\n return False\n\n print(\"savetest passed\")\n return True\n\n\n def plugintest():\n m=Model()\n m.create_node(\"root\", name=\"folder1\")\n m.create_nodes_from_template(\"root.folder1\",m.templates[\"testfunction.delayFunctionTemplate\"])\n m.show()\n m.execute_function(\"root.folder1.delayFunction\")\n statusNode = m.get_node(\"root.folder1.delayFunction.status\")\n progressNode = m.get_node(\"root.folder1.delayFunction.progress\")\n\n while(statusNode.get_value()!=\"finished\"):\n print(\"progress is\",progressNode.get_value())\n time.sleep(0.3)\n\n\n\n print(\"execution result ===================\")\n m.show()\n\n def getnodetest():\n m=Model()\n m.create_node(\"root\", name=\"folder1\")\n m.create_node(\"root.folder1\", name=\"folder2\")\n m.create_node(\"root.folder1\", name=\"myvar\", type=\"variable\")\n myvar = m.get_node(\"root.folder1.myvar\")\n myvar.set_value(33)\n print(\"value\",myvar.get_value())\n\n\n\n\n\n def testfunctions_test():\n m = Model()\n m.create_test(1)\n m.show()\n\n table= m.get_timeseries_table([\"root.variables.f0\",\"root.variables.f1\",\"root.variables.time\"],noBins=25)\n print(\"shape\",table.shape)\n for row in table.T:\n for elem in row:\n print(str(\"%3.7f\"%elem),\" \",end=\"\")\n print(\"\")\n\n def time_convert_test():\n d1=datetime.datetime(2018,1,1,0,0,0,tzinfo = pytz.UTC)\n print(d1)\n s1 = date2secs(d1)\n print(s1)\n d2 = secs2date(s1)\n print(d2)\n\n d3 =\"2018-01-01T00:10:08.445+02:00\"\n print(d3)\n d4=dateutil.parser.parse(d3)\n print(d4)\n s4=date2secs(d4)\n print(s4)\n d5=secs2date(s4)\n print(d5)\n\n def table_test():\n m=Model()\n print(\"this test creates a table and writes some data in\")\n\n template = [\n {\n \"name\": \"type\",\n \"type\": \"const\",\n \"value\": \"timeSeriesTable\"\n },\n {\n \"name\":\"description\",\n \"type\": \"const\",\n \"value\": \"this is a great table\"\n },\n {\n \"name\":\"data\",\n \"type\":\"folder\",\n \"children\":[\n {\"name\":\"var1\",\"type\": \"column\",\"value\":[]},\n {\"name\":\"var2\",\"type\": \"column\",\"value\":[]},\n {\"name\":\"var3\",\"type\": \"column\",\"value\":[]},\n {\"name\":\"time\",\"type\": \"column\",\"value\":[]}\n ]\n },\n {\n \"name\":\"columns\",\n \"type\": \"referencer\",\n \"forwardRefs\": ['.data.var1', '.data.var2', '.data.var3',\".data.time\"]\n },\n {\n \"name\":\"timeField\",\n \"type\": \"referencer\",\n \"forwardRefs\":['.data.time']\n },\n {\n \"name\": \"numberOfRows\",\n \"type\": \"variable\",\n \"value\":0\n }\n\n ]\n m.create_node(\"root\", name=\"mytable\",type=\"table\")\n m.create_nodes_from_template(\"root.mytable\",template=template)\n m.show()\n\n #now write some data with autocreates\n mytime = datetime.datetime.now(pytz.timezone(\"CET\"))\n myepoch=date2secs(mytime)\n blob = {\"root.mytable.data.var1\":1,\"root.mytable.data.var2\":2,\"root.mytable.data.time\":myepoch,\"root.mytable.data.newvar\":99}\n m.append_table(blob)\n m.show()\n\n #now add more data but leave out var\n blob = {\"root.mytable.data.var1\": 10, \"root.mytable.data.var2\": 20, \"root.mytable.data.time\": myepoch}\n m.append_table(blob)\n blob = {\"root.mytable.data.var1\": 10, 
\"root.mytable.data.var2\": 20, \"root.mytable.data.var4\": 4, \"root.mytable.data.time\": myepoch}\n m.append_table(blob)\n m.show()\n\n\n def test_table_autocreate():\n mytime = datetime.datetime.now(pytz.timezone(\"CET\"))\n myepoch=date2secs(mytime)\n blob = {\"root.data.var1\":1,\"root.data.var2\":2,\"root.folder.time\":myepoch,\"root.data.newvar\":99}\n m=Model()\n m.append_table(blob)\n m.show()\n\n\n def test_create_from_path():\n m=Model()\n m.create_node_from_path(\"root.myfolder.myfolder2.var\",{\"type\":\"variable\",\"value\":33})\n m.show()\n\n def test_get_children():\n m=Model()\n m.create_test()\n nodes = m.get_node_with_children('root.folder2')\n #lastnode = '10'\n #print(m.get_path(lastnode))\n print(json.dumps(nodes,indent=4))\n\n def test_create():\n m=Model()\n m.create_test(1)\n m.show()\n def test_get_forwards():#\n #in this test, we check the forwards get results over folders, referencers etc.\n m=Model()\n m.create_node_from_path(\"root.folder.var1\",{\"type\":\"variable\"})\n m.create_node_from_path(\"root.folder.var2\", {\"type\": \"variable\"})\n m.create_node_from_path(\"root.folder.var3\", {\"type\": \"variable\"})\n m.create_node_from_path(\"root.ref1\", {\"type\": \"referencer\"})\n m.create_node_from_path(\"root.ref2\", {\"type\": \"referencer\"})\n m.add_forward_refs(\"root.ref1\",[\"root.folder\"])\n m.add_forward_refs(\"root.ref2\", [\"root.ref1\"])\n\n m.show()\n\n res=m.get_leaves(\"root.ref1\")\n print(res)\n for k in res:\n print(k[\"name\"])\n res = m.get_leaves(\"root.ref2\")\n for k in res:\n print(k[\"name\"])\n\n def pickle_save():\n import pickle\n m=Model()\n m.create_test(2)\n # write python dict to a file\n\n output = open('pickle_save.pkl', 'wb')\n pickle.dump(m.get_model(), output)\n output.close()\n\n n=Model()\n # read python dict back from the file\n pkl_file = open('pickle_save.pkl', 'rb')\n restore = pickle.load(pkl_file)\n pkl_file.close()\n\n print(\"compare after pickle restre\",restore==m.get_model())\n\n\nif __name__ == '__main__':\n #############\n\n\n #test1()\n #ts_test1()\n #test_template()\n save_test()\n pickle_save()\n #plugintest()\n #getnodetest()\n #table_query_test()\n #testfunctions_test()\n #time_conver_test()\n\n #test_create_from_path()\n #table_test()\n #test_table_autocreate()\n #test_get_children()\n #test_get_forwards()\n #test_create()\n\n #read in the commmand line options:\n # demo1: create the test for the demo1, and store it in file (option2)\n #\n if len(sys.argv) > 1:\n if sys.argv[1] == \"demo1\":\n fileName = sys.argv[2]\n print(\"creating demo and save as \",fileName)\n m = Model()\n m.create_test()\n m.show()\n fileName = sys.argv[2]\n m.save(fileName)\n\n\n"
]
| [
[
"numpy.full",
"numpy.asarray",
"numpy.round",
"numpy.where",
"numpy.arange",
"numpy.isscalar",
"numpy.cos",
"numpy.append",
"numpy.linspace",
"numpy.insert"
]
]
|
ingako/pgmpy | [
"a695e3fc97a467855803f18c0ede7600c98e13ab"
]
| [
"pgmpy/tests/test_models/test_BayesianModel.py"
]
| [
"import unittest\n\nimport networkx as nx\nimport pandas as pd\nimport numpy as np\nimport numpy.testing as np_test\n\nfrom pgmpy.models import BayesianModel, MarkovModel\nfrom pgmpy.base import DAG\nimport pgmpy.tests.help_functions as hf\nfrom pgmpy.factors.discrete import (\n TabularCPD,\n JointProbabilityDistribution,\n DiscreteFactor,\n)\nfrom pgmpy.independencies import Independencies\nfrom pgmpy.estimators import (\n BayesianEstimator,\n BaseEstimator,\n MaximumLikelihoodEstimator,\n)\nfrom pgmpy.base import DAG\n\n\nclass TestBaseModelCreation(unittest.TestCase):\n def setUp(self):\n self.G = BayesianModel()\n\n def test_class_init_without_data(self):\n self.assertIsInstance(self.G, nx.DiGraph)\n\n def test_class_init_with_data_string(self):\n self.g = BayesianModel([(\"a\", \"b\"), (\"b\", \"c\")])\n self.assertListEqual(sorted(self.g.nodes()), [\"a\", \"b\", \"c\"])\n self.assertListEqual(\n hf.recursive_sorted(self.g.edges()), [[\"a\", \"b\"], [\"b\", \"c\"]]\n )\n\n def test_class_init_with_data_nonstring(self):\n BayesianModel([(1, 2), (2, 3)])\n\n def test_add_node_string(self):\n self.G.add_node(\"a\")\n self.assertListEqual(list(self.G.nodes()), [\"a\"])\n\n def test_add_node_nonstring(self):\n self.G.add_node(1)\n\n def test_add_nodes_from_string(self):\n self.G.add_nodes_from([\"a\", \"b\", \"c\", \"d\"])\n self.assertListEqual(sorted(self.G.nodes()), [\"a\", \"b\", \"c\", \"d\"])\n\n def test_add_nodes_from_non_string(self):\n self.G.add_nodes_from([1, 2, 3, 4])\n\n def test_add_edge_string(self):\n self.G.add_edge(\"d\", \"e\")\n self.assertListEqual(sorted(self.G.nodes()), [\"d\", \"e\"])\n self.assertListEqual(list(self.G.edges()), [(\"d\", \"e\")])\n self.G.add_nodes_from([\"a\", \"b\", \"c\"])\n self.G.add_edge(\"a\", \"b\")\n self.assertListEqual(\n hf.recursive_sorted(self.G.edges()), [[\"a\", \"b\"], [\"d\", \"e\"]]\n )\n\n def test_add_edge_nonstring(self):\n self.G.add_edge(1, 2)\n\n def test_add_edge_selfloop(self):\n self.assertRaises(ValueError, self.G.add_edge, \"a\", \"a\")\n\n def test_add_edge_result_cycle(self):\n self.G.add_edges_from([(\"a\", \"b\"), (\"a\", \"c\")])\n self.assertRaises(ValueError, self.G.add_edge, \"c\", \"a\")\n\n def test_add_edges_from_string(self):\n self.G.add_edges_from([(\"a\", \"b\"), (\"b\", \"c\")])\n self.assertListEqual(sorted(self.G.nodes()), [\"a\", \"b\", \"c\"])\n self.assertListEqual(\n hf.recursive_sorted(self.G.edges()), [[\"a\", \"b\"], [\"b\", \"c\"]]\n )\n self.G.add_nodes_from([\"d\", \"e\", \"f\"])\n self.G.add_edges_from([(\"d\", \"e\"), (\"e\", \"f\")])\n self.assertListEqual(sorted(self.G.nodes()), [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"])\n self.assertListEqual(\n hf.recursive_sorted(self.G.edges()),\n hf.recursive_sorted([(\"a\", \"b\"), (\"b\", \"c\"), (\"d\", \"e\"), (\"e\", \"f\")]),\n )\n\n def test_add_edges_from_nonstring(self):\n self.G.add_edges_from([(1, 2), (2, 3)])\n\n def test_add_edges_from_self_loop(self):\n self.assertRaises(ValueError, self.G.add_edges_from, [(\"a\", \"a\")])\n\n def test_add_edges_from_result_cycle(self):\n self.assertRaises(\n ValueError, self.G.add_edges_from, [(\"a\", \"b\"), (\"b\", \"c\"), (\"c\", \"a\")]\n )\n\n def test_update_node_parents_bm_constructor(self):\n self.g = BayesianModel([(\"a\", \"b\"), (\"b\", \"c\")])\n self.assertListEqual(list(self.g.predecessors(\"a\")), [])\n self.assertListEqual(list(self.g.predecessors(\"b\")), [\"a\"])\n self.assertListEqual(list(self.g.predecessors(\"c\")), [\"b\"])\n\n def test_update_node_parents(self):\n 
self.G.add_nodes_from([\"a\", \"b\", \"c\"])\n self.G.add_edges_from([(\"a\", \"b\"), (\"b\", \"c\")])\n self.assertListEqual(list(self.G.predecessors(\"a\")), [])\n self.assertListEqual(list(self.G.predecessors(\"b\")), [\"a\"])\n self.assertListEqual(list(self.G.predecessors(\"c\")), [\"b\"])\n\n def tearDown(self):\n del self.G\n\n\nclass TestBayesianModelMethods(unittest.TestCase):\n def setUp(self):\n self.G = BayesianModel([(\"a\", \"d\"), (\"b\", \"d\"), (\"d\", \"e\"), (\"b\", \"c\")])\n self.G1 = BayesianModel([(\"diff\", \"grade\"), (\"intel\", \"grade\")])\n diff_cpd = TabularCPD(\"diff\", 2, values=[[0.2], [0.8]])\n intel_cpd = TabularCPD(\"intel\", 3, values=[[0.5], [0.3], [0.2]])\n grade_cpd = TabularCPD(\n \"grade\",\n 3,\n values=[\n [0.1, 0.1, 0.1, 0.1, 0.1, 0.1],\n [0.1, 0.1, 0.1, 0.1, 0.1, 0.1],\n [0.8, 0.8, 0.8, 0.8, 0.8, 0.8],\n ],\n evidence=[\"diff\", \"intel\"],\n evidence_card=[2, 3],\n )\n self.G1.add_cpds(diff_cpd, intel_cpd, grade_cpd)\n self.G2 = BayesianModel([(\"d\", \"g\"), (\"g\", \"l\"), (\"i\", \"g\"), (\"i\", \"l\")])\n\n def test_moral_graph(self):\n moral_graph = self.G.moralize()\n self.assertListEqual(sorted(moral_graph.nodes()), [\"a\", \"b\", \"c\", \"d\", \"e\"])\n for edge in moral_graph.edges():\n self.assertTrue(\n edge in [(\"a\", \"b\"), (\"a\", \"d\"), (\"b\", \"c\"), (\"d\", \"b\"), (\"e\", \"d\")]\n or (edge[1], edge[0])\n in [(\"a\", \"b\"), (\"a\", \"d\"), (\"b\", \"c\"), (\"d\", \"b\"), (\"e\", \"d\")]\n )\n\n def test_moral_graph_with_edge_present_over_parents(self):\n G = BayesianModel([(\"a\", \"d\"), (\"d\", \"e\"), (\"b\", \"d\"), (\"b\", \"c\"), (\"a\", \"b\")])\n moral_graph = G.moralize()\n self.assertListEqual(sorted(moral_graph.nodes()), [\"a\", \"b\", \"c\", \"d\", \"e\"])\n for edge in moral_graph.edges():\n self.assertTrue(\n edge in [(\"a\", \"b\"), (\"c\", \"b\"), (\"d\", \"a\"), (\"d\", \"b\"), (\"d\", \"e\")]\n or (edge[1], edge[0])\n in [(\"a\", \"b\"), (\"c\", \"b\"), (\"d\", \"a\"), (\"d\", \"b\"), (\"d\", \"e\")]\n )\n\n def test_get_ancestors_of_success(self):\n ancenstors1 = self.G2._get_ancestors_of(\"g\")\n ancenstors2 = self.G2._get_ancestors_of(\"d\")\n ancenstors3 = self.G2._get_ancestors_of([\"i\", \"l\"])\n self.assertEqual(ancenstors1, {\"d\", \"i\", \"g\"})\n self.assertEqual(ancenstors2, {\"d\"})\n self.assertEqual(ancenstors3, {\"g\", \"i\", \"l\", \"d\"})\n\n def test_get_ancestors_of_failure(self):\n self.assertRaises(ValueError, self.G2._get_ancestors_of, \"h\")\n\n def test_get_cardinality(self):\n self.assertDictEqual(\n self.G1.get_cardinality(), {\"diff\": 2, \"intel\": 3, \"grade\": 3}\n )\n\n def test_get_cardinality_with_node(self):\n self.assertEqual(self.G1.get_cardinality(\"diff\"), 2)\n self.assertEqual(self.G1.get_cardinality(\"intel\"), 3)\n self.assertEqual(self.G1.get_cardinality(\"grade\"), 3)\n\n def test_local_independencies(self):\n self.assertEqual(\n self.G.local_independencies(\"a\"), Independencies([\"a\", [\"b\", \"c\"]])\n )\n self.assertEqual(\n self.G.local_independencies(\"c\"),\n Independencies([\"c\", [\"a\", \"d\", \"e\"], \"b\"]),\n )\n self.assertEqual(\n self.G.local_independencies(\"d\"), Independencies([\"d\", \"c\", [\"b\", \"a\"]])\n )\n self.assertEqual(\n self.G.local_independencies(\"e\"),\n Independencies([\"e\", [\"c\", \"b\", \"a\"], \"d\"]),\n )\n self.assertEqual(self.G.local_independencies(\"b\"), Independencies([\"b\", \"a\"]))\n self.assertEqual(self.G1.local_independencies(\"grade\"), Independencies())\n\n def test_get_independencies(self):\n chain = 
BayesianModel([(\"X\", \"Y\"), (\"Y\", \"Z\")])\n self.assertEqual(\n chain.get_independencies(), Independencies((\"X\", \"Z\", \"Y\"), (\"Z\", \"X\", \"Y\"))\n )\n fork = BayesianModel([(\"Y\", \"X\"), (\"Y\", \"Z\")])\n self.assertEqual(\n fork.get_independencies(), Independencies((\"X\", \"Z\", \"Y\"), (\"Z\", \"X\", \"Y\"))\n )\n collider = BayesianModel([(\"X\", \"Y\"), (\"Z\", \"Y\")])\n self.assertEqual(\n collider.get_independencies(), Independencies((\"X\", \"Z\"), (\"Z\", \"X\"))\n )\n\n def test_is_imap(self):\n val = [\n 0.01,\n 0.01,\n 0.08,\n 0.006,\n 0.006,\n 0.048,\n 0.004,\n 0.004,\n 0.032,\n 0.04,\n 0.04,\n 0.32,\n 0.024,\n 0.024,\n 0.192,\n 0.016,\n 0.016,\n 0.128,\n ]\n JPD = JointProbabilityDistribution([\"diff\", \"intel\", \"grade\"], [2, 3, 3], val)\n fac = DiscreteFactor([\"diff\", \"intel\", \"grade\"], [2, 3, 3], val)\n self.assertTrue(self.G1.is_imap(JPD))\n self.assertRaises(TypeError, self.G1.is_imap, fac)\n\n def test_markov_blanet(self):\n G = DAG(\n [\n (\"x\", \"y\"),\n (\"z\", \"y\"),\n (\"y\", \"w\"),\n (\"y\", \"v\"),\n (\"u\", \"w\"),\n (\"s\", \"v\"),\n (\"w\", \"t\"),\n (\"w\", \"m\"),\n (\"v\", \"n\"),\n (\"v\", \"q\"),\n ]\n )\n self.assertEqual(\n set(G.get_markov_blanket(\"y\")), set([\"s\", \"w\", \"x\", \"u\", \"z\", \"v\"])\n )\n\n def test_get_immoralities(self):\n G = BayesianModel([(\"x\", \"y\"), (\"z\", \"y\"), (\"x\", \"z\"), (\"w\", \"y\")])\n self.assertEqual(G.get_immoralities(), {(\"w\", \"x\"), (\"w\", \"z\")})\n G1 = BayesianModel([(\"x\", \"y\"), (\"z\", \"y\"), (\"z\", \"x\"), (\"w\", \"y\")])\n self.assertEqual(G1.get_immoralities(), {(\"w\", \"x\"), (\"w\", \"z\")})\n G2 = BayesianModel([(\"x\", \"y\"), (\"z\", \"y\"), (\"x\", \"z\"), (\"w\", \"y\"), (\"w\", \"x\")])\n self.assertEqual(G2.get_immoralities(), {(\"w\", \"z\")})\n\n def test_is_iequivalent(self):\n G = BayesianModel([(\"x\", \"y\"), (\"z\", \"y\"), (\"x\", \"z\"), (\"w\", \"y\")])\n self.assertRaises(TypeError, G.is_iequivalent, MarkovModel())\n G1 = BayesianModel([(\"V\", \"W\"), (\"W\", \"X\"), (\"X\", \"Y\"), (\"Z\", \"Y\")])\n G2 = BayesianModel([(\"W\", \"V\"), (\"X\", \"W\"), (\"X\", \"Y\"), (\"Z\", \"Y\")])\n self.assertTrue(G1.is_iequivalent(G2))\n G3 = BayesianModel([(\"W\", \"V\"), (\"W\", \"X\"), (\"Y\", \"X\"), (\"Z\", \"Y\")])\n self.assertFalse(G3.is_iequivalent(G2))\n\n def test_copy(self):\n model_copy = self.G1.copy()\n self.assertEqual(sorted(self.G1.nodes()), sorted(model_copy.nodes()))\n self.assertEqual(sorted(self.G1.edges()), sorted(model_copy.edges()))\n self.assertNotEqual(\n id(self.G1.get_cpds(\"diff\")), id(model_copy.get_cpds(\"diff\"))\n )\n\n self.G1.remove_cpds(\"diff\")\n diff_cpd = TabularCPD(\"diff\", 2, values=[[0.3], [0.7]])\n self.G1.add_cpds(diff_cpd)\n self.assertNotEqual(self.G1.get_cpds(\"diff\"), model_copy.get_cpds(\"diff\"))\n\n self.G1.remove_node(\"intel\")\n self.assertNotEqual(sorted(self.G1.nodes()), sorted(model_copy.nodes()))\n self.assertNotEqual(sorted(self.G1.edges()), sorted(model_copy.edges()))\n\n def test_remove_node(self):\n self.G1.remove_node(\"diff\")\n self.assertEqual(sorted(self.G1.nodes()), sorted([\"grade\", \"intel\"]))\n self.assertRaises(ValueError, self.G1.get_cpds, \"diff\")\n\n def test_remove_nodes_from(self):\n self.G1.remove_nodes_from([\"diff\", \"grade\"])\n self.assertEqual(sorted(self.G1.nodes()), sorted([\"intel\"]))\n self.assertRaises(ValueError, self.G1.get_cpds, \"diff\")\n self.assertRaises(ValueError, self.G1.get_cpds, \"grade\")\n\n def tearDown(self):\n del self.G\n del 
self.G1\n\n\nclass TestBayesianModelCPD(unittest.TestCase):\n def setUp(self):\n self.G = BayesianModel([(\"d\", \"g\"), (\"i\", \"g\"), (\"g\", \"l\"), (\"i\", \"s\")])\n self.G2 = DAG([(\"d\", \"g\"), (\"i\", \"g\"), (\"g\", \"l\"), (\"i\", \"s\")])\n\n def test_active_trail_nodes(self):\n self.assertEqual(sorted(self.G2.active_trail_nodes(\"d\")[\"d\"]), [\"d\", \"g\", \"l\"])\n self.assertEqual(\n sorted(self.G2.active_trail_nodes(\"i\")[\"i\"]), [\"g\", \"i\", \"l\", \"s\"]\n )\n self.assertEqual(\n sorted(self.G2.active_trail_nodes([\"d\", \"i\"])[\"d\"]), [\"d\", \"g\", \"l\"]\n )\n\n def test_active_trail_nodes_args(self):\n self.assertEqual(\n sorted(self.G2.active_trail_nodes([\"d\", \"l\"], observed=\"g\")[\"d\"]),\n [\"d\", \"i\", \"s\"],\n )\n self.assertEqual(\n sorted(self.G2.active_trail_nodes([\"d\", \"l\"], observed=\"g\")[\"l\"]), [\"l\"]\n )\n self.assertEqual(\n sorted(self.G2.active_trail_nodes(\"s\", observed=[\"i\", \"l\"])[\"s\"]), [\"s\"]\n )\n self.assertEqual(\n sorted(self.G2.active_trail_nodes(\"s\", observed=[\"d\", \"l\"])[\"s\"]),\n [\"g\", \"i\", \"s\"],\n )\n\n def test_is_active_trail_triplets(self):\n self.assertTrue(self.G.is_active_trail(\"d\", \"l\"))\n self.assertTrue(self.G.is_active_trail(\"g\", \"s\"))\n self.assertFalse(self.G.is_active_trail(\"d\", \"i\"))\n self.assertTrue(self.G.is_active_trail(\"d\", \"i\", observed=\"g\"))\n self.assertFalse(self.G.is_active_trail(\"d\", \"l\", observed=\"g\"))\n self.assertFalse(self.G.is_active_trail(\"i\", \"l\", observed=\"g\"))\n self.assertTrue(self.G.is_active_trail(\"d\", \"i\", observed=\"l\"))\n self.assertFalse(self.G.is_active_trail(\"g\", \"s\", observed=\"i\"))\n\n def test_is_active_trail(self):\n self.assertFalse(self.G.is_active_trail(\"d\", \"s\"))\n self.assertTrue(self.G.is_active_trail(\"s\", \"l\"))\n self.assertTrue(self.G.is_active_trail(\"d\", \"s\", observed=\"g\"))\n self.assertFalse(self.G.is_active_trail(\"s\", \"l\", observed=\"g\"))\n\n def test_is_active_trail_args(self):\n self.assertFalse(self.G.is_active_trail(\"s\", \"l\", \"i\"))\n self.assertFalse(self.G.is_active_trail(\"s\", \"l\", \"g\"))\n self.assertTrue(self.G.is_active_trail(\"d\", \"s\", \"l\"))\n self.assertFalse(self.G.is_active_trail(\"d\", \"s\", [\"i\", \"l\"]))\n\n def test_get_cpds(self):\n cpd_d = TabularCPD(\"d\", 2, values=np.random.rand(2, 1))\n cpd_i = TabularCPD(\"i\", 2, values=np.random.rand(2, 1))\n cpd_g = TabularCPD(\n \"g\",\n 2,\n values=np.random.rand(2, 4),\n evidence=[\"d\", \"i\"],\n evidence_card=[2, 2],\n )\n cpd_l = TabularCPD(\n \"l\", 2, values=np.random.rand(2, 2), evidence=[\"g\"], evidence_card=[2]\n )\n cpd_s = TabularCPD(\n \"s\", 2, values=np.random.rand(2, 2), evidence=[\"i\"], evidence_card=[2]\n )\n self.G.add_cpds(cpd_d, cpd_i, cpd_g, cpd_l, cpd_s)\n\n self.assertEqual(self.G.get_cpds(\"d\").variable, \"d\")\n\n def test_get_cpds1(self):\n self.model = BayesianModel([(\"A\", \"AB\")])\n cpd_a = TabularCPD(\"A\", 2, values=np.random.rand(2, 1))\n cpd_ab = TabularCPD(\n \"AB\", 2, values=np.random.rand(2, 2), evidence=[\"A\"], evidence_card=[2]\n )\n\n self.model.add_cpds(cpd_a, cpd_ab)\n self.assertEqual(self.model.get_cpds(\"A\").variable, \"A\")\n self.assertEqual(self.model.get_cpds(\"AB\").variable, \"AB\")\n self.assertRaises(ValueError, self.model.get_cpds, \"B\")\n\n self.model.add_node(\"B\")\n self.assertIsNone(self.model.get_cpds(\"B\"))\n\n def test_add_single_cpd(self):\n cpd_s = TabularCPD(\"s\", 2, np.random.rand(2, 2), [\"i\"], [2])\n self.G.add_cpds(cpd_s)\n 
self.assertListEqual(self.G.get_cpds(), [cpd_s])\n\n def test_add_multiple_cpds(self):\n cpd_d = TabularCPD(\"d\", 2, values=np.random.rand(2, 1))\n cpd_i = TabularCPD(\"i\", 2, values=np.random.rand(2, 1))\n cpd_g = TabularCPD(\n \"g\",\n 2,\n values=np.random.rand(2, 4),\n evidence=[\"d\", \"i\"],\n evidence_card=[2, 2],\n )\n cpd_l = TabularCPD(\n \"l\", 2, values=np.random.rand(2, 2), evidence=[\"g\"], evidence_card=[2]\n )\n cpd_s = TabularCPD(\n \"s\", 2, values=np.random.rand(2, 2), evidence=[\"i\"], evidence_card=[2]\n )\n\n self.G.add_cpds(cpd_d, cpd_i, cpd_g, cpd_l, cpd_s)\n self.assertEqual(self.G.get_cpds(\"d\"), cpd_d)\n self.assertEqual(self.G.get_cpds(\"i\"), cpd_i)\n self.assertEqual(self.G.get_cpds(\"g\"), cpd_g)\n self.assertEqual(self.G.get_cpds(\"l\"), cpd_l)\n self.assertEqual(self.G.get_cpds(\"s\"), cpd_s)\n\n def test_check_model(self):\n cpd_g = TabularCPD(\n \"g\",\n 2,\n values=np.array([[0.2, 0.3, 0.4, 0.6], [0.8, 0.7, 0.6, 0.4]]),\n evidence=[\"d\", \"i\"],\n evidence_card=[2, 2],\n )\n\n cpd_s = TabularCPD(\n \"s\",\n 2,\n values=np.array([[0.2, 0.3], [0.8, 0.7]]),\n evidence=[\"i\"],\n evidence_card=[2],\n )\n\n cpd_l = TabularCPD(\n \"l\",\n 2,\n values=np.array([[0.2, 0.3], [0.8, 0.7]]),\n evidence=[\"g\"],\n evidence_card=[2],\n )\n\n self.G.add_cpds(cpd_g, cpd_s, cpd_l)\n self.assertRaises(ValueError, self.G.check_model)\n\n cpd_d = TabularCPD(\"d\", 2, values=[[0.8, 0.2]])\n cpd_i = TabularCPD(\"i\", 2, values=[[0.7, 0.3]])\n self.G.add_cpds(cpd_d, cpd_i)\n\n self.assertTrue(self.G.check_model())\n\n def test_check_model1(self):\n cpd_g = TabularCPD(\n \"g\",\n 2,\n values=np.array([[0.2, 0.3], [0.8, 0.7]]),\n evidence=[\"i\"],\n evidence_card=[2],\n )\n self.G.add_cpds(cpd_g)\n self.assertRaises(ValueError, self.G.check_model)\n self.G.remove_cpds(cpd_g)\n\n cpd_g = TabularCPD(\n \"g\",\n 2,\n values=np.array([[0.2, 0.3, 0.4, 0.6], [0.8, 0.7, 0.6, 0.4]]),\n evidence=[\"d\", \"s\"],\n evidence_card=[2, 2],\n )\n self.G.add_cpds(cpd_g)\n self.assertRaises(ValueError, self.G.check_model)\n self.G.remove_cpds(cpd_g)\n\n cpd_g = TabularCPD(\n \"g\",\n 2,\n values=np.array([[0.2, 0.3], [0.8, 0.7]]),\n evidence=[\"l\"],\n evidence_card=[2],\n )\n self.G.add_cpds(cpd_g)\n self.assertRaises(ValueError, self.G.check_model)\n self.G.remove_cpds(cpd_g)\n\n cpd_l = TabularCPD(\n \"l\",\n 2,\n values=np.array([[0.2, 0.3], [0.8, 0.7]]),\n evidence=[\"d\"],\n evidence_card=[2],\n )\n self.G.add_cpds(cpd_l)\n self.assertRaises(ValueError, self.G.check_model)\n self.G.remove_cpds(cpd_l)\n\n cpd_l = TabularCPD(\n \"l\",\n 2,\n values=np.array([[0.2, 0.3, 0.4, 0.6], [0.8, 0.7, 0.6, 0.4]]),\n evidence=[\"d\", \"i\"],\n evidence_card=[2, 2],\n )\n self.G.add_cpds(cpd_l)\n self.assertRaises(ValueError, self.G.check_model)\n self.G.remove_cpds(cpd_l)\n\n cpd_l = TabularCPD(\n \"l\",\n 2,\n values=np.array(\n [\n [0.2, 0.3, 0.4, 0.6, 0.2, 0.3, 0.4, 0.6],\n [0.8, 0.7, 0.6, 0.4, 0.8, 0.7, 0.6, 0.4],\n ]\n ),\n evidence=[\"g\", \"d\", \"i\"],\n evidence_card=[2, 2, 2],\n )\n self.G.add_cpds(cpd_l)\n self.assertRaises(ValueError, self.G.check_model)\n self.G.remove_cpds(cpd_l)\n\n def test_check_model2(self):\n cpd_s = TabularCPD(\n \"s\",\n 2,\n values=np.array([[0.5, 0.3], [0.8, 0.7]]),\n evidence=[\"i\"],\n evidence_card=[2],\n )\n self.G.add_cpds(cpd_s)\n self.assertRaises(ValueError, self.G.check_model)\n self.G.remove_cpds(cpd_s)\n\n cpd_g = TabularCPD(\n \"g\",\n 2,\n values=np.array([[0.2, 0.3, 0.4, 0.6], [0.3, 0.7, 0.6, 0.4]]),\n evidence=[\"d\", \"i\"],\n evidence_card=[2, 
2],\n )\n self.G.add_cpds(cpd_g)\n self.assertRaises(ValueError, self.G.check_model)\n self.G.remove_cpds(cpd_g)\n\n cpd_l = TabularCPD(\n \"l\",\n 2,\n values=np.array([[0.2, 0.3], [0.1, 0.7]]),\n evidence=[\"g\"],\n evidence_card=[2],\n )\n self.G.add_cpds(cpd_l)\n self.assertRaises(ValueError, self.G.check_model)\n self.G.remove_cpds(cpd_l)\n\n def tearDown(self):\n del self.G\n\n\nclass TestBayesianModelFitPredict(unittest.TestCase):\n def setUp(self):\n self.model_disconnected = BayesianModel()\n self.model_disconnected.add_nodes_from([\"A\", \"B\", \"C\", \"D\", \"E\"])\n self.model_connected = BayesianModel(\n [(\"A\", \"B\"), (\"C\", \"B\"), (\"C\", \"D\"), (\"B\", \"E\")]\n )\n\n self.model2 = BayesianModel([(\"A\", \"C\"), (\"B\", \"C\")])\n self.data1 = pd.DataFrame(data={\"A\": [0, 0, 1], \"B\": [0, 1, 0], \"C\": [1, 1, 0]})\n self.data2 = pd.DataFrame(\n data={\n \"A\": [0, np.NaN, 1],\n \"B\": [0, 1, 0],\n \"C\": [1, 1, np.NaN],\n \"D\": [np.NaN, \"Y\", np.NaN],\n }\n )\n\n # data_link - \"https://www.kaggle.com/c/titanic/download/train.csv\"\n self.titanic_data = pd.read_csv(\n \"pgmpy/tests/test_estimators/testdata/titanic_train.csv\", dtype=str\n )\n self.titanic_data2 = self.titanic_data[[\"Survived\", \"Sex\", \"Pclass\"]]\n\n def test_bayesian_fit(self):\n print(isinstance(BayesianEstimator, BaseEstimator))\n print(isinstance(MaximumLikelihoodEstimator, BaseEstimator))\n self.model2.fit(\n self.data1,\n estimator=BayesianEstimator,\n prior_type=\"dirichlet\",\n pseudo_counts={\n \"A\": [[9], [3]],\n \"B\": [[9], [3]],\n \"C\": [[9, 9, 9, 9], [3, 3, 3, 3]],\n },\n )\n self.assertEqual(\n self.model2.get_cpds(\"B\"), TabularCPD(\"B\", 2, [[11.0 / 15], [4.0 / 15]])\n )\n\n def test_fit_missing_data(self):\n self.model2.fit(\n self.data2, state_names={\"C\": [0, 1]}, complete_samples_only=False\n )\n cpds = set(\n [\n TabularCPD(\"A\", 2, [[0.5], [0.5]]),\n TabularCPD(\"B\", 2, [[2.0 / 3], [1.0 / 3]]),\n TabularCPD(\n \"C\",\n 2,\n [[0, 0.5, 0.5, 0.5], [1, 0.5, 0.5, 0.5]],\n evidence=[\"A\", \"B\"],\n evidence_card=[2, 2],\n ),\n ]\n )\n self.assertSetEqual(cpds, set(self.model2.get_cpds()))\n\n def test_disconnected_fit(self):\n values = pd.DataFrame(\n np.random.randint(low=0, high=2, size=(1000, 5)),\n columns=[\"A\", \"B\", \"C\", \"D\", \"E\"],\n )\n self.model_disconnected.fit(values)\n\n for node in [\"A\", \"B\", \"C\", \"D\", \"E\"]:\n cpd = self.model_disconnected.get_cpds(node)\n self.assertEqual(cpd.variable, node)\n np_test.assert_array_equal(cpd.cardinality, np.array([2]))\n value = (\n values.ix[:, node].value_counts()\n / values.ix[:, node].value_counts().sum()\n )\n value = value.reindex(sorted(value.index)).values\n np_test.assert_array_equal(cpd.values, value)\n\n def test_predict(self):\n titanic = BayesianModel()\n titanic.add_edges_from([(\"Sex\", \"Survived\"), (\"Pclass\", \"Survived\")])\n titanic.fit(self.titanic_data2[500:])\n\n p1 = titanic.predict(self.titanic_data2[[\"Sex\", \"Pclass\"]][:30])\n p2 = titanic.predict(self.titanic_data2[[\"Survived\", \"Pclass\"]][:30])\n p3 = titanic.predict(self.titanic_data2[[\"Survived\", \"Sex\"]][:30])\n\n p1_res = np.array(\n [\n \"0\",\n \"1\",\n \"0\",\n \"1\",\n \"0\",\n \"0\",\n \"0\",\n \"0\",\n \"0\",\n \"1\",\n \"0\",\n \"1\",\n \"0\",\n \"0\",\n \"0\",\n \"1\",\n \"0\",\n \"0\",\n \"0\",\n \"0\",\n \"0\",\n \"0\",\n \"0\",\n \"0\",\n \"0\",\n \"0\",\n \"0\",\n \"0\",\n \"0\",\n \"0\",\n ]\n )\n p2_res = np.array(\n [\n \"male\",\n \"female\",\n \"female\",\n \"female\",\n \"male\",\n \"male\",\n 
\"male\",\n \"male\",\n \"female\",\n \"female\",\n \"female\",\n \"female\",\n \"male\",\n \"male\",\n \"male\",\n \"female\",\n \"male\",\n \"female\",\n \"male\",\n \"female\",\n \"male\",\n \"female\",\n \"female\",\n \"female\",\n \"male\",\n \"female\",\n \"male\",\n \"male\",\n \"female\",\n \"male\",\n ]\n )\n p3_res = np.array(\n [\n \"3\",\n \"1\",\n \"1\",\n \"1\",\n \"3\",\n \"3\",\n \"3\",\n \"3\",\n \"1\",\n \"1\",\n \"1\",\n \"1\",\n \"3\",\n \"3\",\n \"3\",\n \"1\",\n \"3\",\n \"1\",\n \"3\",\n \"1\",\n \"3\",\n \"1\",\n \"1\",\n \"1\",\n \"3\",\n \"1\",\n \"3\",\n \"3\",\n \"1\",\n \"3\",\n ]\n )\n\n np_test.assert_array_equal(p1.values.ravel(), p1_res)\n np_test.assert_array_equal(p2.values.ravel(), p2_res)\n np_test.assert_array_equal(p3.values.ravel(), p3_res)\n\n def test_connected_predict(self):\n np.random.seed(42)\n values = pd.DataFrame(\n np.array(np.random.randint(low=0, high=2, size=(1000, 5)), dtype=str),\n columns=[\"A\", \"B\", \"C\", \"D\", \"E\"],\n )\n fit_data = values[:800]\n predict_data = values[800:].copy()\n self.model_connected.fit(fit_data)\n self.assertRaises(ValueError, self.model_connected.predict, predict_data)\n predict_data.drop(\"E\", axis=1, inplace=True)\n e_predict = self.model_connected.predict(predict_data)\n np_test.assert_array_equal(\n e_predict.values.ravel(),\n np.array(\n [\n 1,\n 0,\n 1,\n 0,\n 1,\n 1,\n 1,\n 0,\n 0,\n 0,\n 0,\n 0,\n 1,\n 1,\n 1,\n 0,\n 0,\n 1,\n 1,\n 1,\n 0,\n 0,\n 0,\n 1,\n 1,\n 0,\n 1,\n 0,\n 0,\n 1,\n 0,\n 0,\n 0,\n 0,\n 1,\n 1,\n 1,\n 0,\n 1,\n 1,\n 0,\n 0,\n 0,\n 0,\n 0,\n 1,\n 1,\n 0,\n 1,\n 1,\n 0,\n 0,\n 0,\n 1,\n 0,\n 1,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 1,\n 1,\n 1,\n 1,\n 1,\n 0,\n 0,\n 1,\n 1,\n 0,\n 1,\n 1,\n 0,\n 1,\n 1,\n 1,\n 0,\n 0,\n 1,\n 1,\n 0,\n 1,\n 1,\n 1,\n 0,\n 1,\n 1,\n 0,\n 0,\n 1,\n 1,\n 0,\n 0,\n 1,\n 0,\n 0,\n 1,\n 1,\n 1,\n 1,\n 0,\n 0,\n 1,\n 1,\n 0,\n 0,\n 0,\n 0,\n 1,\n 1,\n 0,\n 0,\n 0,\n 1,\n 0,\n 1,\n 1,\n 0,\n 1,\n 1,\n 0,\n 0,\n 1,\n 1,\n 1,\n 0,\n 1,\n 0,\n 0,\n 1,\n 1,\n 0,\n 1,\n 1,\n 1,\n 1,\n 1,\n 1,\n 1,\n 0,\n 1,\n 0,\n 1,\n 1,\n 1,\n 1,\n 0,\n 0,\n 1,\n 0,\n 1,\n 1,\n 0,\n 0,\n 0,\n 0,\n 1,\n 0,\n 1,\n 1,\n 0,\n 0,\n 1,\n 0,\n 1,\n 0,\n 1,\n 1,\n 0,\n 1,\n 0,\n 1,\n 1,\n 0,\n 1,\n 1,\n 0,\n 1,\n 1,\n 1,\n 1,\n 1,\n 1,\n 1,\n 1,\n 1,\n 1,\n 1,\n 0,\n 0,\n 1,\n 1,\n 0,\n 1,\n 1,\n 1,\n 1,\n 0,\n ],\n dtype=str,\n ),\n )\n\n def test_connected_predict_probability(self):\n np.random.seed(42)\n values = pd.DataFrame(\n np.random.randint(low=0, high=2, size=(100, 5)),\n columns=[\"A\", \"B\", \"C\", \"D\", \"E\"],\n )\n fit_data = values[:80]\n predict_data = values[80:].copy()\n self.model_connected.fit(fit_data)\n predict_data.drop(\"E\", axis=1, inplace=True)\n e_prob = self.model_connected.predict_probability(predict_data)\n np_test.assert_allclose(\n e_prob.values.ravel(),\n np.array(\n [\n 0.57894737,\n 0.42105263,\n 0.57894737,\n 0.42105263,\n 0.57894737,\n 0.42105263,\n 0.5,\n 0.5,\n 0.57894737,\n 0.42105263,\n 0.5,\n 0.5,\n 0.57894737,\n 0.42105263,\n 0.57894737,\n 0.42105263,\n 0.57894737,\n 0.42105263,\n 0.5,\n 0.5,\n 0.57894737,\n 0.42105263,\n 0.57894737,\n 0.42105263,\n 0.5,\n 0.5,\n 0.57894737,\n 0.42105263,\n 0.57894737,\n 0.42105263,\n 0.5,\n 0.5,\n 0.57894737,\n 0.42105263,\n 0.5,\n 0.5,\n 0.5,\n 0.5,\n 0.5,\n 0.5,\n ]\n ),\n atol=0,\n )\n predict_data = pd.DataFrame(\n np.random.randint(low=0, high=2, size=(1, 5)),\n columns=[\"A\", \"B\", \"C\", \"F\", \"E\"],\n )[:]\n\n def test_predict_probability_errors(self):\n np.random.seed(42)\n values = 
pd.DataFrame(\n np.random.randint(low=0, high=2, size=(2, 5)),\n columns=[\"A\", \"B\", \"C\", \"D\", \"E\"],\n )\n fit_data = values[:1]\n predict_data = values[1:].copy()\n self.model_connected.fit(fit_data)\n self.assertRaises(\n ValueError, self.model_connected.predict_probability, predict_data\n )\n predict_data = pd.DataFrame(\n np.random.randint(low=0, high=2, size=(1, 5)),\n columns=[\"A\", \"B\", \"C\", \"F\", \"E\"],\n )[:]\n self.assertRaises(\n ValueError, self.model_connected.predict_probability, predict_data\n )\n\n def tearDown(self):\n del self.model_connected\n del self.model_disconnected\n\n\nclass TestDAGCPDOperations(unittest.TestCase):\n def setUp(self):\n self.graph = BayesianModel()\n\n def test_add_single_cpd(self):\n cpd = TabularCPD(\n \"grade\",\n 2,\n values=np.random.rand(2, 4),\n evidence=[\"diff\", \"intel\"],\n evidence_card=[2, 2],\n )\n self.graph.add_edges_from([(\"diff\", \"grade\"), (\"intel\", \"grade\")])\n self.graph.add_cpds(cpd)\n self.assertListEqual(self.graph.get_cpds(), [cpd])\n\n def test_add_multiple_cpds(self):\n cpd1 = TabularCPD(\"diff\", 2, values=np.random.rand(2, 1))\n cpd2 = TabularCPD(\"intel\", 2, values=np.random.rand(2, 1))\n cpd3 = TabularCPD(\n \"grade\",\n 2,\n values=np.random.rand(2, 4),\n evidence=[\"diff\", \"intel\"],\n evidence_card=[2, 2],\n )\n self.graph.add_edges_from([(\"diff\", \"grade\"), (\"intel\", \"grade\")])\n self.graph.add_cpds(cpd1, cpd2, cpd3)\n self.assertListEqual(self.graph.get_cpds(), [cpd1, cpd2, cpd3])\n\n def test_remove_single_cpd(self):\n cpd1 = TabularCPD(\"diff\", 2, values=np.random.rand(2, 1))\n cpd2 = TabularCPD(\"intel\", 2, values=np.random.rand(2, 1))\n cpd3 = TabularCPD(\n \"grade\",\n 2,\n values=np.random.rand(2, 4),\n evidence=[\"diff\", \"intel\"],\n evidence_card=[2, 2],\n )\n self.graph.add_edges_from([(\"diff\", \"grade\"), (\"intel\", \"grade\")])\n self.graph.add_cpds(cpd1, cpd2, cpd3)\n self.graph.remove_cpds(cpd1)\n self.assertListEqual(self.graph.get_cpds(), [cpd2, cpd3])\n\n def test_remove_multiple_cpds(self):\n cpd1 = TabularCPD(\"diff\", 2, values=np.random.rand(2, 1))\n cpd2 = TabularCPD(\"intel\", 2, values=np.random.rand(2, 1))\n cpd3 = TabularCPD(\n \"grade\",\n 2,\n values=np.random.rand(2, 4),\n evidence=[\"diff\", \"intel\"],\n evidence_card=[2, 2],\n )\n self.graph.add_edges_from([(\"diff\", \"grade\"), (\"intel\", \"grade\")])\n self.graph.add_cpds(cpd1, cpd2, cpd3)\n self.graph.remove_cpds(cpd1, cpd3)\n self.assertListEqual(self.graph.get_cpds(), [cpd2])\n\n def test_remove_single_cpd_string(self):\n cpd1 = TabularCPD(\"diff\", 2, values=np.random.rand(2, 1))\n cpd2 = TabularCPD(\"intel\", 2, values=np.random.rand(2, 1))\n cpd3 = TabularCPD(\n \"grade\",\n 2,\n values=np.random.rand(2, 4),\n evidence=[\"diff\", \"intel\"],\n evidence_card=[2, 2],\n )\n self.graph.add_edges_from([(\"diff\", \"grade\"), (\"intel\", \"grade\")])\n self.graph.add_cpds(cpd1, cpd2, cpd3)\n self.graph.remove_cpds(\"diff\")\n self.assertListEqual(self.graph.get_cpds(), [cpd2, cpd3])\n\n def test_remove_multiple_cpds_string(self):\n cpd1 = TabularCPD(\"diff\", 2, values=np.random.rand(2, 1))\n cpd2 = TabularCPD(\"intel\", 2, values=np.random.rand(2, 1))\n cpd3 = TabularCPD(\n \"grade\",\n 2,\n values=np.random.rand(2, 4),\n evidence=[\"diff\", \"intel\"],\n evidence_card=[2, 2],\n )\n self.graph.add_edges_from([(\"diff\", \"grade\"), (\"intel\", \"grade\")])\n self.graph.add_cpds(cpd1, cpd2, cpd3)\n self.graph.remove_cpds(\"diff\", \"grade\")\n self.assertListEqual(self.graph.get_cpds(), 
[cpd2])\n\n def test_get_values_for_node(self):\n cpd1 = TabularCPD(\"diff\", 2, values=np.random.rand(2, 1))\n cpd2 = TabularCPD(\"intel\", 2, values=np.random.rand(2, 1))\n cpd3 = TabularCPD(\n \"grade\",\n 2,\n values=np.random.rand(2, 4),\n evidence=[\"diff\", \"intel\"],\n evidence_card=[2, 2],\n )\n self.graph.add_edges_from([(\"diff\", \"grade\"), (\"intel\", \"grade\")])\n self.graph.add_cpds(cpd1, cpd2, cpd3)\n self.assertEqual(self.graph.get_cpds(\"diff\"), cpd1)\n self.assertEqual(self.graph.get_cpds(\"intel\"), cpd2)\n self.assertEqual(self.graph.get_cpds(\"grade\"), cpd3)\n\n def test_get_values_raises_error(self):\n cpd1 = TabularCPD(\"diff\", 2, values=np.random.rand(2, 1))\n cpd2 = TabularCPD(\"intel\", 2, values=np.random.rand(2, 1))\n cpd3 = TabularCPD(\n \"grade\",\n 2,\n values=np.random.rand(2, 4),\n evidence=[\"diff\", \"intel\"],\n evidence_card=[2, 2],\n )\n self.graph.add_edges_from([(\"diff\", \"grade\"), (\"intel\", \"grade\")])\n self.graph.add_cpds(cpd1, cpd2, cpd3)\n self.assertRaises(ValueError, self.graph.get_cpds, \"sat\")\n\n def tearDown(self):\n del self.graph\n"
]
| [
[
"numpy.array",
"numpy.random.rand",
"numpy.random.seed",
"pandas.DataFrame",
"numpy.testing.assert_array_equal",
"numpy.random.randint",
"pandas.read_csv"
]
]
|
alptezbasaran/raven | [
"fd6fe8fe90b59d6dd3615cfea929722f3e04b2ca"
]
| [
"framework/PostProcessors/LimitSurface.py"
]
| [
"# Copyright 2017 Battelle Energy Alliance, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nCreated on July 10, 2013\n\n@author: alfoa\n\"\"\"\nfrom __future__ import division, print_function , unicode_literals, absolute_import\n\n#External Modules------------------------------------------------------------------------------------\nimport numpy as np\nimport copy\nfrom collections import OrderedDict\n#External Modules End--------------------------------------------------------------------------------\n\n#Internal Modules------------------------------------------------------------------------------------\nfrom .PostProcessor import PostProcessor\nfrom utils import InputData, InputTypes, utils, mathUtils\nimport LearningGate\nimport GridEntities\nimport Files\n#Internal Modules End--------------------------------------------------------------------------------\n\nclass LimitSurface(PostProcessor):\n \"\"\"\n LimitSurface filter class. It computes the limit surface associated to a dataset\n \"\"\"\n\n @classmethod\n def getInputSpecification(cls):\n \"\"\"\n Method to get a reference to a class that specifies the input data for\n class cls.\n @ In, cls, the class for which we are retrieving the specification\n @ Out, inputSpecification, InputData.ParameterInput, class to use for\n specifying input of cls.\n \"\"\"\n ## This will replace the lines above\n inputSpecification = super(LimitSurface, cls).getInputSpecification()\n\n ParametersInput = InputData.parameterInputFactory(\"parameters\", contentType=InputTypes.StringType)\n inputSpecification.addSub(ParametersInput)\n\n ToleranceInput = InputData.parameterInputFactory(\"tolerance\", contentType=InputTypes.FloatType)\n inputSpecification.addSub(ToleranceInput)\n\n SideInput = InputData.parameterInputFactory(\"side\", contentType=InputTypes.StringType)\n inputSpecification.addSub(SideInput)\n\n ROMInput = InputData.parameterInputFactory(\"ROM\", contentType=InputTypes.StringType)\n ROMInput.addParam(\"class\", InputTypes.StringType)\n ROMInput.addParam(\"type\", InputTypes.StringType)\n inputSpecification.addSub(ROMInput)\n\n FunctionInput = InputData.parameterInputFactory(\"Function\", contentType=InputTypes.StringType)\n FunctionInput.addParam(\"class\", InputTypes.StringType)\n FunctionInput.addParam(\"type\", InputTypes.StringType)\n inputSpecification.addSub(FunctionInput)\n\n return inputSpecification\n\n def __init__(self, messageHandler):\n \"\"\"\n Constructor\n @ In, messageHandler, MessageHandler, message handler object\n @ Out, None\n \"\"\"\n PostProcessor.__init__(self,messageHandler)\n self.parameters = {} #parameters dictionary (they are basically stored into a dictionary identified by tag \"targets\"\n self.surfPoint = None #coordinate of the points considered on the limit surface\n self.testMatrix = OrderedDict() #This is the n-dimensional matrix representing the testing grid\n self.gridCoord = {} #Grid coordinates\n self.functionValue = {} #This a dictionary that contains np vectors with the value for each 
variable and for the goal function\n self.ROM = None #Pointer to a ROM\n self.externalFunction = None #Pointer to an external Function\n self.tolerance = 1.0e-4 #SubGrid tolerance\n self.gridFromOutside = False #The grid has been passed from outside (self._initFromDict)?\n self.lsSide = \"negative\" # Limit surface side to compute the LS for (negative,positive,both)\n self.gridEntity = None # the Grid object\n self.bounds = None # the container for the domain of search\n self.jobHandler = None # job handler pointer\n self.transfMethods = {} # transformation methods container\n self.crossedLimitSurf = False # Limit surface has been crossed?\n self.addAssemblerObject('ROM','-1', True)\n self.addAssemblerObject('Function','1')\n self.printTag = 'POSTPROCESSOR LIMITSURFACE'\n\n def _localWhatDoINeed(self):\n \"\"\"\n This method is a local mirror of the general whatDoINeed method.\n It is implemented by the samplers that need to request special objects\n @ In, None\n @ Out, needDict, dict, list of objects needed\n \"\"\"\n needDict = {'internal':[(None,'jobHandler')]}\n return needDict\n\n def _localGenerateAssembler(self,initDict):\n \"\"\"\n Generates the assembler.\n @ In, initDict, dict, dict of init objects\n @ Out, None\n \"\"\"\n self.jobHandler = initDict['internal']['jobHandler']\n\n def _initializeLSpp(self, runInfo, inputs, initDict):\n \"\"\"\n Method to initialize the LS post processor (create grid, etc.)\n @ In, runInfo, dict, dictionary of run info (e.g. working dir, etc)\n @ In, inputs, list, list of inputs\n @ In, initDict, dict, dictionary with initialization options\n @ Out, None\n \"\"\"\n PostProcessor.initialize(self, runInfo, inputs, initDict)\n self.gridEntity = GridEntities.returnInstance(\"MultiGridEntity\",self,self.messageHandler)\n self.externalFunction = self.assemblerDict['Function'][0][3]\n if 'ROM' not in self.assemblerDict.keys():\n self.ROM = LearningGate.returnInstance('SupervisedGate','SciKitLearn', self, **{'SKLtype':'neighbors|KNeighborsClassifier',\"n_neighbors\":1, 'Features':','.join(list(self.parameters['targets'])), 'Target':[self.externalFunction.name]})\n else:\n self.ROM = self.assemblerDict['ROM'][0][3]\n self.ROM.reset()\n self.indexes = -1\n for index, inp in enumerate(self.inputs):\n if mathUtils.isAString(inp) or isinstance(inp, bytes):\n self.raiseAnError(IOError, 'LimitSurface PostProcessor only accepts Data(s) as inputs. 
Got string type!')\n if inp.type == 'PointSet':\n self.indexes = index\n if self.indexes == -1:\n self.raiseAnError(IOError, 'LimitSurface PostProcessor needs a PointSet as INPUT!!!!!!')\n #else:\n # # check if parameters are contained in the data\n # inpKeys = self.inputs[self.indexes].getParaKeys(\"inputs\")\n # outKeys = self.inputs[self.indexes].getParaKeys(\"outputs\")\n # self.paramType = {}\n # for param in self.parameters['targets']:\n # if param not in inpKeys + outKeys:\n # self.raiseAnError(IOError, 'LimitSurface PostProcessor: The param ' + param + ' not contained in Data ' + self.inputs[self.indexes].name + ' !')\n # if param in inpKeys:\n # self.paramType[param] = 'inputs'\n # else:\n # self.paramType[param] = 'outputs'\n if self.bounds == None:\n dataSet = self.inputs[self.indexes].asDataset()\n self.bounds = {\"lowerBounds\":{},\"upperBounds\":{}}\n for key in self.parameters['targets']:\n self.bounds[\"lowerBounds\"][key], self.bounds[\"upperBounds\"][key] = min(dataSet[key].values), max(dataSet[key].values)\n #self.bounds[\"lowerBounds\"][key], self.bounds[\"upperBounds\"][key] = min(self.inputs[self.indexes].getParam(self.paramType[key],key,nodeId = 'RecontructEnding')), max(self.inputs[self.indexes].getParam(self.paramType[key],key,nodeId = 'RecontructEnding'))\n if utils.compare(round(self.bounds[\"lowerBounds\"][key],14),round(self.bounds[\"upperBounds\"][key],14)):\n self.bounds[\"upperBounds\"][key]+= abs(self.bounds[\"upperBounds\"][key]/1.e7)\n self.gridEntity.initialize(initDictionary={\"rootName\":self.name,'constructTensor':True, \"computeCells\":initDict['computeCells'] if 'computeCells' in initDict.keys() else False,\n \"dimensionNames\":self.parameters['targets'], \"lowerBounds\":self.bounds[\"lowerBounds\"],\"upperBounds\":self.bounds[\"upperBounds\"],\n \"volumetricRatio\":self.tolerance ,\"transformationMethods\":self.transfMethods})\n self.nVar = len(self.parameters['targets']) # Total number of variables\n self.axisName = self.gridEntity.returnParameter(\"dimensionNames\",self.name) # this list is the implicit mapping of the name of the variable with the grid axis ordering self.axisName[i] = name i-th coordinate\n self.testMatrix[self.name] = np.zeros(self.gridEntity.returnParameter(\"gridShape\",self.name)) # grid where the values of the goalfunction are stored\n\n def _initializeLSppROM(self, inp, raiseErrorIfNotFound = True):\n \"\"\"\n Method to initialize the LS acceleration rom\n @ In, inp, Data(s) object, data object containing the training set\n @ In, raiseErrorIfNotFound, bool, throw an error if the limit surface is not found\n @ Out, None\n \"\"\"\n self.raiseADebug('Initiate training')\n if type(inp) == dict:\n self.functionValue.update(inp)\n else:\n dataSet = inp.asDataset(\"dict\")\n self.functionValue.update(dataSet['data'])\n #self.functionValue.update(inp.getParametersValues('outputs', nodeId = 'RecontructEnding'))\n # recovery the index of the last function evaluation performed\n if self.externalFunction.name in self.functionValue.keys():\n indexLast = len(self.functionValue[self.externalFunction.name]) - 1\n else:\n indexLast = -1\n # index of last set of point tested and ready to perform the function evaluation\n indexEnd = len(self.functionValue[self.axisName[0]]) - 1\n tempDict = {}\n if self.externalFunction.name in self.functionValue.keys():\n self.functionValue[self.externalFunction.name] = np.append(self.functionValue[self.externalFunction.name], np.zeros(indexEnd - indexLast))\n else:\n 
self.functionValue[self.externalFunction.name] = np.zeros(indexEnd + 1)\n\n for myIndex in range(indexLast + 1, indexEnd + 1):\n for key, value in self.functionValue.items():\n tempDict[key] = value[myIndex]\n self.functionValue[self.externalFunction.name][myIndex] = self.externalFunction.evaluate('residuumSign', tempDict)\n if abs(self.functionValue[self.externalFunction.name][myIndex]) != 1.0:\n self.raiseAnError(IOError, 'LimitSurface: the function evaluation of the residuumSign method needs to return a 1 or -1!')\n if type(inp).__name__ in ['dict','OrderedDict']:\n if self.externalFunction.name in inp:\n inp[self.externalFunction.name] = np.concatenate((inp[self.externalFunction.name],np.asarray(self.functionValue[self.externalFunction.name][myIndex])))\n # check if the Limit Surface has been crossed\n self.crossedLimitSurf = not (np.sum(self.functionValue[self.externalFunction.name]) ==\n float(len(self.functionValue[self.externalFunction.name])) or\n np.sum(self.functionValue[self.externalFunction.name]) ==\n -float(len(self.functionValue[self.externalFunction.name])))\n if not self.crossedLimitSurf:\n if raiseErrorIfNotFound:\n self.raiseAnError(ValueError, 'LimitSurface: all the Function evaluations brought to the same result (No Limit Surface has been crossed...). Increase or change the data set!')\n else:\n self.raiseAWarning('LimitSurface: all the Function evaluations brought to the same result (No Limit Surface has been crossed...)!')\n #printing----------------------\n self.raiseADebug('LimitSurface: Mapping of the goal function evaluation performed')\n self.raiseADebug('LimitSurface: Already evaluated points and function values:')\n keyList = list(self.functionValue.keys())\n self.raiseADebug(','.join(keyList))\n for index in range(indexEnd + 1):\n self.raiseADebug(','.join([str(self.functionValue[key][index]) for key in keyList]))\n #printing----------------------\n tempDict = {}\n for name in self.axisName:\n tempDict[name] = np.asarray(self.functionValue[name])\n tempDict[self.externalFunction.name] = self.functionValue[self.externalFunction.name]\n self.ROM.train(tempDict)\n self.raiseADebug('LimitSurface: Training performed')\n\n def initialize(self, runInfo, inputs, initDict):\n \"\"\"\n Method to initialize the LS pp.\n @ In, runInfo, dict, dictionary of run info (e.g. 
working dir, etc)\n @ In, inputs, list, list of inputs\n @ In, initDict, dict, dictionary with initialization options\n @ Out, None\n \"\"\"\n self._initializeLSpp(runInfo, inputs, initDict)\n self._initializeLSppROM(self.inputs[self.indexes])\n\n def _initFromDict(self, dictIn):\n \"\"\"\n Initialize the LS pp from a dictionary (not from xml input).\n This is used when other objects initialize and use the LS pp for internal\n calculations\n @ In, dictIn, dict, dictionary of initialization options\n @ Out, None\n \"\"\"\n if \"parameters\" not in dictIn.keys():\n self.raiseAnError(IOError, 'No Parameters specified in \"dictIn\" dictionary !!!!')\n if \"name\" in dictIn.keys():\n self.name = dictIn[\"name\"]\n if type(dictIn[\"parameters\"]).__name__ == \"list\":\n self.parameters['targets'] = dictIn[\"parameters\"]\n else:\n self.parameters['targets'] = dictIn[\"parameters\"].split(\",\")\n if \"bounds\" in dictIn.keys():\n self.bounds = dictIn[\"bounds\"]\n if \"transformationMethods\" in dictIn.keys():\n self.transfMethods = dictIn[\"transformationMethods\"]\n if \"verbosity\" in dictIn.keys():\n self.verbosity = dictIn['verbosity']\n if \"side\" in dictIn.keys():\n self.lsSide = dictIn[\"side\"]\n if \"tolerance\" in dictIn.keys():\n self.tolerance = float(dictIn[\"tolerance\"])\n if self.lsSide not in [\"negative\", \"positive\", \"both\"]:\n self.raiseAnError(IOError, 'Computation side can be positive, negative, both only !!!!')\n\n def getFunctionValue(self):\n \"\"\"\n Method to get a pointer to the dictionary self.functionValue\n @ In, None\n @ Out, dictionary, self.functionValue\n \"\"\"\n return self.functionValue\n\n def getTestMatrix(self, nodeName=None,exceptionGrid=None):\n \"\"\"\n Method to get a pointer to the testMatrix object (evaluation grid)\n @ In, nodeName, string, optional, which grid node should be returned. 
If None, the self.name one, If \"all\", all of them, else the nodeName\n @ In, exceptionGrid, string, optional, which grid node should not be returned in case nodeName is \"all\"\n @ Out, testMatrix, numpy.ndarray or dict , self.testMatrix\n \"\"\"\n if nodeName == None:\n testMatrix = self.testMatrix[self.name]\n elif nodeName ==\"all\":\n if exceptionGrid == None:\n testMatrix = self.testMatrix\n else:\n returnDict = OrderedDict()\n wantedKeys = list(self.testMatrix.keys())\n wantedKeys.pop(wantedKeys.index(exceptionGrid))\n for key in wantedKeys:\n returnDict[key] = self.testMatrix[key]\n testMatrix = returnDict\n else:\n testMatrix = self.testMatrix[nodeName]\n return testMatrix\n\n def _localReadMoreXML(self, xmlNode):\n \"\"\"\n Function to read the portion of the xml input that belongs to this specialized class\n and initialize some stuff based on the inputs received\n @ In, xmlNode, xml.etree.Element, Xml element node\n @ Out, None\n \"\"\"\n paramInput = LimitSurface.getInputSpecification()()\n paramInput.parseNode(xmlNode)\n self._handleInput(paramInput)\n\n def _handleInput(self, paramInput):\n \"\"\"\n Function to handle the parsed paramInput for this class.\n @ In, paramInput, ParameterInput, the already parsed input.\n @ Out, None\n \"\"\"\n initDict = {}\n for child in paramInput.subparts:\n initDict[child.getName()] = child.value\n initDict.update(paramInput.parameterValues)\n self._initFromDict(initDict)\n\n def collectOutput(self, finishedJob, output):\n \"\"\"\n Function to place all of the computed data into the output object\n @ In, finishedJob, JobHandler External or Internal instance, A JobHandler object that is in charge of running this post-processor\n @ In, output, dataObjects, The object where we want to place our computed results\n @ Out, None\n \"\"\"\n evaluation = finishedJob.getEvaluation()\n self.raiseADebug(str(evaluation))\n limitSurf = evaluation[1]\n if limitSurf[0] is not None:\n # reset the output\n if len(output) > 0:\n self.raiseAnError(RuntimeError, 'The output DataObject \"'+output.name+'\" is not empty! Choose another one!')\n #output.reset()\n # construct the realizations dict\n rlz = {varName: limitSurf[0][:,varIndex] for varIndex,varName in enumerate(self.axisName) }\n rlz[self.externalFunction.name] = limitSurf[1]\n # add the full realizations\n output.load(rlz,style='dict')\n\n def refineGrid(self,refinementSteps=2):\n \"\"\"\n Method to refine the internal grid based on the limit surface previously computed\n @ In, refinementSteps, int, optional, number of refinement steps\n @ Out, None\n \"\"\"\n cellIds = self.gridEntity.retrieveCellIds([self.listSurfPointNegative,self.listSurfPointPositive],self.name)\n if self.getLocalVerbosity() == 'debug':\n self.raiseADebug(\"Limit Surface cell IDs are: \\n\"+ \" \\n\".join([str(cellID) for cellID in cellIds]))\n self.raiseAMessage(\"Number of cells to be refined is \"+str(len(cellIds))+\". RefinementSteps = \"+str(max([refinementSteps,2]))+\"!\")\n self.gridEntity.refineGrid({\"cellIDs\":cellIds,\"refiningNumSteps\":int(max([refinementSteps,2]))})\n for nodeName in self.gridEntity.getAllNodesNames(self.name):\n if nodeName != self.name:\n self.testMatrix[nodeName] = np.zeros(self.gridEntity.returnParameter(\"gridShape\",nodeName))\n\n def run(self, inputIn = None, returnListSurfCoord = False, exceptionGrid = None, merge = True):\n \"\"\"\n This method executes the postprocessor action. 
In this case it computes the limit surface.\n @ In, inputIn, dict, optional, dictionary of data to process\n @ In, returnListSurfCoord, bool, optional, True if listSurfaceCoordinate needs to be returned\n @ In, exceptionGrid, string, optional, the name of the sub-grid to not be considered\n @ In, merge, bool, optional, True if the LS in all the sub-grids need to be merged in a single returnSurface\n @ Out, returnSurface, tuple, tuple containing the limit surface info:\n - if returnListSurfCoord: returnSurface = (surfPoint, evals, listSurfPoints)\n - else : returnSurface = (surfPoint, evals)\n \"\"\"\n allGridNames = self.gridEntity.getAllNodesNames(self.name)\n if exceptionGrid != None:\n try:\n allGridNames.pop(allGridNames.index(exceptionGrid))\n except:\n pass\n self.surfPoint, evaluations, listSurfPoint = OrderedDict().fromkeys(allGridNames), OrderedDict().fromkeys(allGridNames) ,OrderedDict().fromkeys(allGridNames)\n for nodeName in allGridNames:\n #if skipMainGrid == True and nodeName == self.name: continue\n self.testMatrix[nodeName] = np.zeros(self.gridEntity.returnParameter(\"gridShape\",nodeName))\n self.gridCoord[nodeName] = self.gridEntity.returnGridAsArrayOfCoordinates(nodeName=nodeName)\n tempDict ={}\n for varId, varName in enumerate(self.axisName):\n tempDict[varName] = self.gridCoord[nodeName][:,varId]\n self.testMatrix[nodeName].shape = (self.gridCoord[nodeName].shape[0]) #rearrange the grid matrix such as is an array of values\n self.testMatrix[nodeName][:] = self.ROM.evaluate(tempDict)[self.externalFunction.name] #get the prediction on the testing grid\n self.testMatrix[nodeName].shape = self.gridEntity.returnParameter(\"gridShape\",nodeName) #bring back the grid structure\n self.gridCoord[nodeName].shape = self.gridEntity.returnParameter(\"gridCoorShape\",nodeName) #bring back the grid structure\n self.raiseADebug('LimitSurface: Prediction performed')\n # here next the points that are close to any change are detected by a gradient (it is a pre-screener)\n if self.nVar > 1:\n toBeTested = np.squeeze(np.dstack(np.nonzero(np.sum(np.abs(np.gradient(self.testMatrix[nodeName])), axis = 0))))\n else:\n toBeTested = np.squeeze(np.dstack(np.nonzero(np.abs(np.gradient(self.testMatrix[nodeName])))))\n toBeTested = np.atleast_2d(toBeTested).T if self.nVar == 1 else toBeTested\n #printing----------------------\n self.raiseADebug('LimitSurface: Limit surface candidate points')\n if self.getLocalVerbosity() == 'debug':\n for coordinate in np.rollaxis(toBeTested, 0):\n myStr = ''\n for iVar, varnName in enumerate(self.axisName):\n myStr += varnName + ': ' + str(coordinate[iVar]) + ' '\n self.raiseADebug('LimitSurface: ' + myStr + ' value: ' + str(self.testMatrix[nodeName][tuple(coordinate)]))\n # printing----------------------\n # check which one of the preselected points is really on the limit surface\n nNegPoints, nPosPoints = 0, 0\n listSurfPointNegative, listSurfPointPositive = [], []\n if self.lsSide in [\"negative\", \"both\"]:\n # it returns the list of points belonging to the limit state surface and resulting in a negative response by the ROM\n listSurfPointNegative = self.__localLimitStateSearch__(toBeTested, -1, nodeName)\n nNegPoints = len(listSurfPointNegative)\n if self.lsSide in [\"positive\", \"both\"]:\n # it returns the list of points belonging to the limit state surface and resulting in a positive response by the ROM\n listSurfPointPositive = self.__localLimitStateSearch__(toBeTested, 1, nodeName)\n nPosPoints = len(listSurfPointPositive)\n listSurfPoint[nodeName] = 
listSurfPointNegative + listSurfPointPositive\n #printing----------------------\n if self.getLocalVerbosity() == 'debug':\n if len(listSurfPoint[nodeName]) > 0:\n self.raiseADebug('LimitSurface: Limit surface points:')\n for coordinate in listSurfPoint[nodeName]:\n myStr = ''\n for iVar, varnName in enumerate(self.axisName):\n myStr += varnName + ': ' + str(coordinate[iVar]) + ' '\n self.raiseADebug('LimitSurface: ' + myStr + ' value: ' + str(self.testMatrix[nodeName][tuple(coordinate)]))\n # if the number of points on the limit surface is greater than zero, save them\n if len(listSurfPoint[nodeName]) > 0:\n self.surfPoint[nodeName] = np.ndarray((len(listSurfPoint[nodeName]), self.nVar))\n evaluations[nodeName] = np.concatenate((-np.ones(nNegPoints), np.ones(nPosPoints)), axis = 0)\n for pointID, coordinate in enumerate(listSurfPoint[nodeName]):\n self.surfPoint[nodeName][pointID, :] = self.gridCoord[nodeName][tuple(coordinate)]\n if self.name != exceptionGrid:\n # the first nNegPoints entries of the list are the negative-side points\n self.listSurfPointNegative, self.listSurfPointPositive = listSurfPoint[self.name][:nNegPoints], listSurfPoint[self.name][nNegPoints:]\n if merge == True:\n evals = np.hstack(evaluations.values())\n listSurfPoints = np.hstack(listSurfPoint.values())\n surfPoint = np.hstack(self.surfPoint.values())\n returnSurface = (surfPoint, evals, listSurfPoints) if returnListSurfCoord else (surfPoint, evals)\n else:\n returnSurface = (self.surfPoint, evaluations, listSurfPoint) if returnListSurfCoord else (self.surfPoint, evaluations)\n return returnSurface\n\n def __localLimitStateSearch__(self, toBeTested, sign, nodeName):\n \"\"\"\n It returns the list of points belonging to the limit state surface and resulting in\n positive or negative responses by the ROM, depending on whether ''sign''\n equals either -1 or 1, respectively.\n @ In, toBeTested, np.ndarray, the nodes to be tested\n @ In, sign, int, the sign that should be tested (-1 or +1)\n @ In, nodeName, string, the sub-grid name\n @ Out, listSurfPoint, list, the list of limit surface coordinates\n \"\"\"\n #TODO: This approach might be extremely slow. It must be replaced with matrix operations\n listSurfPoint = []\n gridShape = self.gridEntity.returnParameter(\"gridShape\",nodeName)\n myIdList = np.zeros(self.nVar,dtype=int)\n putIt = np.zeros(self.nVar,dtype=bool)\n for coordinate in np.rollaxis(toBeTested, 0):\n myIdList[:] = coordinate\n putIt[:] = False\n if self.testMatrix[nodeName][tuple(coordinate)] * sign > 0:\n for iVar in range(self.nVar):\n if coordinate[iVar] + 1 < gridShape[iVar]:\n myIdList[iVar] += 1\n if self.testMatrix[nodeName][tuple(myIdList)] * sign <= 0:\n putIt[iVar] = True\n listSurfPoint.append(copy.copy(coordinate))\n break\n myIdList[iVar] -= 1\n if coordinate[iVar] > 0:\n myIdList[iVar] -= 1\n if self.testMatrix[nodeName][tuple(myIdList)] * sign <= 0:\n putIt[iVar] = True\n listSurfPoint.append(copy.copy(coordinate))\n break\n myIdList[iVar] += 1\n #if len(set(putIt)) == 1 and list(set(putIt))[0] == True: listSurfPoint.append(copy.copy(coordinate))\n return listSurfPoint\n"
]
| [
[
"numpy.asarray",
"numpy.zeros",
"numpy.sum",
"numpy.rollaxis",
"numpy.ones",
"numpy.gradient",
"numpy.atleast_2d"
]
]
|
kourouklides-ImpacTech/artificial_neural_networks | [
"83c37f7669fe1aa2bac92cce4bb7549e1e67d32f"
]
| [
"code/architectures/feedforward_neural_networks/standard_neural_networks/snn_dense_mnist.py"
]
| [
"\"\"\"\r\n\r\nModel: Standard Neural Network (SNN) with dense (i.e. fully connected) layers\r\nMethod: Backpropagation\r\nArchitecture: Feedforward Neural Network\r\n\r\nDataset: MNIST\r\nTask: Handwritten Digit Recognition (Multi-class Classification)\r\n\r\n Author: Ioannis Kourouklides, www.kourouklides.com\r\n License: https://github.com/kourouklides/artificial_neural_networks/blob/master/LICENSE\r\n\r\n\"\"\"\r\n# %%\r\n# Python configurations\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport random as rn\r\n\r\nfrom keras import optimizers\r\nfrom keras.layers import Input, Dense\r\nfrom keras.models import Model\r\nfrom keras.callbacks import ModelCheckpoint\r\nfrom keras.utils import to_categorical\r\n\r\nfrom sklearn.metrics import confusion_matrix\r\nimport itertools\r\n\r\nimport json\r\nimport yaml\r\n\r\nimport argparse\r\n\r\nimport os\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n# %%\r\n\r\n\r\ndef none_or_int(value):\r\n if value == 'None':\r\n return None\r\n else:\r\n return int(value)\r\n\r\n\r\ndef none_or_float(value):\r\n if value == 'None':\r\n return None\r\n else:\r\n return float(value)\r\n\r\n\r\ndef snn_dense_mnist(new_dir=os.getcwd()):\r\n\r\n os.chdir(new_dir)\r\n\r\n from artificial_neural_networks.code.utils.download_mnist import download_mnist\r\n\r\n # SETTINGS\r\n parser = argparse.ArgumentParser()\r\n\r\n # General settings\r\n parser.add_argument('--verbose', type=int, default=1)\r\n parser.add_argument('--reproducible', type=bool, default=True)\r\n parser.add_argument('--seed', type=int, default=0)\r\n parser.add_argument('--plot', type=bool, default=False)\r\n\r\n # Settings for preprocessing and hyperparameters\r\n parser.add_argument('--scaling_factor', type=float, default=(255/255))\r\n parser.add_argument('--translation', type=float, default=0)\r\n parser.add_argument('--same_size', type=bool, default=True)\r\n parser.add_argument('--n_layers', type=int, default=2)\r\n parser.add_argument('--layer_size', type=int, default=128)\r\n parser.add_argument('--explicit_layer_sizes', nargs='*', type=int, default=[128, 128])\r\n parser.add_argument('--n_epochs', type=int, default=50)\r\n parser.add_argument('--batch_size', type=none_or_int, default=512)\r\n parser.add_argument('--optimizer', type=str, default='RMSprop')\r\n parser.add_argument('--lrearning_rate', type=float, default=1e-2)\r\n parser.add_argument('--epsilon', type=none_or_float, default=1e0)\r\n\r\n # Settings for saving the model\r\n parser.add_argument('--save_architecture', type=bool, default=True)\r\n parser.add_argument('--save_last_weights', type=bool, default=True)\r\n parser.add_argument('--save_last_model', type=bool, default=True)\r\n parser.add_argument('--save_models', type=bool, default=False)\r\n parser.add_argument('--save_weights_only', type=bool, default=False)\r\n parser.add_argument('--save_best', type=bool, default=False)\r\n\r\n args = parser.parse_args()\r\n\r\n if (args.verbose > 0):\r\n print(args)\r\n\r\n # For reproducibility\r\n if (args.reproducible):\r\n os.environ['PYTHONHASHSEED'] = '0'\r\n np.random.seed(args.seed)\r\n rn.seed(args.seed)\r\n tf.set_random_seed(args.seed)\r\n\r\n # %%\r\n # Load the MNIST dataset\r\n\r\n mnist_path = download_mnist()\r\n mnist = np.load(mnist_path)\r\n train_x = mnist['x_train'].astype(np.float32)\r\n train_y = mnist['y_train'].astype(np.int32)\r\n test_x = 
mnist['x_test'].astype(np.float32)\r\n test_y = mnist['y_test'].astype(np.int32)\r\n mnist.close()\r\n\r\n # %%\r\n # PREPROCESSING STEP\r\n\r\n scaling_factor = args.scaling_factor\r\n translation = args.translation\r\n\r\n img_width = train_x.shape[1]\r\n img_height = train_x.shape[2]\r\n\r\n n_train = train_x.shape[0] # number of training examples/samples\r\n n_test = test_x.shape[0] # number of test examples/samples\r\n\r\n n_in = img_width * img_height # number of features / dimensions\r\n n_out = np.unique(train_y).shape[0] # number of classes/labels\r\n\r\n # Reshape training and test sets\r\n train_x = train_x.reshape(n_train, n_in)\r\n test_x = test_x.reshape(n_test, n_in)\r\n\r\n # Apply preprocessing\r\n train_x = scaling_factor * (train_x - translation)\r\n test_x = scaling_factor * (test_x - translation)\r\n\r\n one_hot = False # It works exactly the same for both True and False\r\n\r\n # Convert class vectors to binary class matrices (i.e. One hot encoding)\r\n if (one_hot):\r\n train_y = to_categorical(train_y, n_out)\r\n test_y = to_categorical(test_y, n_out)\r\n\r\n # %%\r\n # Model hyperparameters\r\n\r\n N = []\r\n N.append(n_in) # input layer\r\n if (args.same_size):\r\n n_layers = args.n_layers\r\n for i in range(n_layers):\r\n N.append(args.layer_size) # hidden layer i\r\n else:\r\n n_layers = len(args.explicit_layer_sizes)\r\n for i in range(n_layers):\r\n N.append(args.explicit_layer_sizes[i]) # hidden layer i\r\n N.append(n_out) # output layer\r\n\r\n # ANN Architecture\r\n L = len(N) - 1\r\n\r\n x = Input(shape=(n_in,)) # input layer\r\n h = x\r\n\r\n for i in range(1, L):\r\n h = Dense(units=N[i], activation='relu')(h) # hidden layer i\r\n\r\n out = Dense(units=n_out, activation='softmax')(h) # output layer\r\n\r\n model = Model(inputs=x, outputs=out)\r\n\r\n if (args.verbose > 0):\r\n model.summary()\r\n\r\n if (one_hot):\r\n loss_function = 'categorical_crossentropy'\r\n else:\r\n loss_function = 'sparse_categorical_crossentropy'\r\n\r\n metrics = ['accuracy']\r\n\r\n lr = args.lrearning_rate\r\n epsilon = args.epsilon\r\n optimizer_selection = {'Adadelta': optimizers.Adadelta(\r\n lr=lr, rho=0.95, epsilon=epsilon, decay=0.0),\r\n 'Adagrad': optimizers.Adagrad(\r\n lr=lr, epsilon=epsilon, decay=0.0),\r\n 'Adam': optimizers.Adam(\r\n lr=lr, beta_1=0.9, beta_2=0.999,\r\n epsilon=epsilon, decay=0.0, amsgrad=False),\r\n 'Adamax': optimizers.Adamax(\r\n lr=lr, beta_1=0.9, beta_2=0.999,\r\n epsilon=epsilon, decay=0.0),\r\n 'Nadam': optimizers.Nadam(\r\n lr=lr, beta_1=0.9, beta_2=0.999,\r\n epsilon=epsilon, schedule_decay=0.004),\r\n 'RMSprop': optimizers.RMSprop(\r\n lr=lr, rho=0.9, epsilon=epsilon, decay=0.0),\r\n 'SGD': optimizers.SGD(\r\n lr=lr, momentum=0.0, decay=0.0, nesterov=False)}\r\n\r\n optimizer = optimizer_selection[args.optimizer]\r\n\r\n model.compile(optimizer=optimizer,\r\n loss=loss_function,\r\n metrics=metrics)\r\n\r\n # %%\r\n # Save trained models for every epoch\r\n\r\n models_path = r'artificial_neural_networks/trained_models/'\r\n model_name = 'mnist_snn_dense'\r\n weights_path = models_path + model_name + '_weights'\r\n model_path = models_path + model_name + '_model'\r\n file_suffix = '_{epoch:04d}_{val_acc:.4f}_{val_loss:.4f}'\r\n\r\n if (args.save_weights_only):\r\n file_path = weights_path\r\n else:\r\n file_path = model_path\r\n\r\n file_path += file_suffix\r\n\r\n # monitor='val_loss'\r\n monitor = 'val_acc'\r\n\r\n if (args.save_models):\r\n checkpoint = ModelCheckpoint(file_path + '.h5',\r\n monitor=monitor,\r\n 
verbose=args.verbose,\r\n save_best_only=args.save_best,\r\n mode='auto',\r\n save_weights_only=args.save_weights_only)\r\n callbacks = [checkpoint]\r\n else:\r\n callbacks = []\r\n\r\n # %%\r\n # TRAINING PHASE\r\n\r\n model_history = model.fit(x=train_x, y=train_y,\r\n validation_data=(test_x, test_y),\r\n batch_size=args.batch_size,\r\n epochs=args.n_epochs,\r\n verbose=args.verbose,\r\n callbacks=callbacks)\r\n\r\n # %%\r\n # TESTING PHASE\r\n\r\n train_y_pred = np.argmax(model.predict(train_x), axis=1)\r\n test_y_pred = np.argmax(model.predict(test_x), axis=1)\r\n\r\n train_score = model.evaluate(x=train_x, y=train_y, verbose=args.verbose)\r\n train_dict = {'val_loss': train_score[0], 'val_acc': train_score[1]}\r\n\r\n test_score = model.evaluate(x=test_x, y=test_y, verbose=args.verbose)\r\n test_dict = {'val_loss': test_score[0], 'val_acc': test_score[1]}\r\n\r\n if (args.verbose > 0):\r\n print('Train loss:', train_dict['val_loss'])\r\n print('Train accuracy:', train_dict['val_acc'])\r\n\r\n print('Test loss:', test_dict['val_loss'])\r\n print('Test accuracy:', test_dict['val_acc'])\r\n\r\n # %%\r\n # Data Visualization\r\n\r\n def plot_confusion_matrix(cm, classes, title='Confusion matrix',\r\n cmap=plt.cm.Blues):\r\n plt.figure()\r\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\r\n plt.title(title)\r\n plt.colorbar()\r\n tick_marks = np.arange(len(classes))\r\n plt.xticks(tick_marks, classes)\r\n plt.yticks(tick_marks, classes)\r\n\r\n fmt = 'd'\r\n thresh = cm.max() / 2.\r\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\r\n plt.text(j, i, format(cm[i, j], fmt),\r\n horizontalalignment='center',\r\n color='white' if cm[i, j] > thresh else 'black')\r\n\r\n plt.ylabel('Actual label')\r\n plt.xlabel('Predicted label')\r\n plt.tight_layout()\r\n plt.show()\r\n\r\n if (args.plot):\r\n train_cm = confusion_matrix(train_y, train_y_pred)\r\n test_cm = confusion_matrix(test_y, test_y_pred)\r\n\r\n classes = list(range(n_out))\r\n\r\n plot_confusion_matrix(train_cm, classes=classes,\r\n title='Confusion matrix for training set')\r\n plot_confusion_matrix(test_cm, classes=classes,\r\n title='Confusion matrix for test set')\r\n\r\n # %%\r\n # Save the architecture and the last trained model\r\n\r\n architecture_path = models_path + model_name + '_architecture'\r\n\r\n last_suffix = file_suffix.format(epoch=args.n_epochs,\r\n val_acc=test_dict['val_acc'],\r\n val_loss=test_dict['val_loss'])\r\n\r\n if (args.save_architecture):\r\n # Save only the architecture (as a JSON file)\r\n json_string = model.to_json()\r\n json.dump(json.loads(json_string), open(architecture_path + '.json', \"w\"))\r\n\r\n # Save only the architecture (as a YAML file)\r\n yaml_string = model.to_yaml()\r\n yaml.dump(yaml.load(yaml_string), open(architecture_path + '.yml', \"w\"))\r\n\r\n # Save only the weights (as an HDF5 file)\r\n if (args.save_last_weights):\r\n model.save_weights(weights_path + last_suffix + '.h5')\r\n\r\n # Save the whole model (as an HDF5 file)\r\n if (args.save_last_model):\r\n model.save(model_path + last_suffix + '.h5')\r\n\r\n return model\r\n\r\n# %%\r\n\r\n\r\nif __name__ == '__main__':\r\n model = snn_dense_mnist('../../../../../')\r\n"
]
| [
[
"tensorflow.set_random_seed",
"matplotlib.pyplot.colorbar",
"sklearn.metrics.confusion_matrix",
"numpy.random.seed",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.title",
"numpy.load",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.ylabel",
"numpy.unique",
"matplotlib.pyplot.show",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.imshow"
]
]
|
sgeorgiev255/glow | [
"d0d5c45d5487b77027875930acc56a32f867d8bb"
]
| [
"torch_glow/tests/nodes/quantized_batchnorm3d_relu_test.py"
]
| [
"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport unittest\n\nimport torch\nimport torch.nn as nn\nfrom tests import utils\nfrom torch.quantization import (\n DeQuantStub,\n QConfig,\n QuantStub,\n convert,\n fuse_modules,\n observer,\n prepare,\n)\n\n\nmy_qconfig = QConfig(\n activation=observer.default_observer,\n weight=observer.HistogramObserver.with_args(dtype=torch.qint8, reduce_range=False),\n)\n\n\nclass TestQuantizedBatchNorm3DRelu(unittest.TestCase):\n def test_batchnorm_relu_basic(self):\n \"\"\"\n Basic test of the PyTorch 3D batchnorm RELU Node on Glow.\n \"\"\"\n\n class SimpleQuantizedBatchNormRelu(nn.Module):\n def __init__(self, w, b, m, v):\n super(SimpleQuantizedBatchNormRelu, self).__init__()\n self.bn = torch.nn.BatchNorm3d(4)\n self.relu = torch.nn.ReLU()\n self.bn.weight = torch.nn.Parameter(w)\n self.bn.bias = torch.nn.Parameter(b)\n self.bn.running_mean = m\n self.bn.running_var = v\n self.q = QuantStub()\n self.dq = DeQuantStub()\n\n def forward(self, x):\n qx = self.q(x)\n qy = self.bn(qx)\n qy_relu = self.relu(qy)\n y = self.dq(qy_relu)\n return y\n\n C = 4\n weight = torch.ones(C) + torch.rand(C) * 0.001\n bias = torch.rand(C) * 0.0001\n running_mean = torch.zeros(C)\n running_var = torch.ones(C)\n\n inputs = torch.randn((10, C, 2, 3, 4), requires_grad=False)\n model = SimpleQuantizedBatchNormRelu(weight, bias, running_mean, running_var)\n model.eval()\n model.qconfig = my_qconfig\n modules_to_fuse = [[\"bn\", \"relu\"]]\n fuse_modules(model, modules_to_fuse, inplace=True)\n prepare(model, inplace=True)\n model.forward(inputs)\n convert(model, inplace=True)\n\n # Because of the difference of quantization between PyTorch & Glow\n # We set eps big enough.\n # Batchnorm introduced great accuracy issues, which could create up to\n # ~1e-2 difference in some rare cases. In order to prevent this test\n # to be flaky, atol is set to be 0.1.\n utils.compare_tracing_methods(\n model,\n inputs,\n fusible_ops={\"quantized::batch_norm3d_relu\"},\n atol=1e-1,\n fp16=True,\n skip_to_glow=True,\n )\n"
]
| [
[
"torch.zeros",
"torch.rand",
"torch.quantization.convert",
"torch.quantization.fuse_modules",
"torch.quantization.observer.HistogramObserver.with_args",
"torch.quantization.QuantStub",
"torch.quantization.prepare",
"torch.quantization.DeQuantStub",
"torch.ones",
"torch.nn.ReLU",
"torch.nn.Parameter",
"torch.nn.BatchNorm3d",
"torch.randn"
]
]
|
nie-lang/Multi-Grid-Deep-Homography | [
"ba3b3820ba639829797be6cc81ddde4a19e5cd82"
]
| [
"Codes/tf_spatial_transform_local.py"
]
| [
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport tensorflow as tf\nimport numpy as np\nimport math\nimport tensorDLT_local\n\nimport constant\ngrid_w = constant.GRID_W\ngrid_h = constant.GRID_H\n\ndef transformer(U, im_one, depth, theta, name='SpatialTransformer', **kwargs):\n\n\n def _repeat(x, n_repeats):\n with tf.variable_scope('_repeat'):\n rep = tf.transpose(\n tf.expand_dims(tf.ones(shape=tf.stack([n_repeats, ])), 1), [1, 0])\n rep = tf.cast(rep, 'int32')\n x = tf.matmul(tf.reshape(x, (-1, 1)), rep)\n return tf.reshape(x, [-1])\n\n def _interpolate(im, x, y, out_size):\n with tf.variable_scope('_interpolate'):\n # constants\n num_batch = tf.shape(im)[0]\n height = tf.shape(im)[1]\n width = tf.shape(im)[2]\n channels = tf.shape(im)[3]\n\n x = tf.cast(x, 'float32')\n y = tf.cast(y, 'float32')\n height_f = tf.cast(height, 'float32')\n width_f = tf.cast(width, 'float32')\n out_height = out_size[0]\n out_width = out_size[1]\n zero = tf.zeros([], dtype='int32')\n max_y = tf.cast(tf.shape(im)[1] - 1, 'int32')\n max_x = tf.cast(tf.shape(im)[2] - 1, 'int32')\n\n # scale indices from [-1, 1] to [0, width/height]\n x = (x + 1.0)*(width_f) / 2.0\n y = (y + 1.0)*(height_f) / 2.0\n\n # do sampling\n x0 = tf.cast(tf.floor(x), 'int32')\n x1 = x0 + 1\n y0 = tf.cast(tf.floor(y), 'int32')\n y1 = y0 + 1\n\n x0 = tf.clip_by_value(x0, zero, max_x)\n x1 = tf.clip_by_value(x1, zero, max_x)\n y0 = tf.clip_by_value(y0, zero, max_y)\n y1 = tf.clip_by_value(y1, zero, max_y)\n dim2 = width\n dim1 = width*height\n base = _repeat(tf.range(num_batch)*dim1, out_height*out_width)\n base_y0 = base + y0*dim2\n base_y1 = base + y1*dim2\n idx_a = base_y0 + x0\n idx_b = base_y1 + x0\n idx_c = base_y0 + x1\n idx_d = base_y1 + x1\n\n # use indices to lookup pixels in the flat image and restore\n # channels dim\n im_flat = tf.reshape(im, tf.stack([-1, channels]))\n im_flat = tf.cast(im_flat, 'float32')\n Ia = tf.gather(im_flat, idx_a)\n Ib = tf.gather(im_flat, idx_b)\n Ic = tf.gather(im_flat, idx_c)\n Id = tf.gather(im_flat, idx_d)\n\n # and finally calculate interpolated values\n x0_f = tf.cast(x0, 'float32')\n x1_f = tf.cast(x1, 'float32')\n y0_f = tf.cast(y0, 'float32')\n y1_f = tf.cast(y1, 'float32')\n wa = tf.expand_dims(((x1_f-x) * (y1_f-y)), 1)\n wb = tf.expand_dims(((x1_f-x) * (y-y0_f)), 1)\n wc = tf.expand_dims(((x-x0_f) * (y1_f-y)), 1)\n wd = tf.expand_dims(((x-x0_f) * (y-y0_f)), 1)\n output = tf.add_n([wa*Ia, wb*Ib, wc*Ic, wd*Id])\n return output\n\n\n\n\n #input: batch_size*(grid_h+1)*(grid_w+1)*2\n #output: batch_size*grid_h*grid_w*9\n def get_Hs(theta, patch = 512.): \n with tf.variable_scope('get_Hs'):\n num_batch = tf.shape(theta)[0]\n h = patch / grid_h\n w = patch / grid_w\n Hs = []\n for i in range(grid_h):\n for j in range(grid_w):\n hh = i * h\n ww = j * w\n ori = tf.tile(tf.constant([ww, hh, ww + w, hh, ww, hh + h, ww + w, hh + h], shape=[1, 8], 
dtype=tf.float32), multiples=[num_batch, 1])\n #id = i * (grid_w + 1) + grid_w\n tar = tf.concat([tf.slice(theta, [0, i, j, 0], [-1, 1, 1, -1]), tf.slice(theta, [0, i, j + 1, 0], [-1, 1, 1, -1]), \n tf.slice(theta, [0, i + 1, j, 0], [-1, 1, 1, -1]), tf.slice(theta, [0, i + 1, j + 1, 0], [-1, 1, 1, -1])], axis=1)\n tar = tf.reshape(tar, [num_batch, 8])\n Hs.append(tf.reshape(tensorDLT_local.solve_DLT(ori, tar), [num_batch, 1, 9])) \n Hs = tf.reshape(tf.concat(Hs, axis=1), [num_batch, grid_h, grid_w, 9], name='Hs')\n return Hs \n\n def _meshgrid2(height, width, sh, eh, sw, ew):\n hn = eh - sh + 1\n wn = ew - sw + 1\n \n \n x_t = tf.matmul(tf.ones(shape=tf.stack([hn, 1])),\n tf.transpose(tf.expand_dims(tf.slice(tf.linspace(-1.0, 1.0, width), [sw], [wn]), 1), [1, 0]))\n y_t = tf.matmul(tf.expand_dims(tf.slice(tf.linspace(-1.0, 1.0, height), [sh], [hn]), 1),\n tf.ones(shape=tf.stack([1, wn])))\n\n x_t_flat = tf.reshape(x_t, (1, -1))\n y_t_flat = tf.reshape(y_t, (1, -1))\n\n ones = tf.ones_like(x_t_flat)\n grid = tf.concat([x_t_flat, y_t_flat, ones], 0)\n return grid\n\n\n\n def _transform3(theta, input_dim, im_one, depth):\n with tf.variable_scope('_transform'):\n num_batch = tf.shape(input_dim)[0]\n height = tf.shape(input_dim)[1]\n width = tf.shape(input_dim)[2]\n num_channels = tf.shape(input_dim)[3]\n \n patch_size = 512.\n M = np.array([[patch_size / 2.0, 0., patch_size / 2.0],\n [0., patch_size / 2.0, patch_size / 2.0],\n [0., 0., 1.]]).astype(np.float32)\n M_tensor = tf.constant(M, tf.float32)\n M_tile = tf.tile(tf.expand_dims(M_tensor, [0]), [num_batch, 1, 1])\n M_inv = np.linalg.inv(M)\n M_tensor_inv = tf.constant(M_inv, tf.float32)\n M_tile_inv = tf.tile(tf.expand_dims(M_tensor_inv, [0]), [num_batch, 1, 1])\n \n theta = tf.cast(theta, 'float32')\n Hs = get_Hs(theta, patch_size)\n print(\"!@#$%^==========================\")\n print(Hs)\n print(\"!@#$%^==========================\")\n gh = tf.cast(height / grid_h, 'int32')\n gw =tf.cast(width / grid_w, 'int32')\n\n x_ = []\n y_ = []\n for i in range(grid_h):\n row_x_ = []\n row_y_ = []\n for j in range(grid_w):\n H = tf.reshape(tf.slice(Hs, [0, i, j, 0], [-1, 1, 1, -1]), [num_batch, 3, 3])\n H = tf.matmul(tf.matmul(M_tile_inv, H), M_tile)\n sh = i * gh\n eh = (i + 1) * gh - 1\n sw = j * gw\n ew = (j + 1) * gw - 1\n if (i == grid_h - 1):\n eh = height - 1\n if (j == grid_w - 1):\n ew = width - 1\n grid = _meshgrid2(height, width, sh, eh, sw, ew)\n grid = tf.expand_dims(grid, 0)\n grid = tf.tile(grid, [num_batch, 1, 1])\n\n T_g = tf.matmul(H, grid)\n x_s = tf.slice(T_g, [0, 0, 0], [-1, 1, -1])\n y_s = tf.slice(T_g, [0, 1, 0], [-1, 1, -1])\n z_s = tf.slice(T_g, [0, 2, 0], [-1, 1, -1])\n \n z_s_flat = tf.reshape(z_s, [-1])\n t_1 = tf.ones(shape = tf.shape(z_s_flat))\n t_0 = tf.zeros(shape = tf.shape(z_s_flat)) \n\n sign_z_flat = tf.where(z_s_flat >= 0, t_1, t_0) * 2 - 1\n z_s_flat = tf.reshape(z_s, [-1]) + sign_z_flat * 1e-8\n x_s_flat = tf.reshape(x_s, [-1]) / z_s_flat\n y_s_flat = tf.reshape(y_s, [-1]) / z_s_flat\n\n x_s = tf.reshape(x_s_flat, [num_batch, eh - sh + 1, ew - sw + 1])\n y_s = tf.reshape(y_s_flat, [num_batch, eh - sh + 1, ew - sw + 1])\n row_x_.append(x_s)\n row_y_.append(y_s)\n row_x = tf.concat(row_x_, axis=2)\n row_y = tf.concat(row_y_, axis=2)\n x_.append(row_x)\n y_.append(row_y)\n\n x = tf.reshape(tf.concat(x_, axis=1), [num_batch, height, width, 1], name='x_map')\n y = tf.reshape(tf.concat(y_, axis=1), [num_batch, height, width, 1], name='y_map')\n\n print('===============xy===========')\n print(x)\n print(y)\n\n img = 
tf.concat([x, y], axis=3)\n x_s_flat = tf.reshape(x, [-1])\n y_s_flat = tf.reshape(y, [-1])\n\n\n out_size = (height, width)\n input_transformed = _interpolate(input_dim, x_s_flat, y_s_flat, out_size)\n mask_transformed = _interpolate(im_one, x_s_flat, y_s_flat, out_size)\n warp2_depth_transformed = _interpolate(depth, x_s_flat, y_s_flat, out_size)\n\n output = tf.reshape(input_transformed, tf.stack([num_batch, height, width, num_channels]), name='output_img')\n mask_output = tf.reshape(mask_transformed, tf.stack([num_batch, height, width, num_channels]), name='output_mask')\n warp2_depth = tf.reshape(warp2_depth_transformed, tf.stack([num_batch, height, width, num_channels]), name='warp2_depth')\n \n print(\"!@#$%^===output/black_pix=======================\")\n print(output)\n print(\"!@#$%^==========================\")\n return output, mask_output, warp2_depth\n\n \n with tf.variable_scope(name):\n output, mask_output, warp2_depth = _transform3(theta, U, im_one, depth)\n return output, mask_output, warp2_depth\n\n\n"
]
| [
[
"tensorflow.ones_like",
"tensorflow.matmul",
"tensorflow.reshape",
"tensorflow.clip_by_value",
"tensorflow.stack",
"tensorflow.tile",
"tensorflow.cast",
"tensorflow.shape",
"tensorflow.concat",
"tensorflow.add_n",
"tensorflow.constant",
"tensorflow.variable_scope",
"numpy.linalg.inv",
"tensorflow.floor",
"numpy.array",
"tensorflow.zeros",
"tensorflow.range",
"tensorflow.expand_dims",
"tensorflow.where",
"tensorflow.linspace",
"tensorflow.gather",
"tensorflow.slice"
]
]
|
shruti1421/scona | [
"c07512dc80303b312ae73ae271ce91a832fbb258"
]
| [
"scona/visualisations.py"
]
| [
"import warnings\n\nimport numpy as np\nimport networkx as nx\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom nilearn import plotting\n\nfrom scona.visualisations_helpers import save_fig\nfrom scona.visualisations_helpers import create_df_sns_barplot\nfrom scona.visualisations_helpers import graph_to_nilearn_array\nfrom scona.visualisations_helpers import setup_color_list\n\n\ndef plot_rich_club(brain_bundle, original_network, figure_name=None,\n color=None, show_legend=True, x_max=None, y_max=None):\n \"\"\"\n This is a visualisation tool for plotting the rich club values per degree\n along with the random rich club values created from a random networks\n with a preserved degree distribution.\n\n Parameters\n ----------\n brain_bundle : `GraphBundle` object\n a python dictionary with BrainNetwork objects as values\n (:class:`str`: :class:`BrainNetwork` pairs), contains original Graph\n and random graphs.\n original_network: str, required\n This should index the particular network in `brain_bundle` that you\n want the figure to highlight. A distribution of all the other networks\n in `brain_bundle` will be rendered for comparison.\n figure_name : str, optional\n path to the file to store the created figure in\n (e.g. \"/home/Desktop/name\") or to store in the current directory\n include just a name (\"fig_name\");\n color : list of 2 strings, optional\n where the 1st string is a color for rich club values and 2nd - for\n random rich club values. You can specify the color using an html hex\n string (e.g. color =[\"#06209c\",\"#c1b8b1\"]) or you can pass an\n (r, g, b) tuple, where each of r, g, b are in the range [0,1].\n Legal html names for colors, like \"red\", \"black\" and so on are also\n supported.\n show_legend: bool (optional, default=True)\n if True - show legend, otherwise - do not display legend.\n x_max : int, optional\n the max length of the x-axis of the plot\n y_max : int, optional\n the max length of the y-axis of the plot\n\n Returns\n -------\n Plot the figure and if figure_name is given then save the image\n in a file named according to the figure_name variable.\n \"\"\"\n\n # set the seaborn style and context in the beginning!\n sns.set(style=\"white\")\n sns.set_context(\"poster\", font_scale=1)\n\n # calculate rich club coefficients for each graph in Graph Bundle\n rich_club_df = brain_bundle.report_rich_club()\n\n # get the degrees\n degree = rich_club_df.index.values\n\n # select the values of the 1st Graph in Graph Bundle - Real Graph\n try:\n rc_orig = np.array(rich_club_df[original_network])\n except KeyError:\n raise KeyError(\n \"Please check the name of the initial Graph (the proper network, \"\n \"the one you got from the mri data) in GraphBundle. 
There is\"\n \" no graph keyed by name \\\"{}\\\"\".format(original_network))\n\n # create a dataframe of random Graphs (exclude Real Graph)\n rand_df = rich_club_df.drop(original_network, axis=1)\n\n # re-organize rand_df dataframe in a suitable way\n # so that there is one column for the degrees data, one for rich club\n # values required for seaborn plotting with error bars\n\n # create array to store the degrees\n rand_degree = []\n\n # create array to store a rich_club values according to the degree\n rc_rand = []\n\n # append each column in rand_df to a list\n for i in range(len(rand_df.columns)):\n rand_degree = np.append(rand_degree, rand_df.index.values)\n rc_rand = np.append(rc_rand, rand_df.iloc[:, i])\n\n new_rand_df = pd.DataFrame({'Degree': rand_degree, 'Rich Club': rc_rand})\n\n # create a figure\n fig, ax = plt.subplots(figsize=(10, 6))\n\n # set the default colors of plotted values if not provided\n if color is None:\n color = [\"#00C9FF\", \"grey\"]\n elif len(color) == 1: # if you only want to plot the original\n color.append(\"grey\") # network (no random networks)\n\n # if the user provided color not as a list of size 2 - show a warning\n # and then carry on but using the default colors\n\n if not isinstance(color, list) and len(color) != 2:\n warnings.warn(\"Please, provide a *color* parameter as a \"\n \"python list object, e.g. [\\\"green\\\", \\\"pink\\\"]. \"\n \"Right now the default colors will be used\")\n color = [\"#00C9FF\", \"grey\"]\n\n # plot the rich club values of real Graph\n ax = sns.lineplot(x=degree, y=rc_orig, label=\"Observed network\", zorder=1,\n color=color[0])\n\n # plot the random rich club values of random graphs\n ax = sns.lineplot(x=\"Degree\", y=\"Rich Club\", data=new_rand_df,\n err_style=\"band\", ci=95, color=color[1],\n label=\"Random network\", zorder=2)\n\n # set the max values of x & y - axis if not given\n if x_max is None:\n x_max = max(degree)\n\n if y_max is None:\n y_max = max(rc_orig) + 0.1 # let y-axis be longer -> looks better\n\n # set the x and y axis limits\n ax.set_xlim((0, x_max))\n ax.set_ylim((0, y_max))\n\n # set the number of bins to 4\n ax.locator_params(nbins=4)\n\n # set the x and y axis labels\n ax.set_xlabel(\"Degree\")\n ax.set_ylabel(\"Rich Club\")\n\n # create a legend if show_legend = True, otherwise - remove\n if show_legend:\n ax.legend(fontsize=\"x-small\")\n else:\n ax.legend_.remove()\n\n # remove the top and right spines from plot\n sns.despine()\n\n # adjust subplot params so that the subplot fits in to the figure area\n plt.tight_layout()\n\n # display the figure\n plt.show()\n\n # save the figure if the location-to-save is provided\n if figure_name:\n # use the helper-function from module helpers to save the figure\n save_fig(fig, figure_name)\n # close the file after saving to a file\n plt.close(fig)\n\n\ndef plot_network_measures(brain_bundle, original_network, figure_name=None,\n color=None, ci=95, show_legend=True):\n \"\"\"\n This is a visualisation tool for plotting network measures values\n along with the random network values created from a random networks.\n\n Parameters\n ----------\n brain_bundle : :class:`GraphBundle`\n a python dictionary with BrainNetwork objects as values\n (:class:`str`: :class:`BrainNetwork` pairs), contains real Graph and\n random graphs.\n original_network: str, required\n This should index the particular network in `brain_bundle` that you\n want the figure to highlight. 
A distribution of all the other networks\n in `brain_bundle` will be rendered for comparison.\n figure_name : str, optional\n path to the file to store the created figure in\n (e.g. \"/home/Desktop/name\") or to store in the current directory\n include just a name (\"fig_name\").\n color : list of 2 strings, optional\n where the 1st string is a color for original network measures and the\n 2nd is for the values from the random graphs.\n You can specify the color using an html hex string\n (e.g. color =[\"#06209c\",\"#c1b8b1\"]) or you can pass an (r, g, b) tuple,\n where each of r, g, b are in the range [0,1]. Finally, legal html names\n for colors, like \"red\", \"black\" and so on are supported.\n show_legend: bool (optional, default=True)\n if True - show legend, otherwise - do not display legend.\n ci: float or “sd” or None (optional, default=95)\n Size of confidence intervals to draw around estimated values. If “sd”,\n skip bootstrapping and draw the standard deviation of the observations.\n If None, no bootstrapping will be performed, and error bars will not be\n drawn.\n Returns\n -------\n Plot the Figure and if figure_name provided, save it in a figure_name\n file.\n \"\"\"\n\n # set the seaborn style and context in the beginning!\n sns.set(style=\"white\")\n sns.set_context(\"poster\", font_scale=1)\n\n # build a new DataFrame required for seaborn.barplot\n seaborn_data = create_df_sns_barplot(brain_bundle, original_network)\n\n # set the default colors of barplot values if not provided\n if color is None:\n color = [sns.color_palette()[0], \"lightgrey\"]\n elif len(color) == 1: # in case we want to plot only real values\n color.append(\"lightgrey\")\n\n # if the user provided color not as a list of size 2 - show warning\n # use default colors\n if not isinstance(color, list) and len(color) != 2:\n warnings.warn(\"Please, provide a *color* parameter as a \"\n \"python list object, e.g. [\\\"green\\\", \\\"pink\\\"]. 
\"\n \"Right now the default colors will be used\")\n color = [sns.color_palette()[0], \"lightgrey\"]\n\n # Create a figure\n fig, ax = plt.subplots(figsize=(8, 6))\n\n # plot global measures with error bars\n ax = sns.barplot(x=\"measure\", y=\"value\", hue=\"TypeNetwork\",\n data=seaborn_data, palette=[color[0], color[1]], ci=ci)\n\n # make a line at y=0\n ax.axhline(0, linewidth=0.8, color='black')\n\n # set labels for y axix\n ax.set_ylabel(\"Global network measures\")\n ax.set_xlabel(\"\") # empty -> no x-label\n\n # create a legend if show_legend = True, otherwise - remove\n if show_legend:\n ax.legend(fontsize=\"xx-small\")\n else:\n ax.legend_.remove()\n\n # remove the top and right spines from plot\n sns.despine()\n\n # adjust subplot params so that the subplot fits in to the figure area\n plt.tight_layout()\n\n # display the figure\n plt.show()\n\n # save the figure if the location-to-save is provided\n if figure_name:\n # use the helper-function from module helpers to save the figure\n save_fig(fig, figure_name)\n # close the file after saving to a file\n plt.close(fig)\n\n\ndef plot_degree_dist(G, binomial_graph=True, seed=10, figure_name=None,\n color=None):\n \"\"\"\n This is a visualisation tool for plotting the degree distribution\n along with the degree distribution of an Erdos Renyi random graph\n that has the same number of nodes.\n\n Parameters\n ----------\n G : :class:`networkx.Graph`\n BrainNetwork object\n binomial_graph : bool (optional, default=True)\n if \"True\" plot the degree distribution of an Erdos Renyi random graph.\n seed : integer (default=10), random_state, or None\n Seed for random number generator. In case it is needed to create random\n Erdos Renyi Graph, set `seed` to None.\n figure_name : str, optional\n path to the file to store the created figure in (e.g. \"/home/Desktop/name\")\n or to store in the current directory include just a name (\"fig_name\");\n color : list of 2 strings, optional\n where the 1st string is a color for rich club values and 2nd - for random\n rich club values. You can specify the color using an html hex string\n (e.g. color =[\"#06209c\",\"#c1b8b1\"]) or you can pass an (r, g, b) tuple,\n where each of r, g, b are in the range [0,1]. Finally, legal html names\n for colors, like \"red\", \"black\" and so on are supported.\n\n Returns\n -------\n Plot the Figure and if figure_name given, save it in a figure_name file.\n \"\"\"\n\n # set the default colors of plotted values if not provided\n if color is None:\n color = [sns.color_palette()[0], \"grey\"]\n\n # if the user provided color not as a list of size 2\n # show warning, use default colors\n if not isinstance(color, list) and len(color) == 2:\n warnings.warn(\"Please, provide a *color* parameter as a \"\n \"python list object, e.g. [\\\"green\\\", \\\"pink\\\"]. 
\"\n \"Right now the default colors will be used\")\n color = [sns.color_palette()[0], \"grey\"]\n\n # set the seaborn style and context in the beginning!\n sns.set(style=\"white\")\n sns.set_context(\"poster\", font_scale=1)\n\n # calculate the degrees from the graph\n degrees = np.array(list(dict(G.degree()).values()))\n\n # calculate the Erdos Renyi graph from the main graph\n nodes = len(G.nodes())\n\n # set the cost as the probability for edge creation\n cost = G.number_of_edges() * 2.0 / (nodes*(nodes-1))\n G_ER = nx.erdos_renyi_graph(nodes, cost, seed=seed)\n\n # calculate the degrees for the ER graph\n degrees_ER = np.array(list(dict(G_ER.degree()).values()))\n\n # create a figure\n fig, ax = plt.subplots(figsize=(10, 6))\n\n # plot distribution of graph's degrees\n ax = sns.distplot(degrees, color=color[0])\n\n # plot a Erdos Renyi graph density estimate\n if binomial_graph:\n ax = sns.kdeplot(degrees_ER, color=color[1])\n\n # fix the x axis limits without the gap between the 1st column and x = 0\n # start from 1\n ax.set_xlim((1, max(degrees)))\n\n # set the number of bins to 5\n ax.locator_params(axis=\"x\", nbins=5)\n\n # Set the x and y axis labels\n ax.set_xlabel(\"Degree\")\n ax.set_ylabel(\"Probability\")\n\n # remove the top and right spines from plot\n sns.despine()\n\n # adjust subplot params so that the subplot fits in to the figure area\n plt.tight_layout()\n\n # display the figure\n plt.show()\n\n # save the figure if the location-to-save is provided\n if figure_name:\n # use the helper-function from module helpers to save the figure\n save_fig(fig, figure_name)\n # close the file after saving to a file\n plt.close(fig)\n\n\ndef view_nodes_3d(\n G,\n node_size=5.,\n node_color='black',\n measure=None,\n cmap_name=None,\n sns_palette=None,\n continuous=False,\n vmin=None,\n vmax=None):\n \"\"\"\n Plot nodes of a BrainNetwork using\n :func:`nilearn.plotting.view_markers()` tool.\n\n Insert a 3d plot of markers in a brain into an HTML page.\n\n Parameters\n ----------\n G : :class:`networkx.Graph`\n G should have nodal locations in MNI space indexed by nodal\n attribute \"centroids\"\n\n node_size : float or array-like, optional (default=5.)\n Size of the nodes showing the seeds in pixels.\n\n node_color : str or list of str (default 'black')\n node_colour determines the colour given to each node.\n If a single string is given, this string will be interpreted as a\n a colour, and all nodes will be rendered in this colour.\n If a list of colours is given, it must be the same length as the length\n of nodes coordinates.\n\n measure: str, (optional, default=None)\n The name of a nodal measure.\n\n cmap_name : Matplotlib colormap\n Colormap for mapping intensities of nodes (default=None).\n\n sns_palette: seaborn palette, (optional, default=None)\n Discrete color palette only for discrete data. List of colors defining\n a color palette (list of RGB tuples from seaborn color palettes).\n\n continuous: bool, (optional, default=False)\n Indicate whether the data values are discrete (False) or\n continuous (True).\n\n vmin : scalar or None, optional\n The minimum value used in colormapping *data*. If *None* the minimum\n value in *data* is used.\n\n vmax : scalar or None, optional\n The maximum value used in colormapping *data*. If *None* the maximum\n value in *data* is used.\n\n Returns\n -------\n ConnectomeView : plot of the nodes.\n It can be saved as an html page or rendered (transparently) by the\n Jupyter notebook. 
Useful methods are :\n - 'resize' to resize the plot displayed in a Jupyter notebook\n - 'save_as_html' to save the plot to a file\n - 'open_in_browser' to save the plot and open it in a web browser.\n \"\"\"\n\n # get the nodes coordinates\n adj_matrix, node_coords = graph_to_nilearn_array(G)\n\n # apply color to all nodes in Graph if node_color is string\n if isinstance(node_color, str):\n node_color = [node_color for _ in range(len(node_coords))]\n\n # report the attributes of each node in BrainNetwork Graph\n nodal_measures = G.report_nodal_measures()\n\n # get the color for each node based on the nodal measure\n if measure:\n if measure in nodal_measures.columns:\n node_color = setup_color_list(df=nodal_measures, measure=measure,\n cmap_name=cmap_name,\n sns_palette=sns_palette,\n continuous=continuous,\n vmin=vmin,\n vmax=vmax)\n else:\n warnings.warn(\n \"Measure \\\"{}\\\" does not exist in nodal attributes of graph. \"\n \"The default color will be used for all nodes.\".format(measure))\n node_color = [node_color for _ in range(len(node_coords))]\n\n # plot nodes\n ConnectomeView = plotting.view_markers(node_coords,\n marker_color=node_color,\n marker_size=node_size)\n\n return ConnectomeView\n\n\ndef view_connectome_3d(\n G,\n edge_threshold=\"98%\",\n edge_cmap=\"Spectral_r\",\n symmetric_cmap=False,\n linewidth=6.,\n node_size=3.):\n \"\"\"\n Insert a 3d plot of a connectome into an HTML page.\n\n Plot a BrainNetwork using :func:`nilearn.plotting.view_connectome()` tool.\n\n Parameters\n ----------\n G : :class:`networkx.Graph`\n G should have nodal locations in MNI space indexed by nodal\n attribute \"centroids\".\n\n edge_threshold : str, number or None, optional (default=\"2%\")\n If None, no thresholding.\n If it is a number only connections of amplitude greater\n than threshold will be shown.\n If it is a string it must finish with a percent sign,\n e.g. \"25.3%\", and only connections of amplitude above the\n given percentile will be shown.\n\n edge_cmap : str or matplotlib colormap, optional\n Colormap for displaying edges.\n\n symmetric_cmap : bool, optional (default=False)\n Make colormap symmetric (ranging from -vmax to vmax).\n\n linewidth : float, optional (default=6.)\n Width of the lines that show connections.\n\n node_size : float, optional (default=3.)\n Size of the markers showing the seeds in pixels.\n\n Returns\n -------\n ConnectomeView : plot of the connectome.\n It can be saved as an html page or rendered (transparently) by the\n Jupyter notebook. 
Useful methods are :\n - 'resize' to resize the plot displayed in a Jupyter notebook\n - 'save_as_html' to save the plot to a file\n - 'open_in_browser' to save the plot and open it in a web browser.\n\n \"\"\"\n\n # get the adjacency matrix and nodes coordinates\n adj_matrix, node_coords = graph_to_nilearn_array(G)\n\n # plot connectome\n ConnectomeView = plotting.view_connectome(adjacency_matrix=adj_matrix,\n node_coords=node_coords,\n edge_threshold=edge_threshold,\n edge_cmap=edge_cmap,\n symmetric_cmap=symmetric_cmap,\n linewidth=linewidth,\n node_size=node_size)\n\n return ConnectomeView\n\n\ndef plot_connectome(\n G,\n node_color='auto', node_size=50,\n edge_cmap=plt.cm.bwr,\n edge_vmin=None, edge_vmax=None,\n edge_threshold=None,\n output_file=None, display_mode='ortho',\n figure=None, axes=None, title=None,\n annotate=True, black_bg=False,\n alpha=0.7,\n edge_kwargs=None, node_kwargs=None,\n colorbar=False):\n \"\"\"\n Plot connectome on top of the brain glass schematics.\n\n The plotted image should be in MNI space for this function to work\n properly.\n\n In the case of ‘l’ and ‘r’ directions (for hemispheric projections),\n markers in the coordinate x == 0 are included in both hemispheres.\n\n Plot a BrainNetwork using :func:`nilearn.plotting.plot_connectome()` tool.\n\n Parameters\n ----------\n G : :class:`networkx.Graph`\n G should have nodal locations in MNI space indexed by nodal\n attribute \"centroids\".\n\n node_color : color or sequence of colors, optional\n color(s) of the nodes. If string is given, all nodes\n are plotted with same color given in string.\n\n node_size : scalar or array_like, optional (default=50)\n size(s) of the nodes in points^2.\n\n edge_cmap : colormap, optional (default=\"bwr\")\n colormap used for representing the strength of the edges.\n\n edge_vmin : float, optional (default=None)\n\n edge_vmax : float, optional (default=None)\n If not None, either or both of these values will be used to\n as the minimum and maximum values to color edges. If None are\n supplied the maximum absolute value within the given threshold\n will be used as minimum (multiplied by -1) and maximum\n coloring levels.\n\n edge_threshold : str or number, optional (default=None)\n If it is a number only the edges with a value greater than\n edge_threshold will be shown.\n If it is a string it must finish with a percent sign,\n e.g. \"25.3%\", and only the edges with a abs(value) above\n the given percentile will be shown.\n\n output_file : string, or None, optional (default=None)\n The name of an image file to export the plot to. Valid extensions\n are .png, .pdf, .svg. If output_file is not None, the plot\n is saved to a file, and the display is closed.\n\n display_mode : string, optional (default='ortho')\n Choose the direction of the cuts: 'x' - sagittal, 'y' - coronal,\n 'z' - axial, 'l' - sagittal left hemisphere only,\n 'r' - sagittal right hemisphere only, 'ortho' - three cuts are\n performed in orthogonal directions. Possible values are: 'ortho',\n 'x', 'y', 'z', 'xz', 'yx', 'yz', 'l', 'r', 'lr', 'lzr', 'lyr',\n 'lzry', 'lyrz'.\n\n figure : integer or matplotlib figure, optional (default=None)\n Matplotlib figure used or its number. If None is given, a\n new figure is created.\n\n axes : matplotlib axes or 4 tuple of float: (xmin, ymin, width, height),\n optional (default=None)\n The axes, or the coordinates, in matplotlib figure space,\n of the axes used to display the plot. 
If None, the complete\n figure is used.\n\n title : string, optional (default=None)\n The title displayed on the figure.\n\n annotate : boolean, optional (default=True)\n If annotate is True, positions and left/right annotation\n are added to the plot.\n\n black_bg : boolean, optional (default=False)\n If True, the background of the image is set to be black. If\n you wish to save figures with a black background, you\n will need to pass \"facecolor='k', edgecolor='k'\"\n to matplotlib.pyplot.savefig.\n\n alpha : float between 0 and 1, optional (default=0.7)\n Alpha transparency for the brain schematics.\n\n edge_kwargs : dict, optional (default=None)\n will be passed as kwargs for each edge matlotlib Line2D.\n\n node_kwargs : dict, optional (default=None)\n will be passed as kwargs to the plt.scatter call that plots all\n the nodes in one go.\n\n colorbar : bool, optional (default=False)\n If True, display a colorbar on the right of the plots.\n By default it is False.\n\n \"\"\"\n\n # get the adjacency matrix and nodes coordinates\n adj_matrix, node_coords = graph_to_nilearn_array(G)\n\n # plot connectome\n display = plotting.plot_connectome(adjacency_matrix=adj_matrix,\n node_coords=node_coords,\n node_color=node_color,\n node_size=node_size,\n edge_cmap=edge_cmap,\n edge_vmin=edge_vmin,\n edge_vmax=edge_vmax,\n edge_threshold=edge_threshold,\n output_file=output_file,\n display_mode=display_mode,\n figure=figure,\n axes=axes,\n title=title,\n annotate=annotate,\n alpha=alpha,\n black_bg=black_bg,\n edge_kwargs=edge_kwargs,\n node_kwargs=node_kwargs,\n colorbar=colorbar)\n\n return display\n"
]
| [
[
"numpy.array",
"pandas.DataFrame",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.tight_layout",
"numpy.append",
"matplotlib.pyplot.show"
]
]
|
And1210/AutoencoderTransformer | [
"9c6142421d311d34f6a00cb90dd49388e5f1cdff"
]
| [
"models/AutoEncoderTransformer_model.py"
]
| [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom models.base_model import BaseModel\n\nclass Encoder(nn.Module):\n def __init__(self, hidden_dims, in_channels=3, encoded_dim=256):\n super().__init__()\n\n modules = []\n for h_dim in hidden_dims:\n modules.append(\n nn.Sequential(\n nn.Conv2d(in_channels, out_channels=h_dim,\n kernel_size=3, stride=2, padding=1),\n nn.BatchNorm2d(h_dim),\n nn.LeakyReLU())\n )\n in_channels = h_dim\n self.encoder = nn.Sequential(*modules)\n\n self.relu = nn.ReLU(inplace=True)\n\n # self.lin1 = nn.Linear(hidden_dims[-1]*4, 128)\n # self.lin2 = nn.Linear(128, encoded_dim)\n self.lin1 = nn.Linear(hidden_dims[-1]*4, encoded_dim)\n\n def forward(self, x):\n x = self.encoder(x)\n x = torch.flatten(x, start_dim=1)\n\n x = self.lin1(x)\n x = self.relu(x)\n # x = self.lin2(x)\n\n return x\n\nclass Decoder(nn.Module):\n def __init__(self, hidden_dims, encoded_dim=256):\n super().__init__()\n\n self.relu = nn.ReLU(inplace=True)\n\n # self.lin1 = nn.Linear(encoded_dim, 128)\n # self.lin2 = nn.Linear(128, hidden_dims[-1]*4)\n self.lin1 = nn.Linear(encoded_dim, hidden_dims[-1]*4)\n\n modules = []\n hidden_dims.reverse()\n for i in range(len(hidden_dims) - 1):\n modules.append(\n nn.Sequential(\n nn.ConvTranspose2d(hidden_dims[i],\n hidden_dims[i + 1],\n kernel_size=3,\n stride = 2,\n padding=1,\n output_padding=1),\n nn.BatchNorm2d(hidden_dims[i + 1]),\n nn.LeakyReLU())\n )\n self.decoder = nn.Sequential(*modules)\n\n self.output_layer = nn.Sequential(\n nn.ConvTranspose2d(hidden_dims[-1],\n hidden_dims[-1],\n kernel_size=3,\n stride=2,\n padding=1,\n output_padding=1),\n nn.BatchNorm2d(hidden_dims[-1]),\n nn.LeakyReLU(),\n nn.Conv2d(hidden_dims[-1], out_channels= 3,\n kernel_size= 3, padding= 1),\n nn.Sigmoid())\n\n def forward(self, x):\n x = self.lin1(x)\n x = self.relu(x)\n # x = self.lin2(x)\n # x = self.relu(x)\n\n x = x.view(-1, 512, 2, 2)\n x = self.decoder(x)\n x = self.output_layer(x)\n\n return x\n\n#PyTorch defined model\nclass AutoEncoderTransformer(nn.Module):\n \"\"\"basenet for fer2013\"\"\"\n def __init__(self, encoded_dim=256):\n super(AutoEncoderTransformer, self).__init__()\n\n hidden_dims = [128, 256, 512]\n\n self.encoder = Encoder(hidden_dims, 3, encoded_dim)\n self.decoder = Decoder(hidden_dims, encoded_dim)\n\n hidden_dims.reverse()\n self.noise_encoder = Encoder(hidden_dims, 3, encoded_dim)\n self.noise_decoder = Decoder(hidden_dims, encoded_dim)\n\n # self.transformer = nn.Transformer(d_model=encoded_dim, batch_first=True)\n self.transformer = nn.TransformerEncoderLayer(d_model=encoded_dim, nhead=8, batch_first=True)\n\n def forward(self, x):\n input = x\n x = self.encoder.forward(x)\n x = torch.unsqueeze(x, 1)\n x = self.transformer(x)\n x = torch.squeeze(x, 1)\n x = self.decoder.forward(x)\n\n noise = self.noise_encoder.forward(input)\n noise = self.noise_decoder.forward(noise)\n x = x - noise\n\n return x\n\n#The abstract model class, uses above defined class and is used in the train script\nclass AutoEncoderTransformermodel(BaseModel):\n \"\"\"basenet for fer2013\"\"\"\n\n def __init__(self, configuration):\n super().__init__(configuration)\n\n #Initialize model defined above\n self.model = AutoEncoderTransformer(configuration['encoded_dim'])\n self.model.cuda()\n\n #Define loss function\n self.criterion_loss = nn.MSELoss().cuda()\n #Define optimizer\n self.optimizer = torch.optim.Adam(\n self.model.parameters(),\n lr=configuration['lr'],\n betas=(configuration['momentum'], 0.999),\n 
weight_decay=configuration['weight_decay']\n        )\n\n        #Need to include these arrays with the optimizers and names of loss functions and models\n        #Will be used by other functions for saving/loading\n        # self.optimizers = [self.optimizers[i] for i in range(4)]\n        self.optimizers = [self.optimizer]\n        self.loss_names = ['total']\n        self.network_names = ['model']\n\n        self.val_images = []\n        self.val_predictions = []\n        self.val_labels = []\n\n    #Calls the model's forward function\n    def forward(self):\n        x = self.input\n        self.output = self.model.forward(x)\n        return self.output\n\n    #Computes the loss with the specified name (in this case 'total')\n    def compute_loss(self):\n        # print(self.output.shape)\n        # print(self.label.shape)\n        self.loss_total = self.criterion_loss(self.output, self.input)\n\n    #Compute backpropagation for the model\n    def optimize_parameters(self):\n        self.loss_total.backward()\n        self.optimizer.step()\n        self.optimizer.zero_grad()\n\n        torch.cuda.empty_cache()\n\n    #Test function for the model\n    def test(self):\n        super().test() # run the forward pass\n\n        # save predictions and labels as flat tensors\n        self.val_images.append(self.input)\n        self.val_predictions.append(self.output)\n        self.val_labels.append(self.label)\n\n    #Should be run after each epoch, outputs accuracy\n    def post_epoch_callback(self, epoch, visualizer):\n        self.val_predictions = torch.cat(self.val_predictions, dim=0)\n        predictions = torch.argmax(self.val_predictions, dim=1)\n        predictions = torch.flatten(predictions).cpu()\n\n        self.val_labels = torch.cat(self.val_labels, dim=0)\n        labels = torch.flatten(self.val_labels).cpu()\n\n        self.val_images = torch.squeeze(torch.cat(self.val_images, dim=0)).cpu()\n\n        # Calculate and show accuracy\n        val_accuracy = accuracy_score(labels, predictions)\n\n        metrics = OrderedDict()\n        metrics['Accuracy'] = val_accuracy\n\n        if visualizer is not None:\n            visualizer.plot_current_validation_metrics(epoch, metrics)\n        print('Validation accuracy: {0:.3f}'.format(val_accuracy))\n\n        # Here you may do something else with the validation data such as\n        # displaying the validation images or calculating the ROC curve\n\n        self.val_images = []\n        self.val_predictions = []\n        self.val_labels = []\n\n    def load_autoencoder(self, weights):\n        # copy pre-trained weights tensor-by-tensor, then freeze the clean autoencoder\n        keys = list(weights.keys())\n        with torch.no_grad():\n            for key in keys:\n                var_list = key.split('.')\n                layer = self.model\n                for v in var_list:\n                    layer = getattr(layer, v)\n                layer.copy_(weights[key])\n\n        for param in self.model.encoder.parameters():\n            param.requires_grad = False\n        for param in self.model.decoder.parameters():\n            param.requires_grad = False\n\n\nif __name__ == \"__main__\":\n    net = AutoEncoderTransformer().cuda()\n    from torchsummary import summary\n\n    # the conv stack assumes 3x16x16 inputs (three stride-2 convs give the 2x2 map expected by lin1)\n    print(summary(net, input_size=(3, 16, 16)))\n"
]
| [
[
"torch.nn.Linear",
"torch.cat",
"torch.nn.MSELoss",
"torch.nn.Sigmoid",
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.nn.ConvTranspose2d",
"torch.nn.LeakyReLU",
"torch.no_grad",
"torch.unsqueeze",
"torch.nn.ReLU",
"torch.squeeze",
"torch.cuda.empty_cache",
"torch.nn.Conv2d",
"torch.flatten",
"torch.nn.TransformerEncoderLayer",
"torch.argmax"
]
]
|
ilBarbara/BioRL | [
"82d2964e19e292fde84ff7cbaaf78c185511d75d"
]
| [
"src/agents/dqn/dqn.py"
]
| [
"\"\"\"\nImplements a DQN learning agent.\n\"\"\"\n\nimport os\nimport pickle\nimport random\nimport time\nfrom copy import deepcopy\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nfrom src.agents.dqn.utils import ReplayBuffer, Logger, TestMetric, set_global_seed\nfrom src.envs.utils import ExtraAction\n\nclass DQN:\n \"\"\"\n # Required parameters.\n envs : List of environments to use.\n network : Choice of neural network.\n\n # Initial network parameters.\n init_network_params : Pre-trained network to load upon initialisation.\n init_weight_std : Standard deviation of initial network weights.\n\n # DQN parameters\n double_dqn : Whether to use double DQN (DDQN).\n update_target_frequency : How often to update the DDQN target network.\n gamma : Discount factor.\n clip_Q_targets : Whether negative Q targets are clipped (generally True/False for irreversible/reversible agents).\n\n # Replay buffer.\n replay_start_size : The capacity of the replay buffer at which training can begin.\n replay_buffer_size : Maximum buffer capacity.\n minibatch_size : Minibatch size.\n update_frequency : Number of environment steps taken between parameter update steps.\n\n # Learning rate\n update_learning_rate : Whether to dynamically update the learning rate (if False, initial_learning_rate is always used).\n initial_learning_rate : Initial learning rate.\n peak_learning_rate : The maximum learning rate.\n peak_learning_rate_step : The timestep (from the start, not from when training starts) at which the peak_learning_rate is found.\n final_learning_rate : The final learning rate.\n final_learning_rate_step : The timestep of the final learning rate.\n\n # Optional regularization.\n max_grad_norm : The norm grad to clip gradients to (None means no clipping).\n weight_decay : The weight decay term for regularisation.\n\n # Exploration\n update_exploration : Whether to update the exploration rate (False would tend to be used with NoisyNet layers).\n initial_exploration_rate : Inital exploration rate.\n final_exploration_rate : Final exploration rate.\n final_exploration_step : Timestep at which the final exploration rate is reached.\n\n # Loss function\n adam_epsilon : epsilon for ADAM optimisation.\n loss=\"mse\" : Loss function to use.\n\n # Saving the agent\n save_network_frequency : Frequency with which the network parameters are saved.\n network_save_path : Folder into which the network parameters are saved.\n\n # Testing the agent\n evaluate : Whether to test the agent during training.\n test_envs : List of test environments. None means the training environments (envs) are used.\n test_episodes : Number of episodes at each test point.\n test_frequency : Frequency of tests.\n test_save_path : Folder into which the test scores are saved.\n test_metric : The metric used to quantify performance.\n\n # Other\n logging : Whether to log.\n seed : The global seed to set. 
None means randomly selected.\n    \"\"\"\n    def __init__(\n        self,\n        envs,\n        network,\n\n        # Initial network parameters.\n        init_network_params=None,\n        init_weight_std=None,\n\n        # DQN parameters\n        double_dqn=True,\n        update_target_frequency=10000,\n        gamma=0.99,\n        clip_Q_targets=False,\n\n        # Replay buffer.\n        replay_start_size=50000,\n        replay_buffer_size=1000000,\n        minibatch_size=32,\n        update_frequency=1,\n\n        # Learning rate\n        update_learning_rate=True,\n        initial_learning_rate=0,\n        peak_learning_rate=1e-3,\n        peak_learning_rate_step=10000,\n        final_learning_rate=5e-5,\n        final_learning_rate_step=200000,\n\n        # Optional regularization.\n        max_grad_norm=None,\n        weight_decay=0,\n\n        # Exploration\n        update_exploration=True,\n        initial_exploration_rate=1,\n        final_exploration_rate=0.1,\n        final_exploration_step=1000000,\n\n        # Loss function\n        adam_epsilon=1e-8,\n        loss=\"mse\",\n\n        # Saving the agent\n        save_network_frequency=10000,\n        network_save_path='network',\n\n        # Testing the agent\n        evaluate=True,\n        test_envs=None,\n        test_episodes=20,\n        test_frequency=10000,\n        test_save_path='test_scores',\n        test_metric=TestMetric.ENERGY_ERROR,\n\n        # Other\n        logging=True,\n        seed=None\n    ):\n\n        self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n        self.double_dqn = double_dqn\n\n        self.replay_start_size = replay_start_size\n        self.replay_buffer_size = replay_buffer_size\n        self.gamma = gamma\n        self.clip_Q_targets = clip_Q_targets\n        self.update_target_frequency = update_target_frequency\n        self.minibatch_size = minibatch_size\n\n        self.update_learning_rate = update_learning_rate\n        self.initial_learning_rate = initial_learning_rate\n        self.peak_learning_rate = peak_learning_rate\n        self.peak_learning_rate_step = peak_learning_rate_step\n        self.final_learning_rate = final_learning_rate\n        self.final_learning_rate_step = final_learning_rate_step\n\n        self.max_grad_norm = max_grad_norm\n        self.weight_decay = weight_decay\n        self.update_frequency = update_frequency\n        self.update_exploration = update_exploration\n        self.initial_exploration_rate = initial_exploration_rate\n        self.epsilon = self.initial_exploration_rate\n        self.final_exploration_rate = final_exploration_rate\n        self.final_exploration_step = final_exploration_step\n        self.adam_epsilon = adam_epsilon\n        self.logging = logging\n        if callable(loss):\n            self.loss = loss\n        else:\n            try:\n                self.loss = {'huber': F.smooth_l1_loss, 'mse': F.mse_loss}[loss]\n            except KeyError:\n                raise ValueError(\"loss must be 'huber', 'mse' or a callable\")\n\n        if not isinstance(envs, list):\n            envs = [envs]\n        self.envs = envs\n        self.env, self.acting_in_reversible_spin_env = self.get_random_env()\n\n        self.replay_buffers = {}\n        for n_spins in set([env.action_space.n for env in self.envs]):\n            self.replay_buffers[n_spins] = ReplayBuffer(self.replay_buffer_size)\n\n        self.replay_buffer = self.get_replay_buffer_for_env(self.env)\n\n        self.seed = random.randint(0, int(1e6)) if seed is None else seed\n\n        for env in self.envs:\n            set_global_seed(self.seed, env)\n\n        self.network = network().to(self.device)\n        self.init_network_params = init_network_params\n        self.init_weight_std = init_weight_std\n        if self.init_network_params is not None:\n            print(\"Pre-loading network parameters from {}.\\n\".format(init_network_params))\n            self.load(init_network_params)\n        else:\n            if self.init_weight_std is not None:\n                def init_weights(m):\n                    if isinstance(m, torch.nn.Linear):\n                        print(\"Setting weights for\", m)\n                        m.weight.normal_(0, init_weight_std)\n                with torch.no_grad():\n                    self.network.apply(init_weights)\n\n        self.target_network = 
network().to(self.device)\n self.target_network.load_state_dict(self.network.state_dict())\n for param in self.target_network.parameters():\n param.requires_grad = False\n\n self.optimizer = optim.Adam(self.network.parameters(), lr=self.initial_learning_rate, eps=self.adam_epsilon,\n weight_decay=self.weight_decay)\n\n self.evaluate = evaluate\n if test_envs in [None, [None]]:\n # By default, test on the same environment(s) as are trained on.\n self.test_envs = self.envs\n else:\n if type(test_envs) != list:\n test_envs = [test_envs]\n self.test_envs = test_envs\n self.test_episodes = int(test_episodes)\n self.test_frequency = test_frequency\n self.test_save_path = test_save_path\n self.test_metric = test_metric\n\n self.losses_save_path = os.path.join(os.path.split(self.test_save_path)[0], \"losses.pkl\")\n\n if not self.acting_in_reversible_spin_env:\n for env in self.envs:\n assert env.extra_action == ExtraAction.NONE, \"For deterministic MDP, no extra action is allowed.\"\n for env in self.test_envs:\n assert env.extra_action == ExtraAction.NONE, \"For deterministic MDP, no extra action is allowed.\"\n\n self.allowed_action_state = self.env.get_allowed_action_states()\n\n self.save_network_frequency = save_network_frequency\n self.network_save_path = network_save_path\n\n def get_random_env(self, envs=None):\n if envs is None:\n env = random.sample(self.envs, k=1)[0]\n else:\n env = random.sample(envs, k=1)[0]\n\n return env, env.reversible_spins\n\n def get_replay_buffer_for_env(self, env):\n return self.replay_buffers[env.action_space.n]\n\n def get_random_replay_buffer(self):\n return random.sample(self.replay_buffers.items(), k=1)[0][1]\n\n def learn(self, timesteps, verbose=False):\n\n if self.logging:\n logger = Logger()\n\n # Initialise the state\n state = torch.as_tensor(self.env.reset())\n score = 0\n losses_eps = []\n t1 = time.time()\n\n test_scores = []\n losses = []\n\n is_training_ready = False\n\n for timestep in range(timesteps):\n\n if not is_training_ready:\n if all([len(rb)>=self.replay_start_size for rb in self.replay_buffers.values()]):\n print('\\nAll buffers have {} transitions stored - training is starting!\\n'.format(\n self.replay_start_size))\n is_training_ready=True\n\n # Choose action\n action = self.act(state.to(self.device).float(), is_training_ready=is_training_ready)\n\n # Update epsilon\n if self.update_exploration:\n self.update_epsilon(timestep)\n\n # Update learning rate\n if self.update_learning_rate:\n self.update_lr(timestep)\n\n # Perform action in environment\n state_next, reward, done, _ = self.env.step(action)\n\n score += reward\n\n # Store transition in replay buffer\n action = torch.as_tensor([action], dtype=torch.long)\n reward = torch.as_tensor([reward], dtype=torch.float)\n state_next = torch.as_tensor(state_next)\n\n done = torch.as_tensor([done], dtype=torch.float)\n\n self.replay_buffer.add(state, action, reward, state_next, done)\n\n if done:\n # Reinitialise the state\n if verbose:\n # print(losses_eps)\n loss_str = \"{:.2e}\".format( 0.0 if len(losses_eps) == 0 else np.nanmean(losses_eps) ) if is_training_ready else \"N/A\"\n print(\"timestep : {}, episode time: {}, score : {}, mean loss: {}, time : {} s\".format(\n (timestep+1),\n self.env.current_step,\n np.round(score,3),\n loss_str,\n round(time.time() - t1, 3)))\n\n if self.logging:\n logger.add_scalar('Episode_score', score, timestep)\n self.env, self.acting_in_reversible_spin_env = self.get_random_env()\n self.replay_buffer = self.get_replay_buffer_for_env(self.env)\n state 
= torch.as_tensor(self.env.reset())\n score = 0\n losses_eps = []\n t1 = time.time()\n\n else:\n state = state_next\n\n if is_training_ready:\n\n # Update the main network\n if timestep % self.update_frequency == 0:\n\n # Sample a batch of transitions\n transitions = self.get_random_replay_buffer().sample(self.minibatch_size, self.device)\n\n # Train on selected batch\n loss = self.train_step(transitions)\n losses.append([timestep, loss])\n losses_eps.append(loss)\n\n if self.logging:\n logger.add_scalar('Loss', loss, timestep)\n\n # Periodically update target network\n if timestep % self.update_target_frequency == 0:\n self.target_network.load_state_dict(self.network.state_dict())\n\n if (timestep+1) % self.test_frequency == 0 and self.evaluate and is_training_ready:\n test_score = self.evaluate_agent()\n print('\\nTest score: {}\\n'.format(np.round(test_score,3)))\n\n if self.test_metric in [TestMetric.FINAL_CUT,TestMetric.MAX_CUT,TestMetric.CUMULATIVE_REWARD]:\n best_network = all([test_score > score for t,score in test_scores])\n elif self.test_metric in [TestMetric.ENERGY_ERROR, TestMetric.BEST_ENERGY]:\n best_network = all([test_score < score for t, score in test_scores])\n else:\n raise NotImplementedError(\"{} is not a recognised TestMetric\".format(self.test_metric))\n\n if best_network:\n path = self.network_save_path\n path_main, path_ext = os.path.splitext(path)\n path_main += \"_best\"\n if path_ext == '':\n path_ext += '.pth'\n self.save(path_main + path_ext)\n\n test_scores.append([timestep+1,test_score])\n\n if (timestep + 1) % self.save_network_frequency == 0 and is_training_ready:\n path = self.network_save_path\n path_main, path_ext = os.path.splitext(path)\n path_main += str(timestep+1)\n if path_ext == '':\n path_ext += '.pth'\n self.save(path_main+path_ext)\n\n if self.logging:\n logger.save()\n\n path = self.test_save_path\n if os.path.splitext(path)[-1] == '':\n path += '.pkl'\n\n with open(path, 'wb+') as output:\n pickle.dump(np.array(test_scores), output, pickle.HIGHEST_PROTOCOL)\n if verbose:\n print('test_scores saved to {}'.format(path))\n\n with open(self.losses_save_path, 'wb+') as output:\n pickle.dump(np.array(losses), output, pickle.HIGHEST_PROTOCOL)\n if verbose:\n print('losses saved to {}'.format(self.losses_save_path))\n\n\n @torch.no_grad()\n def __only_bad_actions_allowed(self, state, network):\n x = (state[0, :] == self.allowed_action_state).nonzero()\n q_next = network(state.to(self.device).float())[x].max()\n return True if q_next < 0 else False\n\n def train_step(self, transitions):\n\n states, actions, rewards, states_next, dones = transitions\n\n if self.acting_in_reversible_spin_env:\n # Calculate target Q\n with torch.no_grad():\n if self.double_dqn:\n greedy_actions = self.network(states_next.float()).argmax(1, True)\n q_value_target = self.target_network(states_next.float()).gather(1, greedy_actions)\n else:\n q_value_target = self.target_network(states_next.float()).max(1, True)[0]\n\n else:\n target_preds = self.target_network(states_next.float())\n # disallowed_actions_mask = (states_next[:, 0, :] != self.allowed_action_state)\n disallowed_actions_mask = (states_next[:, :, 0] != self.allowed_action_state)\n # Calculate target Q, selecting ONLY ALLOWED ACTIONS greedily.\n with torch.no_grad():\n if self.double_dqn:\n network_preds = self.network(states_next.float())\n # Set the Q-value of disallowed actions to a large negative number (-10000) so they are not selected.\n network_preds_allowed = 
network_preds.masked_fill(disallowed_actions_mask,-10000)\n greedy_actions = network_preds_allowed.argmax(1, True)\n q_value_target = target_preds.gather(1, greedy_actions)\n else:\n q_value_target = target_preds.masked_fill(disallowed_actions_mask,-10000).max(1, True)[0]\n\n if self.clip_Q_targets:\n q_value_target[q_value_target < 0] = 0\n\n # Calculate TD target\n td_target = rewards + (1 - dones) * self.gamma * q_value_target\n\n loss_list = []\n update_iternum = 5\n for i in range(update_iternum):\n # Calculate Q value\n q_value = self.network(states.float()).gather(1, actions)\n\n # Calculate loss\n loss = self.loss(q_value, td_target, reduction='mean')\n loss_list.append(loss.item())\n\n # Update weights\n self.optimizer.zero_grad()\n loss.backward()\n\n if self.max_grad_norm is not None: #Optional gradient clipping\n torch.nn.utils.clip_grad_norm_(self.network.parameters(), self.max_grad_norm)\n\n self.optimizer.step()\n\n q_value_after = self.network(states.float()).gather(1, actions)\n loss_after_opt = self.loss(q_value_after, td_target, reduction='mean')\n loss_list.append(loss_after_opt.item())\n print(loss_list)\n\n return loss.item()\n\n def act(self, state, is_training_ready=True):\n if is_training_ready and random.uniform(0, 1) >= self.epsilon:\n # Action that maximises Q function\n action = self.predict(state)\n else:\n if self.acting_in_reversible_spin_env:\n # Random random spin.\n action = np.random.randint(0, self.env.action_space.n)\n else:\n # Flip random spin from that hasn't yet been flipped.\n # x = (state[0, :] == self.allowed_action_state).nonzero()\n x = (state[:, 0] == self.allowed_action_state).nonzero()\n action = x[np.random.randint(0, len(x))].item()\n return action\n\n def update_epsilon(self, timestep):\n eps = self.initial_exploration_rate - (self.initial_exploration_rate - self.final_exploration_rate) * (\n timestep / self.final_exploration_step\n )\n self.epsilon = max(eps, self.final_exploration_rate)\n\n def update_lr(self, timestep):\n if timestep <= self.peak_learning_rate_step:\n lr = self.initial_learning_rate - (self.initial_learning_rate - self.peak_learning_rate) * (\n timestep / self.peak_learning_rate_step\n )\n elif timestep <= self.final_learning_rate_step:\n lr = self.peak_learning_rate - (self.peak_learning_rate - self.final_learning_rate) * (\n (timestep - self.peak_learning_rate_step) / (self.final_learning_rate_step - self.peak_learning_rate_step)\n )\n else:\n lr = None\n\n if lr is not None:\n for g in self.optimizer.param_groups:\n g['lr'] = lr\n\n\n @torch.no_grad()\n def predict(self, states, acting_in_reversible_spin_env=None):\n\n if acting_in_reversible_spin_env is None:\n acting_in_reversible_spin_env = self.acting_in_reversible_spin_env\n\n qs = self.network(states)\n\n if acting_in_reversible_spin_env:\n if qs.dim() == 1:\n actions = qs.argmax().item()\n else:\n actions = qs.argmax(1, True).squeeze(1).cpu().numpy()\n return actions\n else:\n if qs.dim() == 1:\n # x = (states[0, :] == self.allowed_action_state).nonzero()\n x = (states[:, 0] == self.allowed_action_state).nonzero()\n actions = x[qs[x].argmax().item()].item()\n else:\n disallowed_actions_mask = (states[:, :, 0] != self.allowed_action_state)\n # disallowed_actions_mask = (states[:, 0, :] != self.allowed_action_state)\n qs_allowed = qs.masked_fill(disallowed_actions_mask, -10000)\n actions = qs_allowed.argmax(1, True).squeeze(1).cpu().numpy()\n return actions\n\n @torch.no_grad()\n def evaluate_agent(self, batch_size=None):\n \"\"\"\n Evaluates agent's current 
performance. Run multiple evaluations at once\n        so the network predictions can be done in batches.\n        \"\"\"\n        if batch_size is None:\n            batch_size = self.minibatch_size\n\n        i_test = 0\n        i_comp = 0\n        test_scores = []\n        batch_scores = [0]*batch_size\n\n        test_envs = np.array([None]*batch_size)\n        obs_batch = []\n\n        for i in range(len(self.test_envs)):\n            test_env = self.test_envs[i]\n            obs = test_env.reset()\n            score = 0.0\n            while True:\n                action = self.predict(torch.FloatTensor(obs).to(self.device),\n                                      test_env.reversible_spins)\n                obs, rew, done, info = test_env.step(action)\n                score += rew\n                if done:\n                    test_scores.append(score)\n                    break\n\n        '''\n        while i_comp < self.test_episodes:\n\n            for i, env in enumerate(test_envs):\n                if env is None and i_test < self.test_episodes:\n                    test_env, testing_in_reversible_spin_env = self.get_random_env(self.test_envs)\n                    obs = test_env.reset()\n                    test_env = deepcopy(test_env)\n\n                    test_envs[i] = test_env\n                    obs_batch.append(obs)\n\n                    i_test += 1\n\n            actions = self.predict(torch.FloatTensor(np.array(obs_batch)).to(self.device),\n                                   testing_in_reversible_spin_env)\n\n            obs_batch = []\n\n            i = 0\n            for env, action in zip(test_envs, actions):\n\n                if env is not None:\n                    obs, rew, done, info = env.step(action)\n\n                    if self.test_metric == TestMetric.CUMULATIVE_REWARD:\n                        batch_scores[i] += rew\n\n                    if done:\n                        if self.test_metric == TestMetric.BEST_ENERGY:\n                            batch_scores[i] = env.best_energy\n                        elif self.test_metric == TestMetric.ENERGY_ERROR:\n                            batch_scores[i] = abs(env.best_energy - env.calculate_best()[0])\n                        elif self.test_metric == TestMetric.MAX_CUT:\n                            batch_scores[i] = env.get_best_cut()\n                        elif self.test_metric == TestMetric.FINAL_CUT:\n                            batch_scores[i] = env.calculate_cut()\n\n                        test_scores.append(batch_scores[i])\n\n                        if self.test_metric == TestMetric.CUMULATIVE_REWARD:\n                            batch_scores[i] = 0\n\n                        i_comp += 1\n                        test_envs[i] = None\n                    else:\n                        obs_batch.append(obs)\n\n                i += 1\n\n        if self.test_metric == TestMetric.ENERGY_ERROR:\n            print(\"\\n{}/{} graphs solved optimally\".format(np.count_nonzero(np.array(test_scores)==0),self.test_episodes), end=\"\")\n        '''\n        print(test_scores)\n        return np.mean(test_scores)\n\n    def save(self, path='network.pth'):\n        # append a default extension if none was given\n        if os.path.splitext(path)[-1] == '':\n            path += '.pth'\n        torch.save(self.network.state_dict(), path)\n\n    def load(self, path):\n        self.network.load_state_dict(torch.load(path, map_location=self.device))"
]
| [
[
"numpy.array",
"numpy.round",
"torch.no_grad",
"torch.FloatTensor",
"numpy.mean",
"numpy.nanmean",
"torch.cuda.is_available",
"numpy.random.randint",
"torch.load",
"torch.as_tensor"
]
]
|
xianba/datadrawer | [
"864e0cd72f169a3cf107b827a5aa1b3a0717dd09"
]
| [
"datadrawer/src/draw.py"
]
| [
"#!/usr/bin/env python\r\n# -*- encoding: utf-8 -*-\r\n\"\"\"\r\n@File : draw.py\r\n@Version : 1.0.0\r\n@Author : xiaxianba\r\n@License : (C) Copyright 2006-2019\r\n@Contact : [email protected]\r\n@Software: PyCharm\r\n@Time : 2019/5/6 15:14\r\n@Desc :\r\n\"\"\"\r\n\r\nfrom matplotlib import pyplot as plt\r\n\r\n\r\ndef draw_bar(list_x, list_y, label_x=\"X\", label_y=\"Y\", label_title=\"Title\"):\r\n\r\n plt.bar(list_x, list_y, align='center')\r\n plt.title(label_title)\r\n plt.xlabel(label_x)\r\n plt.ylabel(label_y)\r\n plt.show()\r\n"
]
| [
[
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.bar"
]
]
|
Kundusoumik123/ML-OpenCV-Projects | [
"38807a9fbc436624ee5808c393e89d8a22502e73"
]
| [
"New Projects/face_recognize.py"
]
| [
"import cv2, sys, numpy, os \r\nsize = 4\r\nhaar_file = 'haarcascade_frontalface_default.xml'\r\ndatasets = 'datasets'\r\nprint('Recognizing Face Please Be in sufficient Lights...') \r\n(images, lables, names, id) = ([], [], {}, 0) \r\nfor (subdirs, dirs, files) in os.walk(datasets): \r\n for subdir in dirs: \r\n names[id] = subdir \r\n subjectpath = os.path.join(datasets, subdir) \r\n for filename in os.listdir(subjectpath): \r\n path = subjectpath + '/' + filename \r\n lable = id\r\n images.append(cv2.imread(path, 0)) \r\n lables.append(int(lable)) \r\n id += 1\r\n(width, height) = (130, 100) \r\n(images, lables) = [numpy.array(lis) for lis in [imagews, lables]] \r\nmodel = cv2.face.LBPHFaceRecognizer_create() #converts each pixel to some binary value\r\nmodel.train(images, lables) \r\nface_cascade = cv2.CascadeClassifier(haar_file) #comparing the pixels with standar harrarcascade file\r\nwebcam = cv2.VideoCapture(0)\r\ncount=1\r\nwhile count<20: \r\n (_, im) = webcam.read() \r\n gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY) #conversion to grayscale image\r\n faces = face_cascade.detectMultiScale(gray, 1.3, 5) #detection of multiple images\r\n for (x, y, w, h) in faces:\r\n #dynamically creating rectangles around the faces \r\n cv2.rectangle(im, (x, y), (x + w, y + h), (255, 0, 0), 2) \r\n face = gray[y:y + h, x:x + w] \r\n face_resize = cv2.resize(face, (width, height)) \r\n prediction = model.predict(face_resize) \r\n cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 3)\r\n count+=1\r\n \r\n if prediction[1]<500: \r\n cv2.putText(im, '% s - %.0f' % \r\n(names[prediction[0]], prediction[1]), (x-10, y-10), \r\ncv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0)) \r\n else: \r\n cv2.putText(im, 'not recognized', \r\n(x-10, y-10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0)) \r\n cv2.imshow('OpenCV', im) \r\n key = cv2.waitKey(10) \r\n if key == 27: \r\n break\r\n \r\ncv2.destroyAllWindows()\r\n\r\n"
]
| [
[
"numpy.array"
]
]
|
meyer9/nionui | [
"ca2f9d773bb956e064f40c0cac2465f664447953"
]
| [
"examples/basic/Buttons.py"
]
| [
"# standard libraries\n# None\n\n# third party libraries\nimport numpy\n\n# local libraries\nfrom nion.ui import Application\nfrom nion.ui import Window\nfrom nion.utils import Binding\nfrom nion.utils import Converter\nfrom nion.utils import Model\n\n\n# user program below\n\nclass ButtonsApplication(Application.Application):\n\n def __init__(self, ui):\n super().__init__(ui)\n\n def start(self):\n # the start method should create a document window that will be the focus of the ui\n self.window = ButtonsWindow(self.ui, app=self)\n self.window.title = \"Buttons\"\n self.window.show()\n return True\n\n\nclass ButtonsWindow(Window.Window):\n\n def __init__(self, ui, app=None):\n super().__init__(ui, app)\n\n # a text model to hold the label widget text\n text_model = Model.PropertyModel(0)\n\n # make bitmap_data (random static) for icon push button\n bitmap = numpy.zeros((32, 32, 4), numpy.uint8)\n bitmap[..., 0] = (numpy.random.randn(32, 32) * 255).astype(numpy.uint8) # blue\n bitmap[..., 1] = (numpy.random.randn(32, 32) * 255).astype(numpy.uint8) # green\n bitmap[..., 2] = (numpy.random.randn(32, 32) * 255).astype(numpy.uint8) # red\n bitmap[..., 3] = 255\n bitmap_data = bitmap.view(numpy.uint32).reshape(bitmap.shape[:-1])\n\n # create the widgets for the window\n label_widget = self.ui.create_label_widget()\n push_button_widget = self.ui.create_push_button_widget(\"Push Me\")\n icon_button_widget = self.ui.create_push_button_widget()\n icon_button_widget.icon = bitmap_data\n\n # create a row for the buttons\n button_row = self.ui.create_row_widget()\n button_row.add_spacing(13)\n button_row.add(push_button_widget)\n button_row.add_spacing(13)\n button_row.add(icon_button_widget)\n button_row.add_stretch()\n\n # create a row for the label\n label_row = self.ui.create_row_widget()\n label_row.add_spacing(13)\n label_row.add(label_widget)\n label_row.add_stretch()\n\n # create a column to hold the two rows and attach it to the window\n content = self.ui.create_column_widget()\n content.add(button_row)\n content.add(label_row)\n self.attach_widget(content)\n\n # when either button is clicked, this will be called\n def button_clicked():\n text_model.value = text_model.value + 1\n\n # connect the buttons to the button_clicked function\n push_button_widget.on_clicked = button_clicked\n icon_button_widget.on_clicked = button_clicked\n\n # and bind the label txt to the 'value' property of the text_model, but attach an integer-to-string converter to it.\n label_widget.bind_text(Binding.PropertyBinding(text_model, \"value\", converter=Converter.IntegerToStringConverter(format=\"You have clicked {:d} times.\")))\n\n\ndef main(args, bootstrap_args):\n app = ButtonsApplication(Application.make_ui(bootstrap_args))\n app.initialize()\n return app\n"
]
| [
[
"numpy.random.randn",
"numpy.zeros"
]
]
|
rob-luke/autoreject | [
"b73847949db64740680e3d325b824ed9aa05a8cc"
]
| [
"autoreject/tests/test_utils.py"
]
| [
"# Author: Mainak Jas <[email protected]>\n# License: BSD (3-clause)\n\nfrom numpy.testing import assert_array_equal, assert_array_almost_equal\nimport pytest\n\nimport mne\nfrom mne.datasets import sample\nfrom mne.bem import _check_origin\nfrom mne import io\n\nfrom autoreject.utils import clean_by_interp, interpolate_bads\nfrom autoreject.utils import _interpolate_bads_eeg\nimport mne.channels.interpolation\n\ndata_path = sample.data_path()\nraw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'\nraw = io.read_raw_fif(raw_fname, preload=False)\nraw.crop(0, 15)\nraw.info['projs'] = list()\n\nevoked_fname = data_path + '/MEG/sample/sample_audvis-ave.fif'\nevoked = mne.read_evokeds(evoked_fname, condition='Left Auditory',\n baseline=(None, 0))\n\n\ndef test_utils():\n \"\"\"Test utils.\"\"\"\n event_id = {'Visual/Left': 3}\n tmin, tmax = -0.2, 0.5\n events = mne.find_events(raw)\n picks = mne.pick_channels(raw.info['ch_names'],\n ['MEG 2443', 'MEG 2442', 'MEG 2441'])\n epochs = mne.Epochs(raw, events, event_id, tmin, tmax,\n picks=picks, baseline=(None, 0),\n reject=None, preload=True)\n\n this_epoch = epochs.copy()\n assert this_epoch.info['bads'] == ['MEG 2443']\n epochs_clean = clean_by_interp(this_epoch)\n assert this_epoch.info['bads'] == ['MEG 2443']\n assert_array_equal(this_epoch.get_data(), epochs.get_data())\n pytest.raises(AssertionError, assert_array_equal, epochs_clean.get_data(),\n this_epoch.get_data())\n\n picks_meg = mne.pick_types(evoked.info, meg='grad', eeg=False, exclude=[])\n picks_eeg = mne.pick_types(evoked.info, meg=False, eeg=True, exclude=[])\n picks_bad_meg = mne.pick_channels(evoked.ch_names, include=['MEG 2443'])\n picks_bad_eeg = mne.pick_channels(evoked.ch_names, include=['EEG 053'])\n evoked_orig = evoked.copy()\n for picks, picks_bad in zip([picks_meg, picks_eeg],\n [picks_bad_meg, picks_bad_eeg]):\n evoked_autoreject = interpolate_bads(evoked, picks=picks,\n reset_bads=False)\n evoked.interpolate_bads(reset_bads=False)\n assert_array_equal(evoked.data[picks_bad],\n evoked_autoreject.data[picks_bad])\n pytest.raises(AssertionError, assert_array_equal,\n evoked_orig.data[picks_bad], evoked.data[picks_bad])\n\n # test that autoreject EEG interpolation code behaves the same as MNE\n evoked_ar = evoked_orig.copy()\n evoked_mne = evoked_orig.copy()\n\n origin = _check_origin('auto', evoked_ar.info)\n _interpolate_bads_eeg(evoked_ar, picks=None)\n mne.channels.interpolation._interpolate_bads_eeg(evoked_mne, origin=origin)\n assert_array_almost_equal(evoked_ar.data, evoked_mne.data)\n\n\ndef test_interpolate_bads():\n \"\"\"Test interpolate bads.\"\"\"\n event_id = None\n events = mne.find_events(raw)\n tmin, tmax = -0.2, 0.5\n for ii, ch_name in enumerate(raw.info['ch_names'][:14]):\n raw.set_channel_types({ch_name: 'bio'})\n raw.rename_channels({ch_name: 'BIO%02d' % ii})\n\n picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=False)\n epochs = mne.Epochs(raw, events, event_id, tmin, tmax,\n baseline=(None, 0), decim=10,\n reject=None, preload=True)[:10]\n epochs.info['bads'] = ['MEG 2212']\n interpolate_bads(epochs, picks)\n"
]
| [
[
"numpy.testing.assert_array_almost_equal",
"numpy.testing.assert_array_equal"
]
]
|
gyanz/pyclaw | [
"15bbd970dd84ba69337e2cee2b0b1522811b0af5"
]
| [
"examples/advection_2d_annulus/mapc2p.py"
]
| [
"\n\nfrom __future__ import absolute_import\ndef mapc2p(xc,yc):\n \"\"\"\n Specifies the mapping to curvilinear coordinates \n \"\"\"\n import numpy as np\n\n # Polar coordinates (x coordinate = radius, y coordinate = theta)\n xp = xc * np.cos(yc)\n yp = xc * np.sin(yc)\n return xp,yp\n"
]
| [
[
"numpy.sin",
"numpy.cos"
]
]
|
janthiemen/data_scout | [
"6366eedfb20ed429bc96100de4dd2c7409e5dd88"
]
| [
"data_scout/connectors/csv.py"
]
| [
"import csv\nimport random\nimport sys\nfrom typing import List\n\nimport pandas as pd\n\nfrom .connector import Connector\n\n\nclass CSV(Connector):\n \"\"\"\n Read data from a CSV file.\n \"\"\"\n TMP_SINK = False\n MAX_SIZE = 2000000\n MAX_ROWS = 200\n fields = {\n \"filename\": {\"name\": \"Filename\", \"type\": \"file\", \"input\": \"file\", \"help\": \"The filename of the CSV file.\",\n \"required\": True},\n \"delimiter\": {\"name\": \"Delimiter\", \"type\": \"string\", \"input\": \"text\", \"help\": \"The delimiter in the CSV file.\",\n \"required\": True, \"default\": \",\"},\n \"has_header\": {\"name\": \"Has header\", \"type\": \"boolean\", \"input\": \"switch\", \"required\": True, \"default\": False,\n \"help\": \"Does the file have a header containing the column names?.\"},\n \"encoding\": {\"name\": \"Encoding\", \"type\": \"string\", \"input\": \"select\", \"options\": [\"UTF-8\", \"latin-1\"],\n \"default\": \"UTF-8\", \"help\": \"The encoding of the CSV file.\", \"required\": True,\n \"is_advanced\": True},\n }\n\n def __init__(self, arguments):\n \"\"\"Initialize the data source with the given parameters.\n\n Arguments:\n arguments {dict} -- The arguments\n \"\"\"\n super().__init__(arguments)\n self.filename = arguments[\"filename\"]\n self.delimiter = arguments[\"delimiter\"]\n self.has_header = arguments[\"has_header\"]\n self.encoding = arguments[\"encoding\"]\n\n def __call__(self, sample: bool = False, sampling_technique: str = \"top\", column_types: bool = False) -> List[dict]:\n \"\"\"This class is called when the data needs to be loaded.\n\n Arguments:\n :type sample: boolean: Whether to take a sample or not\n :type sampling_technique: str: Which sampling technique to use (top, stratisfied, random)\n\n Returns:\n dict -- The row, including the extra output column\n \"\"\"\n # TODO: Return the data (as a beam stream or a pandas data frame (in case it's a sample))\n if sample:\n # TODO: Make this big data proof (chucking, sampling before loading, etc.)\n with open(self.filename, encoding=self.encoding) as f:\n number_of_rows = sum(1 for line in f)\n\n # We'll return to the start\n f.seek(0)\n row_sizes = []\n for line in f:\n # We'll test the first 25 rows to determine average row size\n row_sizes.append(sys.getsizeof(line))\n\n # We want to check at least 25 rows, at most 250 and ideally 1%\n if len(row_sizes) > max(min(number_of_rows * 0.01, 250), 25):\n break\n\n sample_size = min(self.MAX_ROWS, round(self.MAX_SIZE / (sum(row_sizes) / len(row_sizes))))\n column_names, data = [], []\n\n f.seek(0)\n reader = csv.reader(f, delimiter=self.delimiter)\n i = 0\n\n if sampling_technique == \"top\":\n # We'll just take the top rows\n for row in reader:\n if i == 0 and self.has_header:\n column_names = row\n elif i <= sample_size:\n data.append(row)\n else:\n break\n i += 1\n elif sampling_technique == \"stratified\":\n # We'll take every xth row\n stratified = round(number_of_rows / sample_size)\n for row in reader:\n if i == 0 and self.has_header:\n column_names = row\n elif i % stratified == 0:\n data.append(row)\n i += 1\n else:\n # We're doing random sampling ...\n rows_to_take = random.sample(range(1 if self.has_header else 0, number_of_rows), sample_size)\n rows_to_take = sorted(rows_to_take)\n for row in reader:\n if i == 0 and self.has_header:\n column_names = row\n elif i == rows_to_take[0]:\n data.append(row)\n rows_to_take.pop(0)\n if len(rows_to_take) == 0:\n break\n i += 1\n\n df = pd.DataFrame(data, columns=column_names)\n return 
df.to_dict(orient=\"records\")\n else:\n # TODO: To be implemented properly!\n # raise NotImplementedError()\n\n df = pd.read_csv(self.filename, sep=self.delimiter, encoding=self.encoding,\n header='infer' if self.has_header else None)\n return df.to_dict(orient=\"records\")\n"
]
| [
[
"pandas.DataFrame",
"pandas.read_csv"
]
]
|
Akado2009/katib | [
"cf15cd4dbb3e61814e8054678eeee8c37411fbd1"
]
| [
"pkg/suggestion/nasrl_service.py"
]
| [
"from pkg.suggestion.NAS_Reinforcement_Learning.Controller import Controller\nfrom pkg.suggestion.NAS_Reinforcement_Learning.Operation import SearchSpace\nfrom pkg.suggestion.NAS_Reinforcement_Learning.SuggestionParam import parseSuggestionParam\nimport tensorflow as tf\nimport grpc\nfrom pkg.api.python import api_pb2\nfrom pkg.api.python import api_pb2_grpc\nimport logging\nfrom logging import getLogger, StreamHandler, INFO, DEBUG\nimport json\nimport os\nimport time\n\n\nclass NasrlService(api_pb2_grpc.SuggestionServicer):\n def __init__(self, logger=None):\n self.manager_addr = \"vizier-core\"\n self.manager_port = 6789\n self.registered_studies = list()\n\n self.ctrl_cache_file = \"\"\n self.ctrl_step = 0\n self.is_first_run = True\n\n if not os.path.exists(\"ctrl_cache/\"):\n os.makedirs(\"ctrl_cache/\")\n\n if logger == None:\n self.logger = getLogger(__name__)\n FORMAT = '%(asctime)-15s StudyID %(studyid)s %(message)s'\n logging.basicConfig(format=FORMAT)\n handler = StreamHandler()\n handler.setLevel(INFO)\n self.logger.setLevel(INFO)\n self.logger.addHandler(handler)\n self.logger.propagate = False\n else:\n self.logger = logger\n\n def setup_controller(self, request):\n self.logger.info(\"-\" * 80 + \"\\nSetting Up Suggestion for StudyJob {}\\n\".format(request.study_id) + \"-\" * 80)\n self.tf_graph = tf.Graph()\n self.ctrl_step = 0\n self.ctrl_cache_file = \"ctrl_cache/{}.ckpt\".format(request.study_id)\n self._get_suggestion_param(request.param_id, request.study_id)\n self._get_search_space(request.study_id)\n\n with self.tf_graph.as_default():\n ctrl_param = self.suggestion_config\n self.controllers = Controller(\n num_layers=self.num_layers,\n num_operations=self.num_operations,\n lstm_size=ctrl_param['lstm_num_cells'],\n lstm_num_layers=ctrl_param['lstm_num_layers'],\n lstm_keep_prob=ctrl_param['lstm_keep_prob'],\n lr_init=ctrl_param['init_learning_rate'],\n lr_dec_start=ctrl_param['lr_decay_start'],\n lr_dec_every=ctrl_param['lr_decay_every'],\n lr_dec_rate=ctrl_param['lr_decay_rate'],\n l2_reg=ctrl_param['l2_reg'],\n entropy_weight=ctrl_param['entropy_weight'],\n bl_dec=ctrl_param['baseline_decay'],\n optim_algo=ctrl_param['optimizer'],\n skip_target=ctrl_param['skip-target'],\n skip_weight=ctrl_param['skip-weight'],\n name=\"Ctrl_\"+request.study_id)\n\n self.controllers.build_trainer()\n\n self.logger.info(\"Suggestion for StudyJob {} has been initialized.\".format(request.study_id))\n\n def GetSuggestions(self, request, context):\n if request.study_id not in self.registered_studies:\n self.setup_controller(request)\n self.is_first_run = True\n self.registered_studies.append(request.study_id)\n\n self.logger.info(\"-\" * 80 + \"\\nSuggestion Step {} for Study {}\\n\".format(self.ctrl_step, request.study_id) + \"-\" * 80)\n\n with self.tf_graph.as_default():\n\n saver = tf.train.Saver()\n ctrl = self.controllers\n\n controller_ops = {\n \"train_step\": ctrl.train_step,\n \"loss\": ctrl.loss,\n \"train_op\": ctrl.train_op,\n \"lr\": ctrl.lr,\n \"grad_norm\": ctrl.grad_norm,\n \"optimizer\": ctrl.optimizer,\n \"baseline\": ctrl.baseline,\n \"entropy\": ctrl.sample_entropy,\n \"sample_arc\": ctrl.sample_arc,\n \"skip_rate\": ctrl.skip_rate}\n\n run_ops = [\n controller_ops[\"loss\"],\n controller_ops[\"entropy\"],\n controller_ops[\"lr\"],\n controller_ops[\"grad_norm\"],\n controller_ops[\"baseline\"],\n controller_ops[\"skip_rate\"],\n controller_ops[\"train_op\"]]\n\n if self.is_first_run:\n self.logger.info(\"First time running suggestion for {}. 
Random architecture will be given.\".format(request.study_id))\n                with tf.Session() as sess:\n                    sess.run(tf.global_variables_initializer())\n                    arc = sess.run(controller_ops[\"sample_arc\"])\n                    # TODO: will use PVC to store the checkpoint to protect against unexpected suggestion pod restart\n                    saver.save(sess, self.ctrl_cache_file)\n\n                self.is_first_run = False\n\n            else:\n                with tf.Session() as sess:\n                    saver.restore(sess, self.ctrl_cache_file)\n\n                    valid_acc = ctrl.reward\n                    result = self.GetEvaluationResult(request.study_id)\n\n                    # This lstm cell is designed to maximize the metrics\n                    # However, if the user wants to minimize the metrics, we can take the negative of the result\n                    if self.opt_direction == api_pb2.MINIMIZE:\n                        result = -result\n\n                    loss, entropy, lr, gn, bl, skip, _ = sess.run(\n                        fetches=run_ops,\n                        feed_dict={valid_acc: result})\n                    self.logger.info(\"Suggestion updated. LSTM Controller Loss: {}\".format(loss))\n                    arc = sess.run(controller_ops[\"sample_arc\"])\n\n                    saver.save(sess, self.ctrl_cache_file)\n\n        arc = arc.tolist()\n        organized_arc = [0 for _ in range(self.num_layers)]\n        record = 0\n        for l in range(self.num_layers):\n            organized_arc[l] = arc[record: record + l + 1]\n            record += l + 1\n\n        nn_config = dict()\n        nn_config['num_layers'] = self.num_layers\n        nn_config['input_size'] = self.input_size\n        nn_config['output_size'] = self.output_size\n        nn_config['embedding'] = dict()\n        for l in range(self.num_layers):\n            opt = organized_arc[l][0]\n            nn_config['embedding'][opt] = self.search_space[opt].get_dict()\n\n        organized_arc_json = json.dumps(organized_arc)\n        nn_config_json = json.dumps(nn_config)\n\n        organized_arc_str = str(organized_arc_json).replace('\\\"', '\\'')\n        nn_config_str = str(nn_config_json).replace('\\\"', '\\'')\n\n        self.logger.info(\"\\nNew Neural Network Architecture (internal representation):\")\n        self.logger.info(organized_arc_json)\n        self.logger.info(\"\\nCorresponding Search Space Description:\")\n        self.logger.info(nn_config_str)\n        self.logger.info(\"\")\n\n        trials = []\n        trials.append(api_pb2.Trial(\n            study_id=request.study_id,\n            parameter_set=[\n                api_pb2.Parameter(\n                    name=\"architecture\",\n                    value=organized_arc_str,\n                    parameter_type=api_pb2.CATEGORICAL),\n                api_pb2.Parameter(\n                    name=\"nn_config\",\n                    value=nn_config_str,\n                    parameter_type=api_pb2.CATEGORICAL)\n            ],\n            )\n        )\n\n        channel = grpc.beta.implementations.insecure_channel(self.manager_addr, self.manager_port)\n        with api_pb2.beta_create_Manager_stub(channel) as client:\n            for i, t in enumerate(trials):\n                ctrep = client.CreateTrial(api_pb2.CreateTrialRequest(trial=t), 10)\n                trials[i].trial_id = ctrep.trial_id\n                self.logger.info(\"Trial {} Created\\n\".format(ctrep.trial_id))\n                self.prev_trial_id = ctrep.trial_id\n\n        self.ctrl_step += 1\n        return api_pb2.GetSuggestionsReply(trials=trials)\n\n    def GetEvaluationResult(self, studyID):\n        worker_list = []\n        channel = grpc.beta.implementations.insecure_channel(self.manager_addr, self.manager_port)\n        with api_pb2.beta_create_Manager_stub(channel) as client:\n            gwfrep = client.GetWorkerFullInfo(api_pb2.GetWorkerFullInfoRequest(study_id=studyID, trial_id=self.prev_trial_id, only_latest_log=True), 10)\n            worker_list = gwfrep.worker_full_infos\n\n        for w in worker_list:\n            if w.Worker.status == api_pb2.COMPLETED:\n                for ml in w.metrics_logs:\n                    if ml.name == self.objective_name:\n                        self.logger.info(\"Evaluation result of previous candidate: {}\".format(ml.values[-1].value))\n                        return float(ml.values[-1].value)\n\n        # TODO: add support for multiple trials\n\n\n    def _get_search_space(self, 
studyID):\n\n # this function need to\n # 1) get the number of layers\n # 2) get the I/O size\n # 3) get the available operations\n # 4) get the optimization direction (i.e. minimize or maximize)\n # 5) get the objective name\n \n channel = grpc.beta.implementations.insecure_channel(self.manager_addr, self.manager_port)\n with api_pb2.beta_create_Manager_stub(channel) as client:\n gsrep = client.GetStudy(api_pb2.GetStudyRequest(study_id=studyID), 10)\n \n self.opt_direction = gsrep.study_config.optimization_type\n self.objective_name = gsrep.study_config.objective_value_name\n\n all_params = gsrep.study_config.nas_config\n graph_config = all_params.graph_config\n search_space_raw = all_params.operations\n\n self.num_layers = int(graph_config.num_layers)\n self.input_size = list(map(int, graph_config.input_size))\n self.output_size = list(map(int, graph_config.output_size))\n search_space_object = SearchSpace(search_space_raw)\n\n self.logger.info(\"Search Space for Study {}:\".format(studyID))\n\n self.search_space = search_space_object.search_space\n for opt in self.search_space:\n opt.print_op(self.logger)\n \n self.num_operations = search_space_object.num_operations\n self.logger.info(\"There are {} operations in total.\\n\".format(self.num_operations))\n \n\n def _get_suggestion_param(self, paramID, studyID):\n channel = grpc.beta.implementations.insecure_channel(self.manager_addr, self.manager_port)\n with api_pb2.beta_create_Manager_stub(channel) as client:\n gsprep = client.GetSuggestionParameters(api_pb2.GetSuggestionParametersRequest(param_id=paramID), 10)\n \n params_raw = gsprep.suggestion_parameters\n\n suggestion_params = parseSuggestionParam(params_raw)\n\n self.logger.info(\"Parameters of LSTM Controller for Study {}:\".format(studyID))\n for spec in suggestion_params:\n if len(spec) > 13:\n self.logger.info(\"{}: \\t{}\".format(spec, suggestion_params[spec]))\n else:\n self.logger.info(\"{}: \\t\\t{}\".format(spec, suggestion_params[spec]))\n\n self.suggestion_config = suggestion_params\n"
]
| [
[
"tensorflow.Graph",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.global_variables_initializer"
]
]
|
wanghuancoder/Paddle | [
"8f2b0860ebe4bd5998c97dfaf2a29702ffd2b52a"
]
| [
"python/paddle/fluid/dygraph/layers.py"
]
| [
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\nimport contextlib\nimport sys\nimport numpy as np\nimport six\nimport re\nimport copy\nimport weakref\nimport warnings\nfrom copy import deepcopy\nimport inspect\n\nimport paddle\n\nfrom . import parallel_helper\nfrom .. import unique_name\nfrom paddle.fluid import core\nfrom .layer_object_helper import LayerObjectHelper\nfrom .layer_hooks import record_program_ops_pre_hook, set_op_customized_attrs_post_hook, LayerOpsRecoder\nfrom .base import program_desc_tracing_guard, param_guard, in_declarative_mode\nfrom paddle.fluid import framework\nfrom ..param_attr import ParamAttr\nfrom paddle.fluid.executor import Executor, global_scope\nfrom paddle.fluid.framework import in_dygraph_mode, convert_np_dtype_to_dtype_\nfrom paddle.fluid.framework import _current_expected_place as _get_device\nfrom paddle.fluid.dygraph import no_grad\nimport paddle.utils.deprecated as deprecated\n\n__all__ = ['Layer']\n\n_first_cap_re = re.compile('(.)([A-Z][a-z]+)')\n_all_cap_re = re.compile('([a-z])([A-Z])')\n\n\ndef _convert_camel_to_snake(name):\n s1 = _first_cap_re.sub(r'\\1_\\2', name)\n return _all_cap_re.sub(r'\\1_\\2', s1).lower()\n\n\ndef _addindent(string, indent):\n s1 = string.split('\\n')\n if len(s1) == 1:\n return string\n s2 = []\n for idx, line in enumerate(s1):\n if idx > 0:\n s2.append(str((indent * ' ') + line))\n return s1[0] + '\\n' + '\\n'.join(s2)\n\n\nclass HookRemoveHelper(object):\n \"\"\" A HookRemoveHelper that can be used to remove hook. \"\"\"\n\n next_hook_id = 0\n\n def __init__(self, hooks):\n self._hooks_ref = weakref.ref(hooks)\n self._hook_id = HookRemoveHelper.next_hook_id\n HookRemoveHelper.next_hook_id += 1\n\n def remove(self):\n hooks = self._hooks_ref()\n if hooks is not None and self._hook_id in hooks:\n del hooks[self._hook_id]\n\n\nclass Layer(object):\n \"\"\"\n Dynamic graph Layer based on OOD, includes the parameters of the layer, the structure of the forward graph and so on.\n\n Parameters:\n name_scope (str, optional): prefix name used by the layer to name parameters.\n If prefix is \"my_layer\", parameter name in MyLayer\n can be \"my_layer_0.w_n\", where \"w\" is the parameter\n base name and \"n\" is an unique suffix auto-generated.\n If None, prefix name will be snake cased class name. 
Default: None.\n dtype(str, optional): data type of this parameter.\n If set str, it can be \"bool\", \"float16\", \"float32\", \"float64\",\n \"int8\", \"int16\", \"int32\", \"int64\", \"uint8\" or \"uint16\".\n Default: \"float32\"\n\n Returns:\n None\n \"\"\"\n\n def __init__(self, name_scope=None, dtype=\"float32\"):\n self.training = True\n if name_scope is None:\n name_scope = _convert_camel_to_snake(self.__class__.__name__)\n self._full_name = unique_name.generate(name_scope)\n self._helper = LayerObjectHelper(self._full_name)\n self._built = False\n self._dtype = dtype\n self._init_in_dynamic_mode = framework.in_dygraph_mode()\n\n self._parameters = collections.OrderedDict()\n # Buffers the variable (not parameter) created in layer\n self._buffers = collections.OrderedDict()\n self._non_persistable_buffer_names_set = set()\n self._sub_layers = collections.OrderedDict()\n self._loaddict_holder = collections.OrderedDict()\n\n # Record generated op_descs in this layer\n self._op_recorder = LayerOpsRecoder(ops=[], hooks=[])\n self._customized_attrs = {}\n\n self._forward_pre_hooks = collections.OrderedDict()\n self._forward_post_hooks = collections.OrderedDict()\n\n self._casted_by_pure_fp16 = False\n\n self._state_dict_hooks = collections.OrderedDict()\n\n def train(self):\n \"\"\"\n Sets this Layer and all its sublayers to training mode.\n This only effects certain modules like `Dropout` and `BatchNorm`.\n\n Returns:\n None\n\n Example::\n .. code-block:: python\n\n import paddle\n\n class MyLayer(paddle.nn.Layer):\n def __init__(self):\n super(MyLayer, self).__init__()\n self._linear = paddle.nn.Linear(1, 1)\n self._dropout = paddle.nn.Dropout(p=0.5)\n\n def forward(self, input):\n temp = self._linear(input)\n temp = self._dropout(temp)\n return temp\n\n x = paddle.randn([10, 1], 'float32')\n mylayer = MyLayer()\n mylayer.eval() # set mylayer._dropout to eval mode\n out = mylayer(x)\n mylayer.train() # set mylayer._dropout to train mode\n out = mylayer(x)\n\n \"\"\"\n # global setting in dygraph\n # NOTE(chenweihang): nn.Layer also can be used in static mode,\n # but _dygraph_tracer() can not be called in static mode\n if in_dygraph_mode():\n framework._dygraph_tracer().train_mode()\n # Layer-level setting\n self.training = True\n for layer in self.sublayers():\n layer.training = True\n\n def eval(self):\n \"\"\"\n Sets this Layer and all its sublayers to evaluation mode.\n This only effects certain modules like `Dropout` and `BatchNorm`.\n\n Returns:\n None\n\n Example::\n .. code-block:: python\n\n import paddle\n\n class MyLayer(paddle.nn.Layer):\n def __init__(self):\n super(MyLayer, self).__init__()\n self._linear = paddle.nn.Linear(1, 1)\n self._dropout = paddle.nn.Dropout(p=0.5)\n\n def forward(self, input):\n temp = self._linear(input)\n temp = self._dropout(temp)\n return temp\n\n x = paddle.randn([10, 1], 'float32')\n mylayer = MyLayer()\n mylayer.eval() # set mylayer._dropout to eval mode\n out = mylayer(x)\n print(out)\n\n \"\"\"\n # global setting in dygraph\n # NOTE(chenweihang): nn.Layer also can be used in static mode,\n # but _dygraph_tracer() can not be called in static mode\n if in_dygraph_mode():\n framework._dygraph_tracer().eval_mode()\n # Layer-level setting\n self.training = False\n for layer in self.sublayers():\n layer.training = False\n\n def apply(self, fn):\n \"\"\"\n Applies ``fn`` recursively to every sublayer (as returned by ``.sublayers()``)\n as well as self. 
Typical use includes initializing the parameters of a model.\n\n Parameters:\n fn (function): a function to be applied to each sublayer\n\n Returns:\n Layer: self\n\n Example::\n .. code-block:: python\n\n import paddle\n import paddle.nn as nn\n\n net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))\n\n def init_weights(layer):\n if type(layer) == nn.Linear:\n print('before init weight:', layer.weight.numpy())\n new_weight = paddle.full(shape=layer.weight.shape, dtype=layer.weight.dtype, fill_value=0.9)\n layer.weight.set_value(new_weight)\n print('after init weight:', layer.weight.numpy())\n\n net.apply(init_weights)\n\n print(net.state_dict())\n \"\"\"\n for layer in self.children():\n layer.apply(fn)\n\n fn(self)\n\n return self\n\n def full_name(self):\n \"\"\"Full name for this layer, composed by name_scope + \"/\" + MyLayer.__class__.__name__\n\n Returns:\n str: full name of this layer.\n\n Example::\n .. code-block:: python\n\n import paddle\n\n class LinearNet(paddle.nn.Layer):\n def __init__(self):\n super(LinearNet, self).__init__(name_scope = \"demo_linear_net\")\n self._linear = paddle.nn.Linear(1, 1)\n\n def forward(self, x):\n return self._linear(x)\n\n linear_net = LinearNet()\n print(linear_net.full_name()) # demo_linear_net_0\n\n \"\"\"\n return self._full_name\n\n def register_forward_post_hook(self, hook):\n \"\"\"Register a forward post-hook for Layer. The hook will be called after `forward` function has been computed.\n\n It should have the following form, `input` and `output` of the `hook` is `input` and `output` of the `Layer` respectively.\n User can use forward post-hook to change the output of the Layer or perform information statistics tasks on the Layer.\n\n hook(Layer, input, output) -> None or modified output\n\n Parameters:\n hook(function): a function registered as a forward post-hook\n\n Returns:\n HookRemoveHelper: a HookRemoveHelper object that can be used to remove the added hook by calling `hook_remove_helper.remove()` .\n\n Examples:\n .. code-block:: python\n\n import paddle\n import numpy as np\n\n # the forward_post_hook change the output of the layer: output = output * 2\n def forward_post_hook(layer, input, output):\n # user can use layer, input and output for information statistis tasks\n\n # change the output\n return output * 2\n\n linear = paddle.nn.Linear(13, 5)\n\n # register the hook\n forward_post_hook_handle = linear.register_forward_post_hook(forward_post_hook)\n\n value1 = np.arange(26).reshape(2, 13).astype(\"float32\")\n in1 = paddle.to_tensor(value1)\n\n out0 = linear(in1)\n\n # remove the hook\n forward_post_hook_handle.remove()\n\n out1 = linear(in1)\n\n # hook change the linear's output to output * 2, so out0 is equal to out1 * 2.\n assert (out0.numpy() == (out1.numpy()) * 2).any()\n \"\"\"\n hook_remove_helper = HookRemoveHelper(self._forward_post_hooks)\n self._forward_post_hooks[hook_remove_helper._hook_id] = hook\n return hook_remove_helper\n\n def register_forward_pre_hook(self, hook):\n \"\"\"Register a forward pre-hook for Layer. The hook will be called before `forward` function has been computed.\n\n It should have the following form, `input` of the `hook` is `input` of the `Layer`,\n hook can either return a tuple or a single modified value in the hook. 
We will wrap the value into a tuple if\n a single value is returned(unless that value is already a tuple).\n User can use forward pre-hook to change the input of the Layer or perform information statistics tasks on the Layer.\n\n hook(Layer, input) -> None or modified input\n\n Parameters:\n hook(function): a function registered as a forward pre-hook\n\n Returns:\n HookRemoveHelper: a HookRemoveHelper object that can be used to remove the added hook by calling `hook_remove_helper.remove()` .\n\n Examples:\n .. code-block:: python\n\n import paddle\n import numpy as np\n\n # the forward_post_hook change the input of the layer: input = input * 2\n def forward_pre_hook(layer, input):\n # user can use layer and input for information statistis tasks\n\n # change the input\n input_return = (input[0] * 2)\n return input_return\n\n linear = paddle.nn.Linear(13, 5)\n\n # register the hook\n forward_pre_hook_handle = linear.register_forward_pre_hook(forward_pre_hook)\n\n value0 = np.arange(26).reshape(2, 13).astype(\"float32\")\n in0 = paddle.to_tensor(value0)\n out0 = linear(in0)\n\n # remove the hook\n forward_pre_hook_handle.remove()\n\n value1 = value0 * 2\n in1 = paddle.to_tensor(value1)\n out1 = linear(in1)\n\n # hook change the linear's input to input * 2, so out0 is equal to out1.\n assert (out0.numpy() == out1.numpy()).any()\n \"\"\"\n hook_remove_helper = HookRemoveHelper(self._forward_pre_hooks)\n self._forward_pre_hooks[hook_remove_helper._hook_id] = hook\n return hook_remove_helper\n\n def create_parameter(self,\n shape,\n attr=None,\n dtype=None,\n is_bias=False,\n default_initializer=None):\n \"\"\"Create parameters for this layer.\n\n Parameters:\n shape(list): Shape of the parameter.\n attr(ParamAttr, optional): Parameter attribute of weight. Please refer to :ref:`api_paddle_ParamAttr`. Default: None.\n dtype(str, optional): Data type of this parameter.\n If set str, it can be \"bool\", \"float16\", \"float32\", \"float64\",\n \"int8\", \"int16\", \"int32\", \"int64\", \"uint8\" or \"uint16\". Default: \"float32\".\n is_bias(bool, optional): if this is a bias parameter. Default: False.\n default_initializer(Initializer, optional): the default initializer for this parameter.\n If set None, default initializer will be set to paddle.nn.initializer.Xavier and paddle.nn.initializer.Constant\n for non-bias and bias parameter, respectively. Default: None.\n\n Returns:\n :Tensor, created parameter.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n class MyLayer(paddle.nn.Layer):\n def __init__(self):\n super(MyLayer, self).__init__()\n self._linear = paddle.nn.Linear(1, 1)\n w_tmp = self.create_parameter([1,1])\n self.add_parameter(\"w_tmp\", w_tmp)\n\n def forward(self, input):\n return self._linear(input)\n\n mylayer = MyLayer()\n for name, param in mylayer.named_parameters():\n print(name, param) # will print w_tmp,_linear.weight,_linear.bias\n\n \"\"\"\n temp_attr = copy.deepcopy(attr)\n if isinstance(temp_attr, six.string_types) and temp_attr == \"\":\n temp_attr = None\n return self._helper.create_parameter(temp_attr, shape, dtype, is_bias,\n default_initializer)\n\n @deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.nn.Layer.create_tensor\",\n reason=\"New api in create_tensor, easier to use.\")\n def create_variable(self, name=None, persistable=None, dtype=None):\n \"\"\"\n\n Create Tensor for this layer.\n\n Parameters:\n name(str, optional): name of the tensor. Please refer to :ref:`api_guide_Name` . 
Default: None\n\n persistable(bool, optional): if set this tensor persistable. Default: False\n\n dtype(str, optional): data type of this parameter. If set str, it can be \"bool\", \"float16\", \"float32\", \"float64\",\"int8\", \"int16\", \"int32\", \"int64\", \"uint8\" or \"uint16\". If set None, it will be \"float32\". Default: None\n\n Returns:\n Tensor, created Tensor.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n class MyLinear(paddle.nn.Layer):\n def __init__(self,\n in_features,\n out_features):\n super(MyLinear, self).__init__()\n self.linear = paddle.nn.Linear( 10, 10)\n\n self.back_var = self.create_variable(name = \"linear_tmp_0\", dtype=self._dtype)\n\n def forward(self, input):\n out = self.linear(input)\n paddle.assign( out, self.back_var)\n\n return out\n\n \"\"\"\n if name is not None:\n var_name = \".\".join([self._full_name, name])\n else:\n var_name = unique_name.generate(\".\".join(\n [self._full_name, \"_generated_var\"]))\n\n return self._helper.main_program.current_block().create_var(\n name=var_name,\n persistable=persistable,\n dtype=dtype,\n type=core.VarDesc.VarType.LOD_TENSOR)\n\n # TODO: Add more parameter list when we need them\n def create_tensor(self, name=None, persistable=None, dtype=None):\n \"\"\"\n\n Create Tensor for this layer.\n\n Parameters:\n name(str, optional): name of the tensor. Please refer to :ref:`api_guide_Name` . Default: None\n persistable(bool, optional): if set this tensor persistable. Default: False\n dtype(str, optional): data type of this parameter.\n If set str, it can be \"bool\", \"float16\", \"float32\", \"float64\",\n \"int8\", \"int16\", \"int32\", \"int64\", \"uint8\" or \"uint16\".\n If set None, it will be \"float32\". Default: None\n\n Returns:\n Tensor, created Tensor.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n class MyLinear(paddle.nn.Layer):\n def __init__(self,\n in_features,\n out_features):\n super(MyLinear, self).__init__()\n self.linear = paddle.nn.Linear( 10, 10)\n\n self.back_var = self.create_tensor(name = \"linear_tmp_0\", dtype=self._dtype)\n\n def forward(self, input):\n out = self.linear(input)\n paddle.assign( out, self.back_var)\n\n return out\n\n \"\"\"\n if name is not None:\n var_name = \".\".join([self._full_name, name])\n else:\n var_name = unique_name.generate(\".\".join(\n [self._full_name, \"_generated_var\"]))\n\n return self._helper.main_program.current_block().create_var(\n name=var_name,\n persistable=persistable,\n dtype=dtype,\n type=core.VarDesc.VarType.LOD_TENSOR)\n\n def parameters(self, include_sublayers=True):\n \"\"\"Returns a list of all Parameters from current layer and its sub-layers.\n\n Returns:\n list of Tensor : a list of Parameters.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n linear = paddle.nn.Linear(1,1)\n print(linear.parameters()) # print linear_0.w_0 and linear_0.b_0\n\n \"\"\"\n ret = [\n param\n for _, param in self.named_parameters(\n include_sublayers=include_sublayers)\n ]\n return ret\n\n def children(self):\n \"\"\"Returns an iterator over immediate children layers.\n\n Yields:\n Layer: a child layer\n\n Examples:\n .. 
code-block:: python\n\n import paddle\n\n linear1 = paddle.nn.Linear(10, 3)\n linear2 = paddle.nn.Linear(3, 10, bias_attr=False)\n model = paddle.nn.Sequential(linear1, linear2)\n\n layer_list = list(model.children())\n\n print(layer_list) # [<paddle.nn.layer.common.Linear object at 0x7f7b8113f830>, <paddle.nn.layer.common.Linear object at 0x7f7b8113f950>]\n\n \"\"\"\n for _, layer in self.named_children():\n yield layer\n\n def named_children(self):\n \"\"\"Returns an iterator over immediate children layers, yielding both\n the name of the layer as well as the layer itself.\n\n Yields:\n (string, Layer): Tuple containing a name and child layer\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n linear1 = paddle.nn.Linear(10, 3)\n linear2 = paddle.nn.Linear(3, 10, bias_attr=False)\n model = paddle.nn.Sequential(linear1, linear2)\n for prefix, layer in model.named_children():\n print(prefix, layer)\n # ('0', <paddle.nn.layer.common.Linear object at 0x7fb61ed85830>)\n # ('1', <paddle.nn.layer.common.Linear object at 0x7fb61ed85950>)\n\n \"\"\"\n memo = set()\n for name, layer in self._sub_layers.items():\n if layer is not None and layer not in memo:\n memo.add(layer)\n yield name, layer\n\n def sublayers(self, include_self=False):\n \"\"\"Returns a list of sub layers.\n\n Parameters:\n include_self(bool, optional): Whether return self as sublayers. Default: False\n\n Returns:\n list of Layer : a list of sub layers.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n class MyLayer(paddle.nn.Layer):\n def __init__(self):\n super(MyLayer, self).__init__()\n self._linear = paddle.nn.Linear(1, 1)\n self._dropout = paddle.nn.Dropout(p=0.5)\n\n def forward(self, input):\n temp = self._linear(input)\n temp = self._dropout(temp)\n return temp\n\n mylayer = MyLayer()\n print(mylayer.sublayers()) # [<paddle.nn.layer.common.Linear object at 0x7f44b58977d0>, <paddle.nn.layer.common.Dropout object at 0x7f44b58978f0>]\n\n \"\"\"\n ret = [\n layer\n for _, layer in self.named_sublayers(include_self=include_self)\n ]\n return ret\n\n def named_parameters(self, prefix='', include_sublayers=True):\n \"\"\"\n Returns an iterator over all parameters in the Layer, yielding tuple of name and parameter.\n\n Parameters:\n prefix(str, optional): Prefix to prepend to all parameter names. Default: ''.\n include_sublayers(bool, optional): Whether include the parameters of sublayers.\n If True, also include the named parameters from sublayers. Default: True.\n\n Yields:\n (string, Parameter): Tuple of name and Parameter\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n fc1 = paddle.nn.Linear(10, 3)\n fc2 = paddle.nn.Linear(3, 10, bias_attr=False)\n model = paddle.nn.Sequential(fc1, fc2)\n for name, param in model.named_parameters():\n print(name, param)\n\n \"\"\"\n params_set = set()\n named_sublayers = self.named_sublayers(\n prefix=prefix,\n include_self=True) if include_sublayers else zip([prefix], [self])\n for layer_prefix, sublayer in named_sublayers:\n params = sublayer._parameters.items()\n for key, param in params:\n if param is None or param in params_set:\n continue\n params_set.add(param)\n name = layer_prefix + ('.' if layer_prefix else '') + key\n yield name, param\n\n def named_sublayers(self, prefix='', include_self=False, layers_set=None):\n \"\"\"\n Returns an iterator over all sublayers in the Layer, yielding tuple of name and sublayer.\n The duplicate sublayer will only be yielded once.\n\n Parameters:\n prefix(str, optional): Prefix to prepend to all parameter names. 
Default: ''.\n include_self(bool, optional): Whether include the Layer itself. Default: False.\n layers_set(set, optional): The set to record duplicate sublayers. Default: None.\n\n Yields:\n (string, Layer): Tuple of name and Layer\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n fc1 = paddle.nn.Linear(10, 3)\n fc2 = paddle.nn.Linear(3, 10, bias_attr=False)\n model = paddle.nn.Sequential(fc1, fc2)\n for prefix, layer in model.named_sublayers():\n print(prefix, layer)\n\n \"\"\"\n if layers_set is None:\n layers_set = set()\n if include_self and self not in layers_set:\n layers_set.add(self)\n yield prefix, self\n for key, layer in self._sub_layers.items():\n if layer is None:\n continue\n layer_prefix = prefix + ('.' if prefix else '') + key\n for p, l in layer.named_sublayers(\n prefix=layer_prefix, include_self=True,\n layers_set=layers_set):\n yield p, l\n\n def register_buffer(self, name, tensor, persistable=True):\n \"\"\"\n Registers a tensor as buffer into the layer.\n\n `buffer` is a non-trainable tensor and will not be updated by optimizer,\n but is necessary for evaluation and inference. For example, the mean and variance in BatchNorm layers.\n The registered buffer is persistable by default, and will be saved into\n `state_dict` alongside parameters. If set persistable=False, it registers\n a non-persistable buffer, so that it will not be a part of `state_dict` .\n\n Buffers can be accessed as attributes using given names.\n\n Parameters:\n name (string): name of the buffer. The buffer can be accessed\n from this layer using the given name\n tensor (Tensor): the tensor to be registered as buffer.\n persistable (bool): whether the buffer is part of this layer's\n state_dict.\n\n Returns:\n None\n\n Examples:\n .. code-block:: python\n\n import numpy as np\n import paddle\n\n linear = paddle.nn.Linear(10, 3)\n value = np.array([0]).astype(\"float32\")\n buffer = paddle.to_tensor(value)\n linear.register_buffer(\"buf_name\", buffer, persistable=True)\n\n # get the buffer by attribute.\n print(linear.buf_name)\n\n \"\"\"\n\n if '_buffers' not in self.__dict__:\n raise ValueError(\n \"super(YourLayer, self).__init__() should be called first\")\n elif not isinstance(name, six.string_types):\n raise TypeError(\n \"The name of buffer should be a string, but received {}.\".\n format(type(name).__name__))\n elif '.' in name:\n raise KeyError(\n \"The name of buffer can not contain `.`, \"\n \"because when you access the newly added buffer in the \"\n \"form of `self.**.**`, it will cause AttributeError.\")\n elif name == '':\n raise KeyError(\"The name of buffer can not be empty.\")\n elif hasattr(self, name) and name not in self._buffers:\n raise KeyError(\"attribute '{}' already exists.\".format(name))\n elif tensor is not None and not type(tensor) == core.VarBase:\n raise TypeError(\n \"The registered buffer should be a core.VarBase, but received {}.\".\n format(type(tensor).__name__))\n else:\n self._buffers[name] = tensor\n if persistable:\n self._non_persistable_buffer_names_set.discard(name)\n else:\n self._non_persistable_buffer_names_set.add(name)\n\n def buffers(self, include_sublayers=True):\n \"\"\"\n Returns a list of all buffers from current layer and its sub-layers.\n\n Parameters:\n include_sublayers(bool, optional): Whether include the buffers of sublayers. If True, also include the buffers from sublayers. Default: True\n\n Returns:\n list of Tensor : a list of buffers.\n\n Examples:\n .. 
code-block:: python\n\n import numpy as np\n import paddle\n\n linear = paddle.nn.Linear(10, 3)\n value = np.array([0]).astype(\"float32\")\n buffer = paddle.to_tensor(value)\n linear.register_buffer(\"buf_name\", buffer, persistable=True)\n\n print(linear.buffers()) # == print([linear.buf_name])\n\n \"\"\"\n ret = [\n buffer\n for _, buffer in self.named_buffers(\n include_sublayers=include_sublayers)\n ]\n return ret\n\n def named_buffers(self, prefix='', include_sublayers=True):\n \"\"\"\n Returns an iterator over all buffers in the Layer, yielding tuple of name and Tensor.\n\n Parameters:\n prefix(str, optional): Prefix to prepend to all buffer names. Default: ''.\n include_sublayers(bool, optional): Whether include the buffers of sublayers.\n If True, also include the named buffers from sublayers. Default: True.\n\n Yields:\n (string, Tensor): Tuple of name and tensor\n\n Examples:\n .. code-block:: python\n\n import numpy as np\n import paddle\n\n fc1 = paddle.nn.Linear(10, 3)\n buffer1 = paddle.to_tensor(np.array([0]).astype(\"float32\"))\n # register a tensor as buffer by specific `persistable`\n fc1.register_buffer(\"buf_name_1\", buffer1, persistable=True)\n\n fc2 = paddle.nn.Linear(3, 10)\n buffer2 = paddle.to_tensor(np.array([1]).astype(\"float32\"))\n # register a buffer by assigning an attribute with Tensor.\n # The `persistable` can only be False by this way.\n fc2.buf_name_2 = buffer2\n\n model = paddle.nn.Sequential(fc1, fc2)\n\n # get all named buffers\n for name, buffer in model.named_buffers():\n print(name, buffer)\n\n \"\"\"\n buffers_set = set()\n named_sublayers = self.named_sublayers(\n prefix=prefix,\n include_self=True) if include_sublayers else zip([prefix], [self])\n for layer_prefix, sublayer in named_sublayers:\n buffers = sublayer._buffers.items()\n for key, buffer in buffers:\n if buffer is None or buffer in buffers_set:\n continue\n buffers_set.add(buffer)\n name = layer_prefix + ('.' if layer_prefix else '') + key\n yield name, buffer\n\n def clear_gradients(self):\n \"\"\"\n Clear the gradients of all parameters for this layer.\n\n Returns:\n None\n\n Examples:\n .. 
code-block:: python\n\n                import paddle\n                import numpy as np\n\n                value = np.arange(26).reshape(2, 13).astype(\"float32\")\n                a = paddle.to_tensor(value)\n                linear = paddle.nn.Linear(13, 5)\n                adam = paddle.optimizer.Adam(learning_rate=0.01,\n                                            parameters=linear.parameters())\n                out = linear(a)\n                out.backward()\n                adam.step()\n                linear.clear_gradients()\n\n        \"\"\"\n        for p in self.parameters():\n            if p.trainable:\n                p.clear_gradient()\n\n    def _build_once(self, *args, **kwargs):\n        pass\n\n    def _dygraph_call_func(self, *inputs, **kwargs):\n        for forward_pre_hook in self._forward_pre_hooks.values():\n            hook_result = forward_pre_hook(self, inputs)\n            if hook_result is not None:\n                if not isinstance(hook_result, tuple):\n                    hook_result = (hook_result, )\n                inputs = hook_result\n\n        if not self._built:\n            with program_desc_tracing_guard(False):\n                self._build_once(*inputs, **kwargs)\n\n                # TODO(liuyuhui): Only XPU broadcasts parameters here.\n                # Other devices call _sync_params_buffers in DataParallel\n                # to synchronize parameters among multiple cards.\n                if parallel_helper._is_data_parallel_mode(\n                ) and paddle.is_compiled_with_xpu():\n                    parallel_helper._broadcast_parameters(\n                        self._parameters.values())\n\n            self._built = True\n\n        outputs = self.forward(*inputs, **kwargs)\n\n        for forward_post_hook in self._forward_post_hooks.values():\n            hook_result = forward_post_hook(self, inputs, outputs)\n            if hook_result is not None:\n                outputs = hook_result\n\n        return outputs\n\n    def __call__(self, *inputs, **kwargs):\n        # NOTE(Aurelius84): Why do we still need param_guard here?\n        # In case of ControlFlow, true_fn and false_fn will contain\n        # parameters that may not trigger the logic of `Operator` to create\n        # them. We add this to make sure all parameters are available.\n\n        if in_declarative_mode() and not framework.in_dygraph_mode():\n            with param_guard(self._parameters), param_guard(self._buffers):\n                return self._dygraph_call_func(*inputs, **kwargs)\n        else:\n            return self._dygraph_call_func(*inputs, **kwargs)\n\n    def forward(self, *inputs, **kwargs):\n        \"\"\"\n        Defines the computation performed at every call.\n        Should be overridden by all subclasses.\n\n        Parameters:\n            *inputs(tuple): unpacked tuple arguments\n            **kwargs(dict): unpacked dict arguments\n        \"\"\"\n        raise NotImplementedError\n\n    def backward(self, *inputs):\n        raise ValueError(\"Layer shouldn't implement backward\")\n\n    def add_sublayer(self, name, sublayer):\n        \"\"\"Adds a sub Layer instance.\n\n        Added sublayer can be accessed by self.name\n\n        Parameters:\n            name(str): name of this sublayer.\n            sublayer(Layer): an instance of Layer.\n        Returns:\n            Layer: the sublayer passed in.\n\n        Examples:\n            .. 
code-block:: python\n\n                import paddle\n\n                class MySequential(paddle.nn.Layer):\n                    def __init__(self, *layers):\n                        super(MySequential, self).__init__()\n                        if len(layers) > 0 and isinstance(layers[0], tuple):\n                            for name, layer in layers:\n                                self.add_sublayer(name, layer)\n                        else:\n                            for idx, layer in enumerate(layers):\n                                self.add_sublayer(str(idx), layer)\n\n                    def forward(self, input):\n                        for layer in self._sub_layers.values():\n                            input = layer(input)\n                        return input\n\n                fc1 = paddle.nn.Linear(10, 3)\n                fc2 = paddle.nn.Linear(3, 10, bias_attr=False)\n                model = MySequential(fc1, fc2)\n                for prefix, layer in model.named_sublayers():\n                    print(prefix, layer)\n        \"\"\"\n        assert isinstance(sublayer, Layer) or sublayer is None\n\n        self._sub_layers[name] = sublayer\n        return sublayer\n\n    def add_parameter(self, name, parameter):\n        \"\"\"Adds a Parameter instance.\n\n        Added parameter can be accessed by self.name\n\n        Parameters:\n            name(str): name of this parameter.\n            parameter(Parameter): an instance of Parameter.\n        Returns:\n            Parameter: the parameter passed in.\n        Examples:\n            .. code-block:: python\n\n                import paddle\n\n                class MyLayer(paddle.nn.Layer):\n                    def __init__(self):\n                        super(MyLayer, self).__init__()\n                        self._linear = paddle.nn.Linear(1, 1)\n                        w_tmp = self.create_parameter([1,1])\n                        self.add_parameter(\"w_tmp\", w_tmp)\n\n                    def forward(self, input):\n                        return self._linear(input)\n\n                mylayer = MyLayer()\n                for name, param in mylayer.named_parameters():\n                    print(name, param)      # will print w_tmp,_linear.weight,_linear.bias\n\n        \"\"\"\n        if '_parameters' not in self.__dict__:\n            raise RuntimeError(\n                \"super(YourLayer, self).__init__() should be called first.\")\n        elif not isinstance(name, six.string_types):\n            raise TypeError(\n                \"The name of parameter should be a string, but received {}.\".\n                format(type(name).__name__))\n        elif '.' in name:\n            raise KeyError(\n                \"The name of parameter can not contain `.`, \"\n                \"because when you access the newly added parameter in the \"\n                \"form of `self.**.**`, it will cause AttributeError.\")\n        elif name == '':\n            raise KeyError(\"The name of parameter can not be empty.\")\n        elif hasattr(self, name) and name not in self._parameters:\n            raise KeyError(\"The parameter '{}' already exists.\".format(name))\n        elif parameter is not None and not isinstance(parameter,\n                                                      framework.Parameter):\n            raise TypeError(\n                \"The parameter to be added should be a Parameter, but received {}.\".\n                format(type(parameter).__name__))\n        else:\n            if parameter is None:\n                # record the absent parameter and skip the state_dict loading\n                # below, which would otherwise dereference parameter.name\n                self._parameters[name] = None\n                return parameter\n\n            if len(self._loaddict_holder) > 0:\n                assert parameter.name in self._loaddict_holder, \"Parameter not found, cannot find [ {} ] in state_dict\".format(\n                    parameter.name)\n\n                parameter.set_value(self._loaddict_holder[parameter.name])\n\n            self._parameters[name] = parameter\n        return parameter\n\n    def _set_op_attrs(self, attrs):\n        \"\"\"\n        Add customized attribute while append_op. 
In case of quantization, we want to save\n        some attributes into op_desc while exporting an inference model via @to_static.\n\n        Arguments:\n            attrs(dict): customized attributes that will be added into op_descs.\n\n        NOTE: The interface is only exposed to developers.\n        \"\"\"\n\n        def is_already_registered(is_pre_hook):\n            layers_hooks = self._forward_pre_hooks if is_pre_hook else self._forward_post_hooks\n            candidate_hook = record_program_ops_pre_hook if is_pre_hook else set_op_customized_attrs_post_hook\n\n            already_registered = False\n            if layers_hooks:\n                last_key = next(reversed(layers_hooks))\n                already_registered = (layers_hooks[last_key] == candidate_hook)\n\n            return already_registered\n\n        if not isinstance(attrs, dict):\n            raise TypeError(\"attrs should be type(dict), but received {}\".\n                            format(type(attrs).__name__))\n\n        # NOTE: Overwrite behavior for same key.\n        self._customized_attrs.update(attrs)\n\n        if not is_already_registered(is_pre_hook=True):\n            pre_hook_helper = self.register_forward_pre_hook(\n                record_program_ops_pre_hook)\n            assert len(self._op_recorder.hooks) == 0\n            self._op_recorder.hooks = [pre_hook_helper]\n\n        # manually register post_hook to ensure it is inserted at the head.\n        if not is_already_registered(is_pre_hook=False):\n            post_hook_helper = self.register_forward_post_hook(\n                set_op_customized_attrs_post_hook)\n            if len(self._forward_post_hooks) > 1:\n                self._forward_post_hooks.move_to_end(\n                    post_hook_helper._hook_id, last=False)\n\n            assert len(self._op_recorder.hooks) == 1\n\n            # hooks that need to be removed once we finish executing them.\n            self._op_recorder.hooks.append(post_hook_helper)\n\n    def __getstate__(self):\n        return self.__dict__\n\n    def __setstate__(self, state):\n        self.__dict__.update(state)\n\n    def __getattr__(self, name):\n        if '_parameters' in self.__dict__:\n            _parameters = self.__dict__['_parameters']\n            if name in self._parameters:\n                return self._parameters[name]\n        if '_sub_layers' in self.__dict__:\n            _sub_layers = self.__dict__['_sub_layers']\n            if name in self._sub_layers:\n                return self._sub_layers[name]\n        if '_buffers' in self.__dict__:\n            _buffers = self.__dict__['_buffers']\n            if name in _buffers:\n                return _buffers[name]\n        return object.__getattribute__(self, name)\n\n    def __setattr__(self, name, value):\n        def _remove_if_exist(*dicts):\n            for d in dicts:\n                if name in d:\n                    del d[name]\n\n        if isinstance(getattr(type(self), name, None), property):\n            object.__setattr__(self, name, value)\n        params = self.__dict__.get('_parameters', None)\n        if isinstance(value, framework.Parameter):\n            if params is None:\n                raise ValueError(\n                    \"super(YourLayer, self).__init__() should be called first\")\n            if len(self._loaddict_holder) > 0:\n                assert value.name in self._loaddict_holder, \"Parameter not found, cannot find [ {} ] in state_dict\".format(\n                    value.name)\n\n                value.set_value(self._loaddict_holder[value.name])\n\n            _remove_if_exist(self.__dict__, self._buffers, self._sub_layers)\n            params[name] = value\n        elif params is not None and name in params:\n            if value is not None:\n                raise TypeError(\n                    \"assignment to parameter '{}' should be of type Parameter or None, but got '{}'\"\n                    .format(name, type(value).__name__))\n            params[name] = None\n        else:\n            layers = self.__dict__.get('_sub_layers', None)\n            if isinstance(value, Layer):\n                if layers is None:\n                    raise ValueError(\n                        \"super(YourLayer, self).__init__() should be called first\"\n                    )\n\n                _remove_if_exist(self.__dict__, self._parameters, self._buffers)\n                layers[name] = value\n            elif layers is not None and name in layers:\n                if value is not 
None:\n                    raise TypeError(\n                        \"assignment to sublayer '{}' should be of type Layer or None, but got '{}'\"\n                        .format(name, type(value).__name__))\n                layers[name] = None\n            else:\n                _buffers = self.__dict__.get('_buffers', None)\n                if type(value) == core.VarBase:\n                    if _buffers is None:\n                        raise ValueError(\n                            \"super(YourLayer, self).__init__() should be called first\"\n                        )\n                    _remove_if_exist(self.__dict__, self._parameters,\n                                     self._sub_layers)\n                    # Set persistable=False by default. Only `register_buffer` can\n                    # add a persistable buffer.\n                    if name not in self._buffers:\n                        self._non_persistable_buffer_names_set.add(name)\n                    _buffers[name] = value\n                elif _buffers is not None and name in _buffers:\n                    # Note(Aurelius84): In Dy2stat, the value of the Buffer may be modified in\n                    # the decorated function, such as `self.buffer = new_tensor`. So we update its\n                    # value via `assign`.\n                    if type(value) == framework.Variable:\n                        from paddle import assign\n                        # Note(zhhsplendid): the condition below happens in the PaddleGan model,\n                        # but should all non-Variable _buffers[name] be re-assigned? We\n                        # should consider it in the future. For now this is written as\n                        # conservative code.\n                        if _buffers[name] is None or type(_buffers[\n                                name]) == core.VarBase:\n                            _buffers[name] = assign(value)\n                        else:\n                            assign(value, _buffers[name])\n                    elif value is not None:\n                        raise TypeError(\n                            \"assignment to buffers '{}' should be of type core.VarBase or None, but got '{}'\"\n                            .format(name, type(value).__name__))\n                    else:\n                        # Assigning None will remove the buffer, but if a new VarBase is re-assigned to it,\n                        # it will be re-marked as a buffer with the same `persistable` attribute.\n                        _buffers[name] = None\n                else:\n                    object.__setattr__(self, name, value)\n\n    def __delattr__(self, name):\n        if name in self._parameters:\n            del self._parameters[name]\n        elif name in self._sub_layers:\n            del self._sub_layers[name]\n        elif name in self._buffers:\n            del self._buffers[name]\n            self._non_persistable_buffer_names_set.discard(name)\n        else:\n            object.__delattr__(self, name)\n\n    def __dir__(self):\n        \"\"\"\n        Return a list. Get all parameters, buffers (non-parameter tensors), sublayers, methods and attributes of the Layer.\n\n        Examples:\n            .. code-block:: python\n                import paddle\n                import numpy as np\n\n                class Mylayer(paddle.nn.Layer):\n                    def __init__(self):\n                        super(Mylayer, self).__init__()\n                        self.linear1 = paddle.nn.Linear(10, 10)\n                        self.linear2 = paddle.nn.Linear(5, 5)\n                        self.conv2d = paddle.nn.Conv2D(3, 2, 3)\n                        self.embedding = paddle.nn.Embedding(128, 16)\n                        self.h_0 = paddle.to_tensor(np.zeros([10, 10]).astype('float32'))\n\n                mylayer = Mylayer()\n                print(dir(mylayer))\n                # only parts are shown, because the list has too much content\n                # ['__call__', '__class__',  ... , 'conv2d', 'embedding', 'h_0', 'linear1', 'linear2', ... 
, 'sublayers', 'train']\n\n        \"\"\"\n        method = dir(self.__class__)\n        attrs = list(self.__dict__.keys())\n        parameters = list(self._parameters.keys())\n        sublayers = list(self._sub_layers.keys())\n        buffers = list(self._buffers.keys())\n\n        keys = method + attrs + parameters + sublayers + buffers\n\n        return keys\n\n    def extra_repr(self):\n        \"\"\"\n        Extra representation of this layer; you can provide a custom implementation\n        in your own layer.\n        \"\"\"\n        return ''\n\n    def __repr__(self):\n        extra_lines = []\n        extra_repr = self.extra_repr()\n        extra_lines = extra_repr.split('\\n')\n        sublayer_lines = []\n        for name, layer in self._sub_layers.items():\n            sublayer_str = repr(layer)\n            sublayer_str = _addindent(sublayer_str, 2)\n            sublayer_lines.append('(' + name + '): ' + sublayer_str)\n\n        final_str = self.__class__.__name__ + '('\n        if extra_lines:\n            if len(extra_lines) > 1:\n                final_str += '\\n  ' + '\\n  '.join(extra_lines) + '\\n'\n            elif len(extra_lines) == 1:\n                final_str += extra_lines[0]\n        if sublayer_lines:\n            final_str += '\\n  ' + '\\n  '.join(sublayer_lines) + '\\n'\n\n        final_str += ')'\n        return final_str\n\n    def register_state_dict_hook(self, hook):\n        hook_remove_helper = HookRemoveHelper(self._state_dict_hooks)\n        self._state_dict_hooks[hook_remove_helper._hook_id] = hook\n        return hook_remove_helper\n\n    def _state_dict_impl(self,\n                         destination=None,\n                         include_sublayers=True,\n                         structured_name_prefix=\"\",\n                         include_non_persistable_buffer=False):\n        \"\"\"\n        Get all parameters and persistable buffers of the current layer and its sub-layers, and set them into a dict.\n\n        Parameters:\n            destination(dict, optional) : If provided, all the parameters and persistable buffers will be set to this dict. Default: None\n            include_sublayers(bool, optional) : If true, also include the parameters and persistable buffers from sublayers. Default: True\n            include_non_persistable_buffer(bool, optional): If true, include non-persistable buffers of the current layer and its sub-layers; it is used in pure fp16 and jit.save. Default: False\n        \"\"\"\n\n        if destination is None:\n            destination = collections.OrderedDict()\n        for name, data in self._parameters.items():\n            if data is not None:\n                destination[structured_name_prefix + name] = data\n        for name, buffer in self._buffers.items():\n            if not include_non_persistable_buffer:\n                if buffer is not None and name not in self._non_persistable_buffer_names_set:\n                    destination[structured_name_prefix + name] = buffer\n            else:\n                if buffer is not None:\n                    destination[structured_name_prefix + name] = buffer\n\n        if include_sublayers:\n            for layer_name, layer_item in self._sub_layers.items():\n                if layer_item is not None:\n                    destination_temp = destination.copy()\n                    destination_temp.update(\n                        layer_item._state_dict_impl(\n                            destination_temp, include_sublayers,\n                            structured_name_prefix + layer_name + \".\",\n                            include_non_persistable_buffer))\n                    destination = destination_temp\n\n        for state_dict_hook in self._state_dict_hooks.values():\n            hook_result = state_dict_hook(destination)\n            if hook_result is not None:\n                destination = hook_result\n\n        return destination\n\n    def to_static_state_dict(self,\n                             destination=None,\n                             include_sublayers=True,\n                             structured_name_prefix=\"\"):\n        '''\n        Get all parameters and buffers of the current layer and its sub-layers, and set them into a dict.\n\n        Parameters:\n            destination(dict, optional) : If provided, all the parameters and persistable buffers will be set to this dict. Default: None\n            include_sublayers(bool, optional) : If true, also include the parameters and persistable buffers from sublayers. 
Default: True\n\n        Returns:\n            dict: a dict containing all the parameters and persistable buffers.\n\n        Examples:\n            .. code-block:: python\n\n                import paddle\n\n                emb = paddle.nn.Embedding(10, 10)\n\n                state_dict = emb.to_static_state_dict()\n                paddle.save( state_dict, \"paddle_dy.pdparams\")\n\n        '''\n        return self._state_dict_impl(\n            destination=destination,\n            include_sublayers=include_sublayers,\n            structured_name_prefix=structured_name_prefix,\n            include_non_persistable_buffer=True)\n\n    def state_dict(self,\n                   destination=None,\n                   include_sublayers=True,\n                   structured_name_prefix=\"\"):\n        '''\n        Get all parameters and persistable buffers of the current layer and its sub-layers, and set them into a dict.\n\n        Parameters:\n            destination(dict, optional) : If provided, all the parameters and persistable buffers will be set to this dict. Default: None\n            include_sublayers(bool, optional) : If true, also include the parameters and persistable buffers from sublayers. Default: True\n\n        Returns:\n            dict: a dict containing all the parameters and persistable buffers.\n\n        Examples:\n            .. code-block:: python\n\n                import paddle\n\n                emb = paddle.nn.Embedding(10, 10)\n\n                state_dict = emb.state_dict()\n                paddle.save( state_dict, \"paddle_dy.pdparams\")\n\n        '''\n        return self._state_dict_impl(\n            destination=destination,\n            include_sublayers=include_sublayers,\n            structured_name_prefix=structured_name_prefix,\n            include_non_persistable_buffer=False)\n\n    @framework.deprecate_stat_dict\n    def set_state_dict(self, state_dict, use_structured_name=True):\n        '''\n        Set parameters and persistable buffers from state_dict. All the parameters and buffers will be reset by the tensors in the state_dict.\n\n        Parameters:\n            state_dict(dict) : Dict contains all the parameters and persistable buffers.\n            use_structured_name(bool, optional) : If true, use structured name as key, otherwise, use parameter or buffer name as key.\n                                                  Default: True\n        Returns:\n            None\n\n        Examples:\n            .. code-block:: python\n\n                import paddle\n\n                emb = paddle.nn.Embedding(10, 10)\n\n                state_dict = emb.state_dict()\n                paddle.save(state_dict, \"paddle_dy.pdparams\")\n                para_state_dict = paddle.load(\"paddle_dy.pdparams\")\n                emb.set_state_dict(para_state_dict)\n\n        '''\n\n        def _check_match(key, param):\n            state = state_dict.get(key, None)\n            if state is None:\n                raise ValueError(\"{} is not found in the provided dict.\".format(\n                    key))\n            if (isinstance(state, dict) or isinstance(state, list)):\n                if (len(state) != len(param)):\n                    raise ValueError(\"{} receives a length of {}, \"\n                                     \"but the expected length is {}\".format(\n                                         key, len(state), len(param)))\n                else:\n                    return param, state\n            else:\n                state_shape = state.shape() if inspect.ismethod(\n                    state.shape) else state.shape\n\n                if list(state_shape) != list(param.shape):\n                    raise ValueError(\n                        \"{} receives a shape {}, but the expected shape is {}.\".\n                        format(key, list(state_shape), list(param.shape)))\n                return param, state\n\n        matched_param_state = []\n        for key, param in self.state_dict().items():\n            key_name = key if use_structured_name else param.name\n            try:\n                match_res = _check_match(key_name, param)\n                matched_param_state.append(match_res)\n            except ValueError as err:\n                warnings.warn((\"Skip loading for {}. 
\".format(key) + str(err)))\n\n if in_dygraph_mode():\n for param, state in matched_param_state:\n param.set_value(state)\n else:\n\n def _set_var(var, ndarray):\n t = global_scope().find_var(var.name).get_tensor()\n p = t._place()\n if p.is_cpu_place():\n place = core.CPUPlace()\n elif p.is_cuda_pinned_place():\n place = core.CUDAPinnedPlace()\n elif p.is_xpu_place():\n p = core.Place()\n p.set_place(t._place())\n place = core.XPUPlace(p.xpu_device_id())\n else:\n p = core.Place()\n p.set_place(t._place())\n place = core.CUDAPlace(p.gpu_device_id())\n t.set(ndarray, place)\n\n executor = Executor(_get_device())._default_executor\n # restore parameter states\n core._create_loaded_parameter(\n [param for param, state in matched_param_state],\n global_scope(), executor)\n for param, state in matched_param_state:\n _set_var(param, state)\n\n def _apply(self, func, device, dtype, blocking):\n for layer in self.children():\n layer._apply(func, device, dtype, blocking)\n\n for key, param in self._parameters.items():\n if param is not None:\n with no_grad():\n param_applied = func(param, device, dtype, blocking)\n\n if param.grad is not None:\n with no_grad():\n grad_applied = func(param._grad_ivar(), device, dtype,\n blocking)\n\n for key, buf in self._buffers.items():\n self._buffers[key] = func(buf, device, dtype, blocking)\n\n def to(self, device=None, dtype=None, blocking=None):\n '''\n Cast the parameters and buffers of Layer by the give device, dtype and blocking.\n\n Parameters:\n device(str|paddle.CPUPlace()|paddle.CUDAPlace()|paddle.CUDAPinnedPlace()|paddle.XPUPlace()|None, optional): The device of the Layer which want to be stored.\n If None, the device is the same with the original Tensor. If device is string, it can be ``cpu``, ``gpu:x`` and ``xpu:x``, where ``x`` is the\n index of the GPUs or XPUs. Default: None.\n\n dtype(str|core.VarDesc.VarType|None, optional): The type of the data. If None, the dtype is the same with the original Tensor. Default: None.\n\n blocking(bool|None, optional): If False and the source is in pinned memory, the copy will be\n asynchronous with respect to the host. Otherwise, the argument has no effect. If None, the blocking is set True. Default: None.\n\n Returns:\n self\n\n Examples:\n .. 
code-block:: python\n\n                # required: gpu\n                import paddle\n\n                linear = paddle.nn.Linear(2, 2)\n                linear.weight\n                #Parameter containing:\n                #Tensor(shape=[2, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=False,\n                #       [[-0.32770029,  0.38653070],\n                #        [ 0.46030545,  0.08158520]])\n\n                linear.to(dtype='float64')\n                linear.weight\n                #Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=False,\n                #       [[-0.32770029,  0.38653070],\n                #        [ 0.46030545,  0.08158520]])\n\n                linear.to(device='cpu')\n                linear.weight\n                #Tensor(shape=[2, 2], dtype=float64, place=CPUPlace, stop_gradient=False,\n                #       [[-0.32770029,  0.38653070],\n                #        [ 0.46030545,  0.08158520]])\n                linear.to(device=paddle.CUDAPinnedPlace(), blocking=False)\n                linear.weight\n                #Tensor(shape=[2, 2], dtype=float64, place=CUDAPinnedPlace, stop_gradient=False,\n                #       [[-0.04989364, -0.56889004],\n                #        [ 0.33960250,  0.96878713]])\n\n\n        '''\n\n        if device is None and dtype is None and blocking is None:\n            return self\n\n        if device is not None:\n            if isinstance(device, str):\n                device = paddle.device._convert_to_place(device)\n            elif isinstance(device, (core.CPUPlace, core.CUDAPlace,\n                                     core.CUDAPinnedPlace, core.XPUPlace)):\n                pass\n            else:\n                raise ValueError(\n                    \"device value error, must be str, paddle.CPUPlace(), paddle.CUDAPlace(), paddle.CUDAPinnedPlace() or paddle.XPUPlace(), but the type of device is \"\n                    + type(device).__name__)\n\n        if blocking is None:\n            blocking = True\n        else:\n            assert isinstance(\n                blocking,\n                bool), \"blocking value error, must be True, False or None\"\n\n        def transform(t, device, dtype, blocking):\n            if device is None:\n                device = t.place\n            if dtype is None:\n                dtype = t.dtype\n\n            if type(dtype) is str:\n                dtype = convert_np_dtype_to_dtype_(dtype)\n\n            # 1. For GPU places, determine whether the memory is sufficient for allocation:\n            if t.place.is_gpu_place():\n                # for gpu, minimum memory allocation unit is 256 bytes.\n                size_dtype = core.size_of_dtype(dtype)\n                # Note(zhangbo): Paddle GPU minimum memory allocation unit is 256 bytes; waiting_alloc_memory computes the memory space occupied by 't'.\n                # Coefficient 1.2 is used to avoid OOM that may occur in this critical state when the memory is just enough.\n                waiting_alloc_memory = (\n                    (np.prod(t.shape) * size_dtype) / 256 + 1) * 256 * 1.2\n                gpu_memory_available = core.gpu_memory_available()\n                if gpu_memory_available < waiting_alloc_memory:\n                    # Copy param / Tensor to cpu\n                    t_used = t._copy_to(paddle.CPUPlace(),\n                                        blocking)  # k-v type will error\n                    # Release mem of t\n                    t.value().get_tensor()._clear()\n                else:\n                    t_used = t\n            else:\n                t_used = t\n\n            # 2. cast param / Tensor to dtype\n            if dtype is not None and dtype != t_used.dtype:\n                with paddle.fluid.framework._dygraph_place_guard(\n                        place=t_used.place):\n                    t_casted = t_used.cast(dtype=dtype)\n            else:\n                t_casted = t_used\n\n            # 3. Copy casted cpu param / Tensor to device\n            if device is not None and not t_casted.place._equals(device):\n                new_t = t_casted._copy_to(device, blocking)\n            else:\n                new_t = t_casted\n\n            # 4. share the new Tensor with the original param / Tensor\n            dst_tensor = t.value().get_tensor()\n            src_tensor = new_t.value().get_tensor()\n            dst_tensor._share_data_with(src_tensor)\n\n            return t\n\n        with warnings.catch_warnings():\n            warnings.filterwarnings(\"ignore\", category=UserWarning)\n            self._apply(transform, device, dtype, blocking)\n\n        self._dtype = dtype\n        return self\n\n    # [aliases] Compatible with old method names\n    set_dict = set_state_dict\n    load_dict = set_state_dict\n"
]
| [
[
"numpy.prod"
]
]
|
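Editor's note: the Layer source in the row above wires its forward pre/post hooks through an OrderedDict keyed by a hook id and hands back a HookRemoveHelper so callers can unregister. The following is a minimal, runnable sketch of that dispatch pattern in plain Python; the names MiniLayer and HookHandle are illustrative and not part of the dumped source.

import collections
import itertools


class HookHandle:
    """Removable handle for a registered hook (sketch of a HookRemoveHelper)."""

    _ids = itertools.count()

    def __init__(self, hooks):
        self._hooks = hooks  # the OrderedDict the hook is stored in
        self._hook_id = next(HookHandle._ids)

    def remove(self):
        self._hooks.pop(self._hook_id, None)


class MiniLayer:
    def __init__(self):
        self._forward_pre_hooks = collections.OrderedDict()
        self._forward_post_hooks = collections.OrderedDict()

    def register_forward_pre_hook(self, hook):
        handle = HookHandle(self._forward_pre_hooks)
        self._forward_pre_hooks[handle._hook_id] = hook
        return handle

    def register_forward_post_hook(self, hook):
        handle = HookHandle(self._forward_post_hooks)
        self._forward_post_hooks[handle._hook_id] = hook
        return handle

    def forward(self, x):
        return x * 2

    def __call__(self, *inputs):
        for hook in self._forward_pre_hooks.values():
            result = hook(self, inputs)
            if result is not None:
                # wrap a single returned value into a tuple, as the docstring describes
                inputs = result if isinstance(result, tuple) else (result,)
        outputs = self.forward(*inputs)
        for hook in self._forward_post_hooks.values():
            result = hook(self, inputs, outputs)
            if result is not None:
                outputs = result
        return outputs


layer = MiniLayer()
handle = layer.register_forward_pre_hook(lambda lyr, inp: inp[0] + 1)
assert layer(3) == 8   # (3 + 1) * 2: the pre-hook rewrote the input
handle.remove()
assert layer(3) == 6   # the hook no longer fires

The OrderedDict keeps hooks firing in registration order, which is why _set_op_attrs above can use move_to_end to force its post-hook to the head.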
icoolworld/gpt2 | [
"cdd8d28362bab738fc3a47145335bf79679bead3"
]
| [
"train.py"
]
| [
"# -*- coding: UTF-8 -*-\nimport transformers\nimport torch\nimport os\nimport json\nimport random\nimport numpy as np\nimport argparse\nfrom torch.utils.tensorboard import SummaryWriter\nfrom datetime import datetime\nfrom tqdm import tqdm\nfrom torch.nn import DataParallel\nfrom tokenizations.bpe_tokenizer import get_encoder\n\n\ndef build_files(data_path, tokenized_data_path, num_pieces, full_tokenizer, min_length):\n with open(data_path, \"r\", encoding=\"utf8\") as f:\n print(\"reading lines\")\n lines = json.load(f)\n lines = [\n line.replace(\"\\n\", \" [SEP] \") for line in lines\n ] # 用[SEP]表示换行, 段落之间使用SEP表示段落结束\n all_len = len(lines)\n if not os.path.exists(tokenized_data_path):\n os.mkdir(tokenized_data_path)\n for i in tqdm(range(num_pieces)):\n sublines = lines[all_len // num_pieces * i : all_len // num_pieces * (i + 1)]\n if i == num_pieces - 1:\n sublines.extend(\n lines[all_len // num_pieces * (i + 1) :]\n ) # 把尾部例子添加到最后一个piece\n sublines = [\n full_tokenizer.tokenize(line) for line in sublines if len(line) > min_length\n ] # 只考虑长度超过min_length的句子\n sublines = [full_tokenizer.convert_tokens_to_ids(line) for line in sublines]\n full_line = []\n for subline in sublines:\n full_line.append(\n full_tokenizer.convert_tokens_to_ids(\"[MASK]\")\n ) # 文章开头添加MASK表示文章开始\n full_line.extend(subline)\n full_line.append(\n full_tokenizer.convert_tokens_to_ids(\"[CLS]\")\n ) # 文章之间添加CLS表示文章结束\n with open(tokenized_data_path + \"tokenized_train_{}.txt\".format(i), \"w\") as f:\n for id in full_line:\n f.write(str(id) + \" \")\n print(\"finish\")\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--device\", default=\"0,1,2,3\", type=str, required=False, help=\"设置使用哪些显卡\"\n )\n parser.add_argument(\n \"--model_config\",\n default=\"config/model_config_small.json\",\n type=str,\n required=False,\n help=\"选择模型参数\",\n )\n parser.add_argument(\n \"--tokenizer_path\",\n default=\"cache/vocab_small.txt\",\n type=str,\n required=False,\n help=\"选择词库\",\n )\n parser.add_argument(\n \"--raw_data_path\",\n default=\"data/train.json\",\n type=str,\n required=False,\n help=\"原始训练语料\",\n )\n parser.add_argument(\n \"--tokenized_data_path\",\n default=\"data/tokenized/\",\n type=str,\n required=False,\n help=\"tokenized语料存放位置\",\n )\n parser.add_argument(\"--raw\", action=\"store_true\", help=\"是否先做tokenize\")\n parser.add_argument(\"--epochs\", default=5, type=int, required=False, help=\"训练循环\")\n parser.add_argument(\n \"--batch_size\", default=8, type=int, required=False, help=\"训练batch size\"\n )\n parser.add_argument(\"--lr\", default=1.5e-4, type=float, required=False, help=\"学习率\")\n parser.add_argument(\n \"--warmup_steps\", default=2000, type=int, required=False, help=\"warm up步数\"\n )\n parser.add_argument(\n \"--log_step\",\n default=1,\n type=int,\n required=False,\n help=\"多少步汇报一次loss,设置为gradient accumulation的整数倍\",\n )\n parser.add_argument(\n \"--stride\", default=768, type=int, required=False, help=\"训练时取训练数据的窗口步长\"\n )\n parser.add_argument(\n \"--gradient_accumulation\", default=1, type=int, required=False, help=\"梯度积累\"\n )\n parser.add_argument(\"--fp16\", action=\"store_true\", help=\"混合精度\")\n parser.add_argument(\"--fp16_opt_level\", default=\"O1\", type=str, required=False)\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float, required=False)\n parser.add_argument(\n \"--num_pieces\", default=100, type=int, required=False, help=\"将训练语料分成多少份\"\n )\n parser.add_argument(\n \"--min_length\", default=128, type=int, required=False, 
help=\"最短收录文章长度\"\n )\n parser.add_argument(\n \"--output_dir\", default=\"model/\", type=str, required=False, help=\"模型输出路径\"\n )\n parser.add_argument(\n \"--pretrained_model\", default=\"\", type=str, required=False, help=\"模型训练起点路径\"\n )\n parser.add_argument(\n \"--writer_dir\",\n default=\"tensorboard_summary/\",\n type=str,\n required=False,\n help=\"Tensorboard路径\",\n )\n parser.add_argument(\"--segment\", action=\"store_true\", help=\"中文以词为单位\")\n parser.add_argument(\"--bpe_token\", action=\"store_true\", help=\"subword\")\n parser.add_argument(\n \"--encoder_json\",\n default=\"tokenizations/encoder.json\",\n type=str,\n help=\"encoder.json\",\n )\n parser.add_argument(\n \"--vocab_bpe\", default=\"tokenizations/vocab.bpe\", type=str, help=\"vocab.bpe\"\n )\n\n args = parser.parse_args()\n print(\"args:\\n\" + args.__repr__())\n\n if args.segment:\n from tokenizations import tokenization_bert_word_level as tokenization_bert\n else:\n from tokenizations import tokenization_bert\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.device # 此处设置程序使用哪些显卡\n\n model_config = transformers.modeling_gpt2.GPT2Config.from_json_file(\n args.model_config\n )\n print(\"config:\\n\" + model_config.to_json_string())\n\n n_ctx = model_config.n_ctx\n if args.bpe_token:\n full_tokenizer = get_encoder(args.encoder_json, args.vocab_bpe)\n else:\n full_tokenizer = tokenization_bert.BertTokenizer(vocab_file=args.tokenizer_path)\n full_tokenizer.max_len = 999999\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n print(\"using device:\", device)\n\n raw_data_path = args.raw_data_path\n tokenized_data_path = args.tokenized_data_path\n raw = args.raw # 选择是否从零开始构建数据集\n epochs = args.epochs\n batch_size = args.batch_size\n lr = args.lr\n warmup_steps = args.warmup_steps\n log_step = args.log_step\n stride = args.stride\n gradient_accumulation = args.gradient_accumulation\n fp16 = args.fp16 # 不支持半精度的显卡请勿打开\n fp16_opt_level = args.fp16_opt_level\n max_grad_norm = args.max_grad_norm\n num_pieces = args.num_pieces\n min_length = args.min_length\n output_dir = args.output_dir\n tb_writer = SummaryWriter(log_dir=args.writer_dir)\n assert log_step % gradient_accumulation == 0\n\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n\n if raw:\n print(\"building files\")\n build_files(\n data_path=raw_data_path,\n tokenized_data_path=tokenized_data_path,\n num_pieces=num_pieces,\n full_tokenizer=full_tokenizer,\n min_length=min_length,\n )\n print(\"files built\")\n\n if not args.pretrained_model:\n model = transformers.modeling_gpt2.GPT2LMHeadModel(config=model_config)\n else:\n model = transformers.modeling_gpt2.GPT2LMHeadModel.from_pretrained(\n args.pretrained_model\n )\n model.train()\n model.to(device)\n\n num_parameters = 0\n parameters = model.parameters()\n for parameter in parameters:\n num_parameters += parameter.numel()\n print(\"number of parameters: {}\".format(num_parameters))\n\n multi_gpu = False\n full_len = 0\n print(\"calculating total steps\")\n for i in tqdm(range(num_pieces)):\n with open(tokenized_data_path + \"tokenized_train_{}.txt\".format(i), \"r\") as f:\n full_len += len([int(item) for item in f.read().strip().split()])\n total_steps = int(full_len / stride * epochs / batch_size / gradient_accumulation)\n print(\"total steps = {}\".format(total_steps))\n\n optimizer = transformers.AdamW(model.parameters(), lr=lr, correct_bias=True)\n scheduler = transformers.WarmupLinearSchedule(\n optimizer, warmup_steps=warmup_steps, t_total=total_steps\n )\n if fp16:\n try:\n from apex 
import amp\n except ImportError:\n raise ImportError(\n \"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\"\n )\n model, optimizer = amp.initialize(model, optimizer, opt_level=fp16_opt_level)\n\n if torch.cuda.device_count() > 1:\n print(\"Let's use\", torch.cuda.device_count(), \"GPUs!\")\n model = DataParallel(model, device_ids=[int(i) for i in args.device.split(\",\")])\n multi_gpu = True\n print(\"starting training\")\n overall_step = 0\n running_loss = 0\n for epoch in range(epochs):\n print(\"epoch {}\".format(epoch + 1))\n now = datetime.now()\n print(\"time: {}\".format(now))\n x = np.linspace(0, num_pieces - 1, num_pieces, dtype=np.int32)\n random.shuffle(x)\n piece_num = 0\n for i in x:\n with open(\n tokenized_data_path + \"tokenized_train_{}.txt\".format(i), \"r\"\n ) as f:\n line = f.read().strip()\n tokens = line.split()\n tokens = [int(token) for token in tokens]\n start_point = 0\n samples = []\n while start_point < len(tokens) - n_ctx:\n samples.append(tokens[start_point : start_point + n_ctx])\n start_point += stride\n if start_point < len(tokens):\n samples.append(tokens[len(tokens) - n_ctx :])\n random.shuffle(samples)\n for step in range(len(samples) // batch_size): # drop last\n\n # prepare data\n batch = samples[step * batch_size : (step + 1) * batch_size]\n batch_inputs = []\n for ids in batch:\n int_ids = [int(x) for x in ids]\n batch_inputs.append(int_ids)\n batch_inputs = torch.tensor(batch_inputs).long().to(device)\n\n # forward pass\n outputs = model.forward(input_ids=batch_inputs, labels=batch_inputs)\n loss, logits = outputs[:2]\n\n # get loss\n if multi_gpu:\n loss = loss.mean()\n if gradient_accumulation > 1:\n loss = loss / gradient_accumulation\n\n # loss backward\n if fp16:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n torch.nn.utils.clip_grad_norm_(\n amp.master_params(optimizer), max_grad_norm\n )\n else:\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)\n\n # optimizer step\n if (overall_step + 1) % gradient_accumulation == 0:\n running_loss += loss.item()\n optimizer.step()\n optimizer.zero_grad()\n scheduler.step()\n if (overall_step + 1) % log_step == 0:\n tb_writer.add_scalar(\n \"loss\", loss.item() * gradient_accumulation, overall_step\n )\n print(\n \"now time: {}:{}. 
Step {} of piece {} of epoch {}, loss {}\".format(\n datetime.now().hour,\n datetime.now().minute,\n step + 1,\n piece_num,\n epoch + 1,\n running_loss\n * gradient_accumulation\n / (log_step / gradient_accumulation),\n )\n )\n running_loss = 0\n overall_step += 1\n piece_num += 1\n\n print(\"saving model for epoch {}\".format(epoch + 1))\n if not os.path.exists(output_dir + \"model_epoch{}\".format(epoch + 1)):\n os.mkdir(output_dir + \"model_epoch{}\".format(epoch + 1))\n model_to_save = model.module if hasattr(model, \"module\") else model\n model_to_save.save_pretrained(output_dir + \"model_epoch{}\".format(epoch + 1))\n # torch.save(scheduler.state_dict(), output_dir + 'model_epoch{}/scheduler.pt'.format(epoch + 1))\n # torch.save(optimizer.state_dict(), output_dir + 'model_epoch{}/optimizer.pt'.format(epoch + 1))\n print(\"epoch {} finished\".format(epoch + 1))\n\n then = datetime.now()\n print(\"time: {}\".format(then))\n print(\"time for one epoch: {}\".format(then - now))\n\n print(\"training finished\")\n if not os.path.exists(output_dir + \"final_model\"):\n os.mkdir(output_dir + \"final_model\")\n model_to_save = model.module if hasattr(model, \"module\") else model\n model_to_save.save_pretrained(output_dir + \"final_model\")\n # torch.save(scheduler.state_dict(), output_dir + 'final_model/scheduler.pt')\n # torch.save(optimizer.state_dict(), output_dir + 'final_model/optimizer.pt')\n\n\nif __name__ == \"__main__\":\n main()\n"
]
| [
[
"torch.cuda.device_count",
"torch.cuda.is_available",
"torch.tensor",
"numpy.linspace",
"torch.utils.tensorboard.SummaryWriter"
]
]
|
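Editor's note: in the train.py row above, each tokenized piece is cut into overlapping windows of n_ctx tokens advanced by stride, with a final right-aligned window so the tail tokens are not dropped. A standalone sketch of that sampling step; the function name window_samples is illustrative, not from the repo.

def window_samples(tokens, n_ctx, stride):
    """Mirror of the sampling loop in train.py: overlapping windows plus a tail window."""
    samples = []
    start = 0
    while start < len(tokens) - n_ctx:
        samples.append(tokens[start:start + n_ctx])
        start += stride
    if start < len(tokens):
        # right-aligned window over the last n_ctx tokens
        samples.append(tokens[len(tokens) - n_ctx:])
    return samples


# 10 tokens, window of 4, stride of 3 -> slices [0:4], [3:7], [6:10]
assert window_samples(list(range(10)), n_ctx=4, stride=3) == [
    [0, 1, 2, 3],
    [3, 4, 5, 6],
    [6, 7, 8, 9],
]

With stride < n_ctx the windows overlap, which is why the script estimates total_steps as full_len / stride rather than full_len / n_ctx.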
hidden-ar/OpenSfM | [
"3ea1216d4dedc94b93ea9f7aa51cd8efd7377922"
]
| [
"opensfm/reconstruction_helpers.py"
]
| [
"import logging\nimport math\nfrom typing import Optional, List, Dict, Any, Iterable\n\nimport numpy as np\nfrom opensfm import (\n geo,\n multiview,\n pygeometry,\n pymap,\n geometry,\n types,\n exif as oexif,\n rig,\n)\nfrom opensfm.dataset_base import DataSetBase\n\n\nlogger: logging.Logger = logging.getLogger(__name__)\n\n\ndef guess_acceleration_from_orientation_tag(orientation: int) -> List[float]:\n \"\"\"Guess upward vector in camera coordinates given the orientation tag.\n\n Assumes camera is looking towards the horizon and horizon is horizontal\n on the image when taking in to account the orientation tag.\n \"\"\"\n # See http://sylvana.net/jpegcrop/exif_orientation.html\n if orientation == 1:\n return [0, -1, 0]\n if orientation == 2:\n return [0, -1, 0]\n if orientation == 3:\n return [0, 1, 0]\n if orientation == 4:\n return [0, 1, 0]\n if orientation == 5:\n return [-1, 0, 0]\n if orientation == 6:\n return [-1, 0, 0]\n if orientation == 7:\n return [1, 0, 0]\n if orientation == 8:\n return [1, 0, 0]\n raise RuntimeError(f\"Error: Unknown orientation tag: {orientation}\")\n\n\ndef orientation_from_acceleration_in_image_axis(x: float, y: float) -> int:\n \"\"\"Return the orientation tag corresponding to an acceleration\"\"\"\n if y <= -(np.fabs(x)):\n return 1\n elif y >= np.fabs(x):\n return 3\n elif x <= -(np.fabs(y)):\n return 6\n elif x >= np.fabs(y):\n return 8\n else:\n raise RuntimeError(f\"Error: Invalid acceleration {x}, {y}!\")\n\n\ndef transform_acceleration_from_phone_to_image_axis(\n x: float, y: float, z: float, orientation: int\n) -> List[float]:\n \"\"\"Compute acceleration in image axis.\n\n Orientation tag is used to ensure that the resulting acceleration points\n downwards in the image. This validation is not needed if the orientation\n tag is the one from the original picture. 
It is only required when\n the image has been rotated with respect to the original and the orientation\n tag modified accordingly.\n \"\"\"\n assert orientation in [1, 3, 6, 8]\n\n # Orientation in image axis assuming image has not been transformed\n length = np.sqrt(x * x + y * y + z * z)\n if length < 3: # Assume IOS device since gravity is 1 in there\n ix, iy, iz = y, -x, z\n else: # Assume Android device since gravity is 9.8 in there\n ix, iy, iz = -y, -x, -z\n\n for _ in range(4):\n if orientation == orientation_from_acceleration_in_image_axis(ix, iy):\n break\n else:\n ix, iy = -iy, ix\n\n return [ix, iy, iz]\n\n\ndef shot_acceleration_in_image_axis(shot: pymap.Shot) -> Optional[List[float]]:\n \"\"\"Get or guess shot's acceleration.\"\"\"\n if not shot.metadata.orientation.has_value:\n return None\n\n orientation = shot.metadata.orientation.value\n if not 1 <= orientation <= 8:\n logger.error(\n \"Unknown orientation tag {} for image {}\".format(orientation, shot.id)\n )\n orientation = 1\n\n if shot.metadata.accelerometer.has_value:\n x, y, z = shot.metadata.accelerometer.value\n if x != 0 or y != 0 or z != 0:\n return transform_acceleration_from_phone_to_image_axis(x, y, z, orientation)\n return guess_acceleration_from_orientation_tag(orientation)\n\n\ndef rotation_from_shot_metadata(shot: pymap.Shot) -> Optional[np.ndarray]:\n rotation = rotation_from_angles(shot)\n if rotation is None:\n rotation = rotation_from_orientation_compass(shot)\n return rotation\n\n\ndef rotation_from_orientation_compass(shot: pymap.Shot) -> Optional[np.ndarray]:\n up_vector = shot_acceleration_in_image_axis(shot)\n if up_vector is None:\n return None\n if shot.metadata.compass_angle.has_value:\n angle = shot.metadata.compass_angle.value\n else:\n angle = 0.0\n return multiview.rotation_matrix_from_up_vector_and_compass(up_vector, angle)\n\n\ndef rotation_from_angles(shot: pymap.Shot) -> Optional[np.ndarray]:\n if not shot.metadata.opk_angles.has_value:\n return None\n opk_degrees = shot.metadata.opk_angles.value\n opk_rad = map(math.radians, opk_degrees)\n return geometry.rotation_from_opk(*opk_rad)\n\n\ndef reconstruction_from_metadata(\n data: DataSetBase, images: Iterable[str]\n) -> types.Reconstruction:\n \"\"\"Initialize a reconstruction by using EXIF data for constructing shot poses and cameras.\"\"\"\n data.init_reference()\n rig_assignments = rig.rig_assignments_per_image(data.load_rig_assignments())\n\n reconstruction = types.Reconstruction()\n reconstruction.reference = data.load_reference()\n reconstruction.cameras = data.load_camera_models()\n for image in images:\n camera_id = data.load_exif(image)[\"camera\"]\n\n if image in rig_assignments:\n rig_instance_id, rig_camera_id, _ = rig_assignments[image]\n else:\n rig_instance_id = image\n rig_camera_id = camera_id\n\n reconstruction.add_rig_camera(pymap.RigCamera(pygeometry.Pose(), rig_camera_id))\n reconstruction.add_rig_instance(pymap.RigInstance(rig_instance_id))\n shot = reconstruction.create_shot(\n shot_id=image,\n camera_id=camera_id,\n rig_camera_id=rig_camera_id,\n rig_instance_id=rig_instance_id,\n )\n\n shot.metadata = get_image_metadata(data, image)\n\n if not shot.metadata.gps_position.has_value:\n reconstruction.remove_shot(image)\n continue\n gps_pos = shot.metadata.gps_position.value\n\n rotation = rotation_from_shot_metadata(shot)\n if rotation is not None:\n shot.pose.set_rotation_matrix(rotation)\n shot.pose.set_origin(gps_pos)\n shot.scale = 1.0\n return reconstruction\n\n\ndef exif_to_metadata(\n exif: Dict[str, Any], 
use_altitude: bool, reference: types.TopocentricConverter\n) -> pymap.ShotMeasurements:\n \"\"\"Construct a metadata object from raw EXIF tags (as a dict).\"\"\"\n metadata = pymap.ShotMeasurements()\n\n gps = exif.get(\"gps\")\n if gps and \"latitude\" in gps and \"longitude\" in gps:\n lat, lon = gps[\"latitude\"], gps[\"longitude\"]\n if use_altitude:\n alt = min([oexif.maximum_altitude, gps.get(\"altitude\", 2.0)])\n else:\n alt = 2.0 # Arbitrary value used to align the reconstruction\n x, y, z = reference.to_topocentric(lat, lon, alt)\n metadata.gps_position.value = np.array([x, y, z])\n metadata.gps_accuracy.value = gps.get(\"dop\", 15.0)\n if metadata.gps_accuracy.value == 0.0:\n metadata.gps_accuracy.value = 15.0\n\n opk = exif.get(\"opk\")\n if opk and \"omega\" in opk and \"phi\" in opk and \"kappa\" in opk:\n omega, phi, kappa = opk[\"omega\"], opk[\"phi\"], opk[\"kappa\"]\n metadata.opk_angles.value = np.array([omega, phi, kappa])\n metadata.opk_accuracy.value = opk.get(\"accuracy\", 1.0)\n\n metadata.orientation.value = exif.get(\"orientation\", 1)\n\n if \"accelerometer\" in exif:\n metadata.accelerometer.value = exif[\"accelerometer\"]\n\n if \"compass\" in exif:\n metadata.compass_angle.value = exif[\"compass\"][\"angle\"]\n if \"accuracy\" in exif[\"compass\"]:\n metadata.compass_accuracy.value = exif[\"compass\"][\"accuracy\"]\n\n if \"capture_time\" in exif:\n metadata.capture_time.value = exif[\"capture_time\"]\n\n if \"skey\" in exif:\n metadata.sequence_key.value = exif[\"skey\"]\n\n return metadata\n\n\ndef get_image_metadata(data: DataSetBase, image: str) -> pymap.ShotMeasurements:\n \"\"\"Get image metadata as a ShotMetadata object.\"\"\"\n exif = data.load_exif(image)\n reference = data.load_reference()\n return exif_to_metadata(exif, data.config[\"use_altitude_tag\"], reference)\n"
]
| [
[
"numpy.array",
"numpy.fabs",
"numpy.sqrt"
]
]
|
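Editor's note: in the OpenSfM row above, transform_acceleration_from_phone_to_image_axis searches over in-plane quarter-turn rotations, (ix, iy) -> (-iy, ix), until the quadrant test reproduces the EXIF orientation tag. Below is a small self-contained check of that loop's invariant; the quadrant test is copied from the dumped source, and only the driver code at the bottom is new.

import numpy as np


def orientation_from_acceleration_in_image_axis(x: float, y: float) -> int:
    """Quadrant test copied from the OpenSfM helper above."""
    if y <= -(np.fabs(x)):
        return 1
    elif y >= np.fabs(x):
        return 3
    elif x <= -(np.fabs(y)):
        return 6
    elif x >= np.fabs(y):
        return 8
    else:
        raise RuntimeError(f"Error: Invalid acceleration {x}, {y}!")


# Four quarter-turns visit each upright EXIF tag exactly once and return
# to the starting vector, so the search loop in the source always terminates.
ix, iy = 0.0, -9.8  # gravity pointing "down" in image axes
seen = []
for _ in range(4):
    seen.append(orientation_from_acceleration_in_image_axis(ix, iy))
    ix, iy = -iy, ix
assert sorted(seen) == [1, 3, 6, 8]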
acul3/transformers | [
"b38a9251c7c3105734dd61edea84d70fe98943b1"
]
| [
"src/transformers/models/blenderbot/modeling_blenderbot.py"
]
| [
"# coding=utf-8\n# Copyright 2021 The Facebook, Inc. and The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch Blenderbot model. \"\"\"\n\n\nimport math\nimport os\nimport random\nimport warnings\nfrom typing import Optional, Tuple, Union\n\nimport torch\nimport torch.nn.functional as F\nimport torch.utils.checkpoint\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss\n\nfrom ...activations import ACT2FN\nfrom ...file_utils import (\n add_end_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n replace_return_docstrings,\n)\nfrom ...modeling_outputs import (\n BaseModelOutput,\n BaseModelOutputWithPastAndCrossAttentions,\n Seq2SeqLMOutput,\n Seq2SeqModelOutput,\n)\nfrom ...modeling_utils import PreTrainedModel\nfrom ...utils import logging\nfrom ..blenderbot_small import BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel\nfrom .configuration_blenderbot import BlenderbotConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"BlenderbotConfig\"\n_TOKENIZER_FOR_DOC = \"BlenderbotTokenizer\"\n\n\nBLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"facebook/blenderbot-3B\",\n # See all Blenderbot models at https://huggingface.co/models?filter=blenderbot\n]\n\n\n# Copied from transformers.models.bart.modeling_bart.shift_tokens_right\ndef shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):\n \"\"\"\n Shift input ids one token to the right.\n \"\"\"\n shifted_input_ids = input_ids.new_zeros(input_ids.shape)\n shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()\n shifted_input_ids[:, 0] = decoder_start_token_id\n\n assert pad_token_id is not None, \"self.model.config.pad_token_id has to be defined.\"\n # replace possible -100 values in labels by `pad_token_id`\n shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)\n\n return shifted_input_ids\n\n\n# Copied from transformers.models.bart.modeling_bart._make_causal_mask\ndef _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0):\n \"\"\"\n Make causal mask used for bi-directional self-attention.\n \"\"\"\n bsz, tgt_len = input_ids_shape\n mask = torch.full((tgt_len, tgt_len), float(\"-inf\"))\n mask_cond = torch.arange(mask.size(-1))\n mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)\n mask = mask.to(dtype)\n\n if past_key_values_length > 0:\n mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1)\n return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)\n\n\n# Copied from transformers.models.bart.modeling_bart._expand_mask\ndef _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):\n \"\"\"\n Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.\n \"\"\"\n bsz, src_len = mask.size()\n tgt_len = tgt_len if tgt_len is not None else src_len\n\n expanded_mask = mask[:, None, None, 
:].expand(bsz, 1, tgt_len, src_len).to(dtype)\n\n inverted_mask = 1.0 - expanded_mask\n\n return inverted_mask.masked_fill(inverted_mask.bool(), torch.finfo(dtype).min)\n\n\nclass BlenderbotLearnedPositionalEmbedding(nn.Embedding):\n \"\"\"\n This module learns positional embeddings up to a fixed maximum size.\n \"\"\"\n\n def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int):\n assert padding_idx is not None, \"`padding_idx` should not be None, but of type int\"\n super().__init__(num_embeddings, embedding_dim, padding_idx=padding_idx)\n\n def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0):\n \"\"\"`input_ids_shape` is expected to be [bsz x seqlen].\"\"\"\n bsz, seq_len = input_ids_shape[:2]\n positions = torch.arange(\n past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device\n )\n return super().forward(positions)\n\n\n# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Blenderbot\nclass BlenderbotAttention(nn.Module):\n \"\"\"Multi-headed attention from 'Attention Is All You Need' paper\"\"\"\n\n def __init__(\n self,\n embed_dim: int,\n num_heads: int,\n dropout: float = 0.0,\n is_decoder: bool = False,\n bias: bool = True,\n ):\n super().__init__()\n self.embed_dim = embed_dim\n self.num_heads = num_heads\n self.dropout = dropout\n self.head_dim = embed_dim // num_heads\n assert (\n self.head_dim * num_heads == self.embed_dim\n ), f\"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).\"\n self.scaling = self.head_dim ** -0.5\n self.is_decoder = is_decoder\n\n self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n\n def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):\n return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n key_value_states: Optional[torch.Tensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n attention_mask: Optional[torch.Tensor] = None,\n output_attentions: bool = False,\n ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n \"\"\"Input shape: Batch x Time x Channel\"\"\"\n\n # if key_value_states are provided this layer is used as a cross-attention layer\n # for the decoder\n is_cross_attention = key_value_states is not None\n bsz, tgt_len, embed_dim = hidden_states.size()\n\n # get query proj\n query_states = self.q_proj(hidden_states) * self.scaling\n # get key, value proj\n if is_cross_attention and past_key_value is not None:\n # reuse k,v, cross_attentions\n key_states = past_key_value[0]\n value_states = past_key_value[1]\n elif is_cross_attention:\n # cross_attentions\n key_states = self._shape(self.k_proj(key_value_states), -1, bsz)\n value_states = self._shape(self.v_proj(key_value_states), -1, bsz)\n elif past_key_value is not None:\n # reuse k, v, self_attention\n key_states = self._shape(self.k_proj(hidden_states), -1, bsz)\n value_states = self._shape(self.v_proj(hidden_states), -1, bsz)\n key_states = torch.cat([past_key_value[0], key_states], dim=2)\n value_states = torch.cat([past_key_value[1], value_states], dim=2)\n else:\n # self_attention\n key_states = self._shape(self.k_proj(hidden_states), -1, bsz)\n value_states = 
self._shape(self.v_proj(hidden_states), -1, bsz)\n\n if self.is_decoder:\n # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.\n # Further calls to cross_attention layer can then reuse all cross-attention\n # key/value_states (first \"if\" case)\n # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of\n # all previous decoder key/value_states. Further calls to uni-directional self-attention\n # can concat previous decoder key/value_states to current projected key/value_states (third \"elif\" case)\n # if encoder bi-directional self-attention `past_key_value` is always `None`\n past_key_value = (key_states, value_states)\n\n proj_shape = (bsz * self.num_heads, -1, self.head_dim)\n query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)\n key_states = key_states.view(*proj_shape)\n value_states = value_states.view(*proj_shape)\n\n src_len = key_states.size(1)\n attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))\n\n assert attn_weights.size() == (\n bsz * self.num_heads,\n tgt_len,\n src_len,\n ), f\"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}\"\n\n if attention_mask is not None:\n assert attention_mask.size() == (\n bsz,\n 1,\n tgt_len,\n src_len,\n ), f\"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}\"\n attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask\n attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)\n\n attn_weights = F.softmax(attn_weights, dim=-1)\n\n if output_attentions:\n # this operation is a bit awkward, but it's required to\n # make sure that attn_weights keeps its gradient.\n # In order to do so, attn_weights have to be reshaped\n # twice and have to be reused in the following\n attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)\n attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)\n else:\n attn_weights_reshaped = None\n\n attn_probs = F.dropout(attn_weights, p=self.dropout, training=self.training)\n\n attn_output = torch.bmm(attn_probs, value_states)\n\n assert attn_output.size() == (\n bsz * self.num_heads,\n tgt_len,\n self.head_dim,\n ), f\"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}\"\n\n attn_output = (\n attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)\n .transpose(1, 2)\n .reshape(bsz, tgt_len, embed_dim)\n )\n\n attn_output = self.out_proj(attn_output)\n\n return attn_output, attn_weights_reshaped, past_key_value\n\n\n# Copied from transformers.models.mbart.modeling_mbart.MBartEncoderLayer with MBart->Blenderbot\nclass BlenderbotEncoderLayer(nn.Module):\n def __init__(self, config: BlenderbotConfig):\n super().__init__()\n self.embed_dim = config.d_model\n self.self_attn = BlenderbotAttention(\n embed_dim=self.embed_dim,\n num_heads=config.encoder_attention_heads,\n dropout=config.attention_dropout,\n )\n self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)\n self.dropout = config.dropout\n self.activation_fn = ACT2FN[config.activation_function]\n self.activation_dropout = config.activation_dropout\n self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)\n self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)\n self.final_layer_norm = nn.LayerNorm(self.embed_dim)\n\n def forward(self, hidden_states: torch.Tensor, attention_mask: 
torch.Tensor, output_attentions: bool = False):\n \"\"\"\n Args:\n hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`\n attention_mask (:obj:`torch.FloatTensor`): attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under\n returned tensors for more detail.\n \"\"\"\n residual = hidden_states\n hidden_states = self.self_attn_layer_norm(hidden_states)\n hidden_states, attn_weights, _ = self.self_attn(\n hidden_states=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions\n )\n hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n\n residual = hidden_states\n hidden_states = self.final_layer_norm(hidden_states)\n hidden_states = self.activation_fn(self.fc1(hidden_states))\n hidden_states = F.dropout(hidden_states, p=self.activation_dropout, training=self.training)\n hidden_states = self.fc2(hidden_states)\n hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n\n if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():\n clamp_value = torch.finfo(hidden_states.dtype).max - 1000\n hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)\n\n outputs = (hidden_states,)\n\n if output_attentions:\n outputs += (attn_weights,)\n\n return outputs\n\n\n# Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer with MBart->Blenderbot\nclass BlenderbotDecoderLayer(nn.Module):\n def __init__(self, config: BlenderbotConfig):\n super().__init__()\n self.embed_dim = config.d_model\n\n self.self_attn = BlenderbotAttention(\n embed_dim=self.embed_dim,\n num_heads=config.decoder_attention_heads,\n dropout=config.attention_dropout,\n is_decoder=True,\n )\n self.dropout = config.dropout\n self.activation_fn = ACT2FN[config.activation_function]\n self.activation_dropout = config.activation_dropout\n\n self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)\n self.encoder_attn = BlenderbotAttention(\n self.embed_dim,\n config.decoder_attention_heads,\n dropout=config.attention_dropout,\n is_decoder=True,\n )\n self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)\n self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)\n self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)\n self.final_layer_norm = nn.LayerNorm(self.embed_dim)\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n encoder_hidden_states: Optional[torch.Tensor] = None,\n encoder_attention_mask: Optional[torch.Tensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: Optional[bool] = False,\n use_cache: Optional[bool] = True,\n ):\n \"\"\"\n Args:\n hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`\n attention_mask (:obj:`torch.FloatTensor`): attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n encoder_hidden_states (:obj:`torch.FloatTensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)`\n encoder_attention_mask (:obj:`torch.FloatTensor`): encoder attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding 
elements are indicated by very large negative values.\n past_key_value (:obj:`Tuple(torch.FloatTensor)`): cached past key and value projection states\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under\n returned tensors for more detail.\n \"\"\"\n residual = hidden_states\n hidden_states = self.self_attn_layer_norm(hidden_states)\n\n # Self Attention\n # decoder uni-directional self-attention cached key/values tuple is at positions 1,2\n self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None\n # add present self-attn cache to positions 1,2 of present_key_value tuple\n hidden_states, self_attn_weights, present_key_value = self.self_attn(\n hidden_states=hidden_states,\n past_key_value=self_attn_past_key_value,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n )\n hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n\n # Cross-Attention Block\n cross_attn_present_key_value = None\n cross_attn_weights = None\n if encoder_hidden_states is not None:\n residual = hidden_states\n hidden_states = self.encoder_attn_layer_norm(hidden_states)\n\n # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple\n cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None\n hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(\n hidden_states=hidden_states,\n key_value_states=encoder_hidden_states,\n attention_mask=encoder_attention_mask,\n past_key_value=cross_attn_past_key_value,\n output_attentions=output_attentions,\n )\n hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n\n # add cross-attn to positions 3,4 of present_key_value tuple\n present_key_value = present_key_value + cross_attn_present_key_value\n\n # Fully Connected\n residual = hidden_states\n hidden_states = self.final_layer_norm(hidden_states)\n hidden_states = self.activation_fn(self.fc1(hidden_states))\n hidden_states = F.dropout(hidden_states, p=self.activation_dropout, training=self.training)\n hidden_states = self.fc2(hidden_states)\n hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n\n outputs = (hidden_states,)\n\n if output_attentions:\n outputs += (self_attn_weights, cross_attn_weights)\n\n if use_cache:\n outputs += (present_key_value,)\n\n return outputs\n\n\nclass BlenderbotPreTrainedModel(PreTrainedModel):\n config_class = BlenderbotConfig\n base_model_prefix = \"model\"\n\n def _init_weights(self, module):\n std = self.config.init_std\n if isinstance(module, nn.Linear):\n module.weight.data.normal_(mean=0.0, std=std)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=std)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n\n @property\n def dummy_inputs(self):\n pad_token = self.config.pad_token_id\n input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)\n dummy_inputs = {\n \"attention_mask\": input_ids.ne(pad_token),\n \"input_ids\": input_ids,\n \"decoder_input_ids\": input_ids,\n }\n return dummy_inputs\n\n\nBLENDERBOT_START_DOCSTRING = r\"\"\"\n This model inherits from :class:`~transformers.PreTrainedModel`. 
Check the superclass documentation for the generic\n methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,\n pruning heads etc.)\n\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\n subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to\n general usage and behavior.\n\n Parameters:\n config (:class:`~transformers.BlenderbotConfig`):\n Model configuration class with all the parameters of the model. Initializing with a config file does not\n load the weights associated with the model, only the configuration. Check out the\n :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.\n\"\"\"\n\nBLENDERBOT_GENERATION_EXAMPLE = r\"\"\"\n Conversation example::\n\n >>> from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration\n >>> mname = 'facebook/blenderbot-400M-distill'\n >>> model = BlenderbotForConditionalGeneration.from_pretrained(mname)\n >>> tokenizer = BlenderbotTokenizer.from_pretrained(mname)\n >>> UTTERANCE = \"My friends are cool but they eat too many carbs.\"\n >>> print(\"Human: \", UTTERANCE)\n >>> inputs = tokenizer([UTTERANCE], return_tensors='pt')\n >>> reply_ids = model.generate(**inputs)\n >>> print(\"Bot: \", tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])\n\n >>> REPLY = \"I'm not sure\"\n >>> print(\"Human: \", REPLY)\n >>> NEXT_UTTERANCE = (\n ... \"My friends are cool but they eat too many carbs.</s> <s>That's unfortunate. \"\n ... \"Are they trying to lose weight or are they just trying to be healthier?</s> \"\n ... \"<s> I'm not sure.\"\n ... )\n >>> inputs = tokenizer([NEXT_UTTERANCE], return_tensors='pt')\n >>> next_reply_ids = model.generate(**inputs)\n >>> print(\"Bot: \", tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0])\n\"\"\"\n\nBLENDERBOT_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using :class:`~transformers.BlenderbotTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for\n details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):\n Indices of decoder input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`~transformers.BlenderbotTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for\n details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n\n Blenderbot uses the :obj:`bos_token_id` as the starting token for :obj:`decoder_input_ids` generation. 
If\n :obj:`past_key_values` is used, optionally only the last :obj:`decoder_input_ids` have to be input (see\n :obj:`past_key_values`).\n decoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):\n Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will\n also be used by default.\n\n If you want to change padding behavior, you should read :func:`modeling_blenderbot._prepare_decoder_inputs`\n and modify to your needs. See diagram 1 in `the paper <https://arxiv.org/abs/1910.13461>`__ for more\n information on the default strategy.\n encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`):\n Tuple consists of (:obj:`last_hidden_state`, `optional`: :obj:`hidden_states`, `optional`:\n :obj:`attentions`) :obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)`,\n `optional`) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the\n cross-attention of the decoder.\n past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`.\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert :obj:`input_ids` indices into associated\n vectors than the model's internal embedding lookup matrix.\n decoder_inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_sequence_length, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`decoder_input_ids` you can choose to directly pass an embedded\n representation. If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_inputs_embeds`\n have to be input (see :obj:`past_key_values`). This is useful if you want more control over how to convert\n :obj:`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.\n\n If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both unset, :obj:`decoder_inputs_embeds`\n takes the value of :obj:`inputs_embeds`.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. 
See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\n\nclass BlenderbotEncoder(BlenderbotPreTrainedModel):\n \"\"\"\n Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a\n :class:`BlenderbotEncoderLayer`.\n\n Args:\n config: BlenderbotConfig\n embed_tokens (torch.nn.Embedding): output embedding\n \"\"\"\n\n def __init__(self, config: BlenderbotConfig, embed_tokens: Optional[nn.Embedding] = None):\n super().__init__(config)\n\n self.dropout = config.dropout\n self.layerdrop = config.encoder_layerdrop\n\n embed_dim = config.d_model\n self.padding_idx = config.pad_token_id\n self.max_source_positions = config.max_position_embeddings\n self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0\n\n if embed_tokens is not None:\n self.embed_tokens = embed_tokens\n else:\n self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)\n\n self.embed_positions = BlenderbotLearnedPositionalEmbedding(\n config.max_position_embeddings,\n embed_dim,\n self.padding_idx,\n )\n self.layers = nn.ModuleList([BlenderbotEncoderLayer(config) for _ in range(config.encoder_layers)])\n self.layer_norm = nn.LayerNorm(config.d_model)\n\n self.init_weights()\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you\n provide it.\n\n Indices can be obtained using :class:`~transformers.BlenderbotTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`\n for details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded\n representation. This is useful if you want more control over how to convert :obj:`input_ids` indices\n into associated vectors than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under\n returned tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. 
See ``hidden_states`` under returned tensors\n for more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # retrieve input_ids and inputs_embeds\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n input_ids = input_ids.view(-1, input_shape[-1])\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n if inputs_embeds is None:\n inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale\n\n embed_pos = self.embed_positions(input_shape)\n\n hidden_states = inputs_embeds + embed_pos\n hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)\n\n # expand attention_mask\n if attention_mask is not None:\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype)\n\n encoder_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n for encoder_layer in self.layers:\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n dropout_probability = random.uniform(0, 1)\n if self.training and (dropout_probability < self.layerdrop): # skip the layer\n layer_outputs = (None, None)\n else:\n if getattr(self.config, \"gradient_checkpointing\", False):\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs, output_attentions)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(encoder_layer),\n hidden_states,\n attention_mask,\n )\n else:\n layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions=output_attentions)\n\n hidden_states = layer_outputs[0]\n\n if output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n\n # add final layer norm\n hidden_states = self.layer_norm(hidden_states)\n\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n\n if not return_dict:\n return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)\n return BaseModelOutput(\n last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions\n )\n\n\nclass BlenderbotDecoder(BlenderbotPreTrainedModel):\n \"\"\"\n Transformer decoder consisting of *config.decoder_layers* layers. 
Each layer is a :class:`BlenderbotDecoderLayer`\n\n Args:\n config: BlenderbotConfig\n embed_tokens (torch.nn.Embedding): output embedding\n \"\"\"\n\n def __init__(self, config: BlenderbotConfig, embed_tokens: Optional[nn.Embedding] = None):\n super().__init__(config)\n self.dropout = config.dropout\n self.layerdrop = config.decoder_layerdrop\n self.padding_idx = config.pad_token_id\n self.max_target_positions = config.max_position_embeddings\n self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0\n\n if embed_tokens is not None:\n self.embed_tokens = embed_tokens\n else:\n self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)\n\n self.embed_positions = BlenderbotLearnedPositionalEmbedding(\n config.max_position_embeddings,\n config.d_model,\n self.padding_idx,\n )\n self.layers = nn.ModuleList([BlenderbotDecoderLayer(config) for _ in range(config.decoder_layers)])\n self.layer_norm = nn.LayerNorm(config.d_model)\n\n self.init_weights()\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n inputs_embeds=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you\n provide it.\n\n Indices can be obtained using :class:`~transformers.BlenderbotTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`\n for details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention\n of the decoder.\n encoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, encoder_sequence_length)`, `optional`):\n Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values\n selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden-states of the attention blocks. 
Can be used to speed up\n decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last\n :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of\n shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size,\n sequence_length)`.\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded\n representation. This is useful if you want more control over how to convert :obj:`input_ids` indices\n into associated vectors than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under\n returned tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors\n for more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # retrieve input_ids and inputs_embeds\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n input_ids = input_ids.view(-1, input_shape[-1])\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either decoder_input_ids or decoder_inputs_embeds\")\n\n # past_key_values_length\n past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0\n\n if inputs_embeds is None:\n inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale\n\n # create causal mask\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n combined_attention_mask = None\n if input_shape[-1] > 1:\n combined_attention_mask = _make_causal_mask(\n input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length\n ).to(self.device)\n\n if attention_mask is not None and combined_attention_mask is not None:\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n combined_attention_mask = combined_attention_mask + _expand_mask(\n attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]\n )\n\n # expand encoder attention mask\n if encoder_hidden_states is not None and encoder_attention_mask is not None:\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])\n\n # embed positions\n positions = self.embed_positions(input_shape, past_key_values_length)\n\n # in contrast to Bart, Blenderbot applies layernorm on inputs_embeds\n hidden_states = inputs_embeds + positions\n\n hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)\n\n # decoder layers\n all_hidden_states = () if 
output_hidden_states else None\n all_self_attns = () if output_attentions else None\n all_cross_attentions = () if output_attentions else None\n next_decoder_cache = () if use_cache else None\n for idx, decoder_layer in enumerate(self.layers):\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n if output_hidden_states:\n all_hidden_states += (hidden_states,)\n dropout_probability = random.uniform(0, 1)\n if self.training and (dropout_probability < self.layerdrop):\n continue\n\n past_key_value = past_key_values[idx] if past_key_values is not None else None\n\n if getattr(self.config, \"gradient_checkpointing\", False):\n if use_cache:\n raise ValueError(\n \"When using `gradient_checkpointing`, make sure that `use_cache=False` and `config.use_cache=False`.\"\n )\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n # None for past_key_value\n return module(*inputs, output_attentions, use_cache)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(decoder_layer),\n hidden_states,\n combined_attention_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n None,\n )\n else:\n\n layer_outputs = decoder_layer(\n hidden_states,\n attention_mask=combined_attention_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n past_key_value=past_key_value,\n output_attentions=output_attentions,\n use_cache=use_cache,\n )\n hidden_states = layer_outputs[0]\n\n if use_cache:\n next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)\n\n if output_attentions:\n all_self_attns += (layer_outputs[1],)\n all_cross_attentions += (layer_outputs[2],)\n\n # add final layer norm\n hidden_states = self.layer_norm(hidden_states)\n\n # add hidden states from the last decoder layer\n if output_hidden_states:\n all_hidden_states += (hidden_states,)\n\n next_cache = next_decoder_cache if use_cache else None\n if not return_dict:\n return tuple(\n v\n for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]\n if v is not None\n )\n return BaseModelOutputWithPastAndCrossAttentions(\n last_hidden_state=hidden_states,\n past_key_values=next_cache,\n hidden_states=all_hidden_states,\n attentions=all_self_attns,\n cross_attentions=all_cross_attentions,\n )\n\n\n@add_start_docstrings(\n \"The bare Blenderbot Model outputting raw hidden-states without any specific head on top.\",\n BLENDERBOT_START_DOCSTRING,\n)\nclass BlenderbotModel(BlenderbotPreTrainedModel):\n def __init__(self, config: BlenderbotConfig):\n super().__init__(config)\n\n padding_idx, vocab_size = config.pad_token_id, config.vocab_size\n self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)\n\n self.encoder = BlenderbotEncoder(config, self.shared)\n self.decoder = BlenderbotDecoder(config, self.shared)\n\n self.init_weights()\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs):\n if pretrained_model_name_or_path == \"facebook/blenderbot-90M\":\n warnings.warn(\n \"The checkpoint `facebook/blenderbot-90M` is deprecated. 
In the future, please use the identical checkpoint `facebook/small_blenderbot-90M` with `BlenderbotSmallModel.from_pretrained('facebook/small_blenderbot-90M')` instead.\",\n FutureWarning,\n )\n return BlenderbotSmallModel.from_pretrained(pretrained_model_name_or_path)\n\n return super(BlenderbotModel, cls).from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n\n def get_input_embeddings(self):\n return self.shared\n\n def set_input_embeddings(self, value):\n self.shared = value\n self.encoder.embed_tokens = self.shared\n self.decoder.embed_tokens = self.shared\n\n def get_encoder(self):\n return self.encoder\n\n def get_decoder(self):\n return self.decoder\n\n @add_start_docstrings_to_model_forward(BLENDERBOT_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n encoder_outputs=None,\n past_key_values=None,\n inputs_embeds=None,\n decoder_inputs_embeds=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n Returns:\n\n Example::\n\n >>> from transformers import BlenderbotTokenizer, BlenderbotModel\n\n >>> model = BlenderbotModel.from_pretrained(\"facebook/blenderbot-400M-distill\")\n >>> tokenizer = BlenderbotTokenizer.from_pretrained(\"facebook/blenderbot-400M-distill\")\n\n >>> input_ids = tokenizer(\"Studies have been shown that owning a dog is good for you\", return_tensors=\"pt\").input_ids # Batch size 1\n >>> decoder_input_ids = tokenizer(\"Studies show that\", return_tensors=\"pt\").input_ids # Batch size 1\n >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)\n\n >>> last_hidden_states = outputs.last_hidden_state\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if encoder_outputs is None:\n encoder_outputs = self.encoder(\n input_ids=input_ids,\n attention_mask=attention_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True\n elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):\n encoder_outputs = BaseModelOutput(\n last_hidden_state=encoder_outputs[0],\n hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,\n attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,\n )\n\n # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)\n decoder_outputs = self.decoder(\n input_ids=decoder_input_ids,\n attention_mask=decoder_attention_mask,\n encoder_hidden_states=encoder_outputs[0],\n encoder_attention_mask=attention_mask,\n past_key_values=past_key_values,\n inputs_embeds=decoder_inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n if not return_dict:\n return decoder_outputs + encoder_outputs\n\n return Seq2SeqModelOutput(\n 
last_hidden_state=decoder_outputs.last_hidden_state,\n past_key_values=decoder_outputs.past_key_values,\n decoder_hidden_states=decoder_outputs.hidden_states,\n decoder_attentions=decoder_outputs.attentions,\n cross_attentions=decoder_outputs.cross_attentions,\n encoder_last_hidden_state=encoder_outputs.last_hidden_state,\n encoder_hidden_states=encoder_outputs.hidden_states,\n encoder_attentions=encoder_outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"The Blenderbot Model with a language modeling head. Can be used for summarization.\", BLENDERBOT_START_DOCSTRING\n)\nclass BlenderbotForConditionalGeneration(BlenderbotPreTrainedModel):\n base_model_prefix = \"model\"\n _keys_to_ignore_on_load_missing = [\n r\"final_logits_bias\",\n r\"encoder\\.version\",\n r\"decoder\\.version\",\n r\"lm_head\\.weight\",\n ]\n\n def __init__(self, config: BlenderbotConfig):\n super().__init__(config)\n self.model = BlenderbotModel(config)\n self.register_buffer(\"final_logits_bias\", torch.zeros((1, self.model.shared.num_embeddings)))\n self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)\n\n self.init_weights()\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs):\n if pretrained_model_name_or_path == \"facebook/blenderbot-90M\":\n warnings.warn(\n \"The checkpoint `facebook/blenderbot-90M` is deprecated. In the future, please use the identical checkpoint `facebook/small_blenderbot-90M` with `BlenderbotSmallForConditionalGeneration.from_pretrained('facebook/small_blenderbot-90M')` instead.\",\n FutureWarning,\n )\n return BlenderbotSmallForConditionalGeneration.from_pretrained(pretrained_model_name_or_path)\n\n return super(BlenderbotForConditionalGeneration, cls).from_pretrained(\n pretrained_model_name_or_path, *model_args, **kwargs\n )\n\n def get_encoder(self):\n return self.model.get_encoder()\n\n def get_decoder(self):\n return self.model.get_decoder()\n\n def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:\n new_embeddings = super().resize_token_embeddings(new_num_tokens)\n self._resize_final_logits_bias(new_num_tokens)\n return new_embeddings\n\n def _resize_final_logits_bias(self, new_num_tokens: int) -> None:\n old_num_tokens = self.final_logits_bias.shape[-1]\n if new_num_tokens <= old_num_tokens:\n new_bias = self.final_logits_bias[:, :new_num_tokens]\n else:\n extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)\n new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)\n self.register_buffer(\"final_logits_bias\", new_bias)\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n @add_start_docstrings_to_model_forward(BLENDERBOT_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)\n @add_end_docstrings(BLENDERBOT_GENERATION_EXAMPLE)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n encoder_outputs=None,\n past_key_values=None,\n inputs_embeds=None,\n decoder_inputs_embeds=None,\n labels=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the masked language modeling loss. 
Indices should either be in ``[0, ...,\n config.vocab_size]`` or -100 (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are ignored\n (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.\n\n Returns:\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if labels is not None:\n if decoder_input_ids is None:\n decoder_input_ids = shift_tokens_right(\n labels, self.config.pad_token_id, self.config.decoder_start_token_id\n )\n\n outputs = self.model(\n input_ids,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n encoder_outputs=encoder_outputs,\n decoder_attention_mask=decoder_attention_mask,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n decoder_inputs_embeds=decoder_inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (lm_logits,) + outputs[1:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return Seq2SeqLMOutput(\n loss=masked_lm_loss,\n logits=lm_logits,\n past_key_values=outputs.past_key_values,\n decoder_hidden_states=outputs.decoder_hidden_states,\n decoder_attentions=outputs.decoder_attentions,\n cross_attentions=outputs.cross_attentions,\n encoder_last_hidden_state=outputs.encoder_last_hidden_state,\n encoder_hidden_states=outputs.encoder_hidden_states,\n encoder_attentions=outputs.encoder_attentions,\n )\n\n def prepare_inputs_for_generation(\n self, decoder_input_ids, past=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs\n ):\n # cut decoder_input_ids if past is used\n if past is not None:\n decoder_input_ids = decoder_input_ids[:, -1:]\n\n return {\n \"input_ids\": None, # encoder_outputs is defined. input_ids not needed\n \"encoder_outputs\": encoder_outputs,\n \"past_key_values\": past,\n \"decoder_input_ids\": decoder_input_ids,\n \"attention_mask\": attention_mask,\n \"use_cache\": use_cache, # change this to avoid caching (presumably for debugging)\n }\n\n def adjust_logits_during_generation(self, logits, cur_len, max_length):\n if cur_len == max_length - 1 and self.config.eos_token_id is not None:\n self._force_token_id_to_be_generated(logits, self.config.eos_token_id)\n return logits\n\n @staticmethod\n def _force_token_id_to_be_generated(scores, token_id) -> None:\n \"\"\"force one of token_ids to be generated by setting prob of all other tokens to 0 (logprob=-float(\"inf\"))\"\"\"\n scores[:, [x for x in range(scores.shape[1]) if x != token_id]] = -float(\"inf\")\n\n @staticmethod\n def _reorder_cache(past, beam_idx):\n reordered_past = ()\n for layer_past in past:\n # cached cross_attention states don't have to be reordered -> they are always the same\n reordered_past += (\n tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],\n )\n return reordered_past\n"
]
| [
[
"torch.nn.Linear",
"torch.zeros",
"torch.cat",
"torch.nn.LayerNorm",
"torch.arange",
"torch.isnan",
"torch.finfo",
"torch.nn.functional.dropout",
"torch.nn.CrossEntropyLoss",
"torch.bmm",
"torch.clamp",
"torch.tensor",
"torch.isinf",
"torch.nn.functional.softmax",
"torch.nn.Embedding"
]
]
|
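As a quick illustration of the `shift_tokens_right` helper stored in the Blenderbot entry above (a minimal, self-contained sketch; the example tensor values are made up, not taken from the dataset): decoder inputs are the labels shifted one step right, with `decoder_start_token_id` in the first slot and any `-100` loss-masking values replaced by `pad_token_id`.

import torch

def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
    # Same logic as the helper in the entry above: shift right, prepend the
    # decoder start token, then replace loss-masking -100 values with padding.
    shifted_input_ids = input_ids.new_zeros(input_ids.shape)
    shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
    shifted_input_ids[:, 0] = decoder_start_token_id
    shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
    return shifted_input_ids

labels = torch.tensor([[5, 6, -100, -100]])  # -100 marks positions ignored by the loss
print(shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=1))
# tensor([[1, 5, 6, 0]])

This is how `BlenderbotForConditionalGeneration.forward` builds `decoder_input_ids` from `labels` when none are passed explicitly.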
smiton/IllegalWebsiteClassifier | [
"830f755fe29708de4fff815cb2600413831fcaf5"
]
| [
"models/bert_MCNN.py"
]
| [
"# coding: UTF-8\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom pytorch_pretrained import BertModel, BertTokenizer\n\n\nclass Config(object):\n\n \"\"\"配置参数\"\"\"\n def __init__(self, dataset):\n self.model_name = 'bert'\n self.train_path = dataset + '/data/train.txt' # 训练集\n self.dev_path = dataset + '/data/dev.txt' # 验证集\n self.test_path = dataset + '/data/test.txt' # 测试集\n self.class_list = [x.strip() for x in open(\n dataset + '/data/class.txt').readlines()] # 类别名单\n self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt' # 模型训练结果\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # 设备\n\n self.require_improvement = 1000 # 若超过1000batch效果还没提升,则提前结束训练\n self.num_classes = len(self.class_list) # 类别数\n self.num_epochs = 3 # epoch数\n self.batch_size = 64 # mini-batch大小\n self.pad_size = 32 # 每句话处理成的长度(短填长切)\n self.learning_rate = 5e-5 # 学习率\n self.bert_path = './bert_pretrain'\n self.tokenizer = BertTokenizer.from_pretrained(self.bert_path)\n self.hidden_size = 768\n self.filter_sizes = (2, 3, 4) # 卷积核尺寸\n self.num_filters = 256 # 卷积核数量(channels数)\n self.dropout = 0.1\n\n\nclass Model(nn.Module):\n\n def __init__(self, config):\n super(Model, self).__init__()\n self.bert = BertModel.from_pretrained(config.bert_path)\n for param in self.bert.parameters():\n param.requires_grad = True\n self.convs = nn.ModuleList(\n [nn.Conv2d(1, config.num_filters, (k, config.hidden_size*4)) for k in config.filter_sizes])\n self.dropout = nn.Dropout(config.dropout)\n\n self.fc_cnn = nn.Linear(config.num_filters * len(config.filter_sizes), config.num_classes)\n\n def conv_and_pool(self, x, conv):\n x = F.relu(conv(x)).squeeze(3)\n x = F.max_pool1d(x, x.size(2)).squeeze(2)\n return x\n\n def forward(self, x):\n context = x[0] # 输入的句子\n mask = x[2] # 对padding部分进行mask,和句子一个size,padding部分用0表示,如:[1, 1, 1, 1, 0, 0]\n encoder_out, text_cls = self.bert(context, attention_mask=mask, output_all_encoded_layers=True)\n\n encoder_out = encoder_out[-4:]\n encoder_out = torch.cat([conv for conv in encoder_out], 2)\n\n out = encoder_out.unsqueeze(1)\n out = torch.cat([self.conv_and_pool(out, conv) for conv in self.convs], 1)\n out = self.dropout(out)\n out = self.fc_cnn(out)\n return out\n"
]
| [
[
"torch.nn.Dropout",
"torch.cat",
"torch.cuda.is_available",
"torch.nn.Conv2d"
]
]
|
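A minimal sketch of the conv-and-pool step used by the `bert_MCNN` entry above (the batch size and random tensors below are illustrative assumptions; `pad_size=32`, `hidden_size=768`, and the filter settings come from its `Config`): the last four BERT layers are concatenated along the hidden dimension, convolved with region sizes (2, 3, 4), and max-pooled over the time axis.

import torch
import torch.nn as nn
import torch.nn.functional as F

batch, seq_len, hidden = 2, 32, 768                # pad_size=32, hidden_size=768 as in Config
last_four = [torch.randn(batch, seq_len, hidden) for _ in range(4)]  # stand-ins for BERT layers
x = torch.cat(last_four, dim=2).unsqueeze(1)       # [batch, 1, seq_len, 4*hidden]

convs = nn.ModuleList([nn.Conv2d(1, 256, (k, hidden * 4)) for k in (2, 3, 4)])

def conv_and_pool(x, conv):
    x = F.relu(conv(x)).squeeze(3)                 # [batch, 256, seq_len - k + 1]
    return F.max_pool1d(x, x.size(2)).squeeze(2)   # [batch, 256]

out = torch.cat([conv_and_pool(x, conv) for conv in convs], dim=1)
print(out.shape)                                   # torch.Size([2, 768]) -> input to fc_cnn

Max-pooling over the full time axis makes the classifier invariant to where an n-gram feature occurs in the sentence, which is the usual TextCNN design choice.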
tcapelle/pvfactors | [
"1aaf6cdd3066a3a68d93db4ad7abcf10e97b5620"
]
| [
"pvfactors/tests/test_geometry/test_pvarray.py"
]
| [
"import os\nimport numpy as np\nimport pandas as pd\nfrom pvfactors.geometry import OrderedPVArray, PVGround, PVSurface\nfrom pvfactors.geometry.utils import contains\nfrom pvfactors.config import MAX_X_GROUND, MIN_X_GROUND\nfrom pvfactors.tests.test_geometry.test_data import \\\n vm_flat_orderedpvarray, vm_right_orderedpvarray\n\n\ndef test_ordered_pvarray_from_dict(params):\n \"\"\"Test that can successfully create ordered pvarray from parameters dict,\n and that the axis azimuth convention works correctly (via normal vector)\n \"\"\"\n pvarray = OrderedPVArray.transform_from_dict_of_scalars(params)\n\n # Test that ground is created successfully\n assert isinstance(pvarray.ground, PVGround)\n # TODO: check why this is not matching exactly: hint = look at length\n # of ground shaded surfaces, some small tolerance may be chipped away\n np.testing.assert_allclose(pvarray.ground.length,\n MAX_X_GROUND - MIN_X_GROUND)\n\n # Test the front and back sides\n assert len(pvarray.pvrows) == 3\n np.testing.assert_array_equal(\n pvarray.pvrows[0].front.n_vector, -pvarray.pvrows[0].back.n_vector)\n assert pvarray.pvrows[0].front.shaded_length == 0\n assert pvarray.gcr == params['gcr']\n assert np.abs(pvarray.rotation_vec) == params['surface_tilt']\n assert pvarray.pvrows[0].front.n_vector[0] > 0\n distance_between_pvrows = \\\n pvarray.pvrows[1].centroid.x - pvarray.pvrows[0].centroid.x\n assert distance_between_pvrows == 5.0\n\n # Orient the array the other way\n params.update({'surface_azimuth': 270.})\n pvarray = OrderedPVArray.transform_from_dict_of_scalars(params)\n assert pvarray.pvrows[0].front.n_vector[0] < 0\n\n\ndef test_plot_ordered_pvarray():\n \"\"\"Test that ordered pv array plotting works correctly\"\"\"\n is_ci = os.environ.get('CI', False)\n if not is_ci:\n import matplotlib.pyplot as plt\n\n # Create base params\n params = {\n 'n_pvrows': 3,\n 'pvrow_height': 2.5,\n 'pvrow_width': 2.,\n 'surface_azimuth': 90., # east oriented modules / point right\n 'axis_azimuth': 0., # axis of rotation towards North\n 'surface_tilt': 20.,\n 'gcr': 0.4,\n 'solar_zenith': 20.,\n 'solar_azimuth': 90., # sun located in the east\n 'rho_ground': 0.2,\n 'rho_front_pvrow': 0.01,\n 'rho_back_pvrow': 0.03\n }\n\n # Plot simple ordered pv array\n ordered_pvarray = OrderedPVArray.transform_from_dict_of_scalars(params)\n f, ax = plt.subplots()\n ordered_pvarray.plot(ax)\n plt.show()\n\n # Plot discretized ordered pv array\n params.update({'cut': {0: {'front': 5}, 1: {'back': 3}},\n 'surface_azimuth': 270.}) # point left\n ordered_pvarray = OrderedPVArray.transform_from_dict_of_scalars(params)\n f, ax = plt.subplots()\n ordered_pvarray.plot(ax)\n plt.show()\n\n\ndef test_discretization_ordered_pvarray(discr_params):\n pvarray = OrderedPVArray.transform_from_dict_of_scalars(discr_params)\n pvrows = pvarray.pvrows\n\n assert len(pvrows[0].front.list_segments) == 5\n assert len(pvrows[0].back.list_segments) == 1\n assert len(pvrows[1].back.list_segments) == 3\n assert len(pvrows[1].front.list_segments) == 2\n\n\ndef test_ordered_pvarray_gnd_shadow_casting(params):\n \"\"\"Test shadow casting on ground, no inter-row shading\"\"\"\n\n # Test front shading on right\n ordered_pvarray = OrderedPVArray.transform_from_dict_of_scalars(params)\n # Check shadow casting on ground\n assert len(ordered_pvarray.ground.list_segments[0]\n .shaded_collection.list_surfaces) == 3\n assert len(ordered_pvarray.ground.list_segments[0]\n .illum_collection.list_surfaces) == 7\n assert ordered_pvarray.ground.shaded_length == 
6.385066634855473\n\n\ndef test_ordered_pvarray_gnd_pvrow_shadow_casting_right(params_direct_shading):\n\n # Test front shading on right\n ordered_pvarray = OrderedPVArray.transform_from_dict_of_scalars(\n params_direct_shading)\n # Check shadow casting on ground\n assert len(ordered_pvarray.ground.list_segments[0]\n .shaded_collection.list_surfaces) == 2\n assert len(ordered_pvarray.ground.list_segments[0]\n .illum_collection.list_surfaces) == 4\n np.testing.assert_allclose(ordered_pvarray.ground.length,\n MAX_X_GROUND - MIN_X_GROUND)\n\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[0].front.shaded_length, 0.33333333333333254)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[1].front.shaded_length, 0.33333333333333254)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[2].front.shaded_length, 0.)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[0].back.shaded_length, 0.)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[1].back.shaded_length, 0.)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[2].back.shaded_length, 0.)\n\n\ndef test_ordered_pvarray_gnd_pvrow_shadow_casting_left(params_direct_shading):\n\n params_direct_shading.update({'solar_azimuth': 270,\n 'surface_azimuth': 270})\n # Test front shading on right\n ordered_pvarray = OrderedPVArray.transform_from_dict_of_scalars(\n params_direct_shading)\n # Check shadow casting on ground\n assert len(ordered_pvarray.ground.list_segments[0]\n .shaded_collection.list_surfaces) == 2\n assert len(ordered_pvarray.ground.list_segments[0]\n .illum_collection.list_surfaces) == 4\n np.testing.assert_allclose(ordered_pvarray.ground.length,\n MAX_X_GROUND - MIN_X_GROUND)\n\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[2].front.shaded_length, 0.33333333333333254)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[1].front.shaded_length, 0.33333333333333254)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[0].front.shaded_length, 0.)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[2].back.shaded_length, 0.)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[1].back.shaded_length, 0.)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[0].back.shaded_length, 0.)\n\n\ndef test_ordered_pvarray_gnd_pvrow_shadow_casting_back(params_direct_shading):\n\n params_direct_shading.update({'solar_azimuth': 270,\n 'surface_tilt': 120})\n\n # Test front shading on right\n ordered_pvarray = OrderedPVArray.transform_from_dict_of_scalars(\n params_direct_shading)\n # Check shadow casting on ground\n assert len(ordered_pvarray.ground.list_segments[0]\n .shaded_collection.list_surfaces) == 2\n assert len(ordered_pvarray.ground.list_segments[0]\n .illum_collection.list_surfaces) == 4\n np.testing.assert_allclose(ordered_pvarray.ground.length,\n MAX_X_GROUND - MIN_X_GROUND)\n\n # Shading length should be identical as in previous test for front surface,\n # but now with back surface\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[2].back.shaded_length, 0.33333333333333254)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[1].back.shaded_length, 0.33333333333333254)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[0].back.shaded_length, 0.)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[2].front.shaded_length, 0.)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[1].front.shaded_length, 0.)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[0].front.shaded_length, 0.)\n\n\ndef 
test_ordered_pvarray_gnd_pvrow_shadow_casting_right_n_seg(\n params_direct_shading):\n\n params_direct_shading.update({'cut': {1: {'front': 7}}})\n # Test front shading on right\n ordered_pvarray = OrderedPVArray.transform_from_dict_of_scalars(\n params_direct_shading)\n # Check shadow casting on ground\n assert len(ordered_pvarray.ground.list_segments[0]\n .shaded_collection.list_surfaces) == 2\n assert len(ordered_pvarray.ground.list_segments[0]\n .illum_collection.list_surfaces) == 4\n np.testing.assert_allclose(ordered_pvarray.ground.length,\n MAX_X_GROUND - MIN_X_GROUND)\n\n # Test pvrow sides: should be the same as without segments\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[0].front.shaded_length, 0.33333333333333254)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[1].front.shaded_length, 0.33333333333333254)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[2].front.shaded_length, 0.)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[0].back.shaded_length, 0.)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[1].back.shaded_length, 0.)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[2].back.shaded_length, 0.)\n\n # Test individual segments\n center_row = ordered_pvarray.pvrows[1]\n list_pvsegments = center_row.front.list_segments\n fully_shaded_segment = list_pvsegments[-1]\n partial_shaded_segment = list_pvsegments[-2]\n assert fully_shaded_segment.illum_collection.is_empty\n np.testing.assert_almost_equal(\n fully_shaded_segment.shaded_collection.length,\n list_pvsegments[0].length)\n assert partial_shaded_segment.shaded_collection.length > 0\n assert partial_shaded_segment.illum_collection.length > 0\n sum_lengths = (partial_shaded_segment.illum_collection.length +\n partial_shaded_segment.shaded_collection.length)\n np.testing.assert_almost_equal(sum_lengths, list_pvsegments[0].length)\n\n\ndef test_ordered_pvarray_gnd_pvrow_shadow_casting_back_n_seg(\n params_direct_shading):\n\n params_direct_shading.update({'cut': {1: {'back': 7}},\n 'solar_azimuth': 270,\n 'surface_tilt': 120})\n # Test front shading on right\n ordered_pvarray = OrderedPVArray.transform_from_dict_of_scalars(\n params_direct_shading)\n # Check shadow casting on ground\n assert len(ordered_pvarray.ground.list_segments[0]\n .shaded_collection.list_surfaces) == 2\n assert len(ordered_pvarray.ground.list_segments[0]\n .illum_collection.list_surfaces) == 4\n np.testing.assert_allclose(ordered_pvarray.ground.length,\n MAX_X_GROUND - MIN_X_GROUND)\n\n # Shading length should be identical as in previous test for front surface,\n # but now with back surface\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[2].back.shaded_length, 0.33333333333333254)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[1].back.shaded_length, 0.33333333333333254)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[0].back.shaded_length, 0.)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[2].front.shaded_length, 0.)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[1].front.shaded_length, 0.)\n np.testing.assert_almost_equal(\n ordered_pvarray.pvrows[0].front.shaded_length, 0.)\n\n # Test individual segments\n center_row = ordered_pvarray.pvrows[1]\n list_pvsegments = center_row.back.list_segments\n fully_shaded_segment = list_pvsegments[-1]\n partial_shaded_segment = list_pvsegments[-2]\n assert fully_shaded_segment.illum_collection.is_empty\n np.testing.assert_almost_equal(\n fully_shaded_segment.shaded_collection.length,\n 
list_pvsegments[0].length)\n    assert partial_shaded_segment.shaded_collection.length > 0\n    assert partial_shaded_segment.illum_collection.length > 0\n    sum_lengths = (partial_shaded_segment.illum_collection.length +\n                   partial_shaded_segment.shaded_collection.length)\n    np.testing.assert_almost_equal(sum_lengths, list_pvsegments[0].length)\n\n\ndef test_ordered_pvarray_cuts_for_pvrow_view(ordered_pvarray):\n    \"\"\"Test that pvarray ground is cut correctly\"\"\"\n\n    n_surfaces_ground_before_cut = 7\n    ground_length = 200.0\n\n    n_surfaces_1 = ordered_pvarray.ground.n_surfaces\n    len_1 = ordered_pvarray.ground.length\n\n    assert n_surfaces_1 == n_surfaces_ground_before_cut + 3\n    np.testing.assert_allclose(len_1, ground_length)\n\n\ndef test_ordered_pvarray_list_surfaces(ordered_pvarray):\n    \"\"\"Check that we get a correct list of surfaces\"\"\"\n    n_surfaces = ordered_pvarray.n_surfaces\n    list_surfaces = ordered_pvarray.all_surfaces\n\n    assert isinstance(list_surfaces, list)\n    assert len(list_surfaces) == n_surfaces\n    assert isinstance(list_surfaces[0], PVSurface)\n\n\ndef test_build_surface_registry(ordered_pvarray):\n    \"\"\"Test that the surface registry is built correctly\"\"\"\n    reg = ordered_pvarray.surface_registry\n\n    assert reg.shape[0] == ordered_pvarray.n_surfaces\n    assert reg.shape[1] == len(ordered_pvarray.registry_cols)\n\n\ndef test_get_all_surface_indices(ordered_pvarray):\n\n    # Check surface indices before indexing\n    surf_indices = ordered_pvarray.surface_indices\n    assert surf_indices == [None] * ordered_pvarray.n_surfaces\n\n    # Check surface indices after indexing\n    ordered_pvarray.index_all_surfaces()\n    surf_indices = ordered_pvarray.surface_indices\n    np.testing.assert_array_equal(surf_indices,\n                                  range(ordered_pvarray.n_surfaces))\n\n\ndef test_view_matrix_flat(params):\n\n    # Make flat\n    params.update({'surface_tilt': 0})\n\n    # Create pvarray\n    pvarray = OrderedPVArray.transform_from_dict_of_scalars(params)\n\n    # Build view matrix\n    vm = pvarray.view_matrix\n\n    assert vm.shape[0] == pvarray.n_surfaces + 1\n    np.testing.assert_array_equal(vm, vm_flat_orderedpvarray)\n\n\ndef test_view_matrix(params):\n\n    params.update({'surface_azimuth': 270})\n\n    # Create pvarray\n    pvarray = OrderedPVArray.transform_from_dict_of_scalars(params)\n\n    # Build view matrix and obstruction matrix\n    vm, om = pvarray._build_view_matrix()\n\n    assert vm.shape[0] == pvarray.n_surfaces + 1\n    np.testing.assert_array_equal(vm, vm_right_orderedpvarray)\n    # The view matrix mask should be symmetric\n    mask_vm = np.where(vm != 0, 1, 0)\n    np.testing.assert_array_equal(mask_vm[:-1, :-1], mask_vm.T[:-1, :-1])\n    # Remove the sky row and column, because the last row was not filled\n\n    # The obstruction matrix should be symmetric\n    np.testing.assert_array_equal(om, om.T)\n    # TODO: test values against saved array\n\n\ndef test_param_names(params):\n\n    param_names = ['qinc']\n    pvarray = OrderedPVArray.transform_from_dict_of_scalars(\n        params, param_names=param_names)\n\n    # Set all surface parameters to 1\n    pvarray.update_params({'qinc': 1})\n\n    # Check that all surfaces have the correct surface params\n    all_surfaces = pvarray.all_surfaces\n    for surf in all_surfaces:\n        assert surf.param_names == param_names\n        assert surf.get_param('qinc') == 1\n\n    # Check weighted values\n    np.testing.assert_almost_equal(\n        pvarray.ground.get_param_weighted('qinc'), 1)\n    np.testing.assert_almost_equal(\n        pvarray.ground.get_param_ww('qinc'),\n        pvarray.ground.length)\n    for pvrow in pvarray.pvrows:\n        # Front\n        np.testing.assert_almost_equal(\n            
pvrow.front.get_param_weighted('qinc'), 1)\n np.testing.assert_almost_equal(\n pvrow.front.get_param_ww('qinc'), pvrow.front.length)\n # Back\n np.testing.assert_almost_equal(\n pvrow.back.get_param_weighted('qinc'), 1)\n np.testing.assert_almost_equal(\n pvrow.back.get_param_ww('qinc'), pvrow.back.length)\n\n\ndef test_orderedpvarray_neighbors(params):\n \"\"\"Check that pvrow neighbors are determined correctly\"\"\"\n\n pvarray_right = OrderedPVArray.transform_from_dict_of_scalars(params)\n params.update({'surface_azimuth': 270})\n pvarray_left = OrderedPVArray.transform_from_dict_of_scalars(params)\n\n # Check\n l1 = [None, 0, 1]\n l2 = [1, 2, None]\n np.testing.assert_array_equal(pvarray_right.front_neighbors, l2)\n np.testing.assert_array_equal(pvarray_right.back_neighbors, l1)\n np.testing.assert_array_equal(pvarray_left.front_neighbors, l1)\n np.testing.assert_array_equal(pvarray_left.back_neighbors, l2)\n\n\ndef test_orderedpvarray_almost_flat():\n \"\"\"Making sure that things are correct when the pvarray is almost flat\n and the sun is very low, which means that the shadows on the ground, and\n the edge points will be outside of ground range (since not infinite)\"\"\"\n\n params = {\n 'n_pvrows': 3,\n 'pvrow_height': 2.5,\n 'pvrow_width': 2.,\n 'surface_azimuth': 90., # east oriented modules\n 'axis_azimuth': 0., # axis of rotation towards North\n 'surface_tilt': 0.01, # almost flat\n 'gcr': 0.4,\n 'solar_zenith': 89.9, # sun super low\n 'solar_azimuth': 90., # sun located in the east\n }\n\n pvarray = OrderedPVArray.transform_from_dict_of_scalars(params)\n view_matrix = pvarray.view_matrix\n\n ground_seg = pvarray.ground.list_segments[0]\n # there should be no visible shadow on the ground\n assert len(ground_seg.shaded_collection.list_surfaces) == 0\n # all of the edge points should be outside of range of ground geometry\n for edge_pt in pvarray.edge_points:\n assert not contains(pvarray.ground.original_linestring, edge_pt)\n\n # Check values of view matrix mask, to make sure that front does not\n # see the ground\n vm_mask = np.where(view_matrix > 0, 1, 0)\n expected_vm_mask = [\n [0, 0, 1, 0, 1, 0, 1, 1], # ground\n [0, 0, 0, 0, 1, 0, 0, 1], # front\n [1, 0, 0, 0, 0, 0, 0, 1], # back\n [0, 0, 0, 0, 0, 0, 1, 1], # front\n [1, 1, 0, 0, 0, 0, 0, 1], # back\n [0, 0, 0, 0, 0, 0, 0, 1], # front\n [1, 0, 0, 1, 0, 0, 0, 1], # back\n [0, 0, 0, 0, 0, 0, 0, 0]]\n np.testing.assert_array_equal(vm_mask, expected_vm_mask)\n\n\ndef test_time_ordered_pvarray(params):\n\n from pvfactors.viewfactors import VFCalculator\n\n import time\n n = 100\n list_elapsed = []\n for _ in range(n):\n tic = time.time()\n pvarray = OrderedPVArray.transform_from_dict_of_scalars(params)\n pvarray.index_all_surfaces()\n # sr = pvarray.surface_registry\n # vm = pvarray.view_matrix\n vm, om = pvarray._build_view_matrix()\n geom_dict = pvarray.dict_surfaces\n\n calculator = VFCalculator()\n # number 1 time consuming, triples run time\n vf_matrix = calculator.get_vf_matrix(geom_dict, vm, om,\n pvarray.pvrows)\n toc = time.time()\n list_elapsed.append(toc - tic)\n\n print(\"\\nAvg time elapsed: {} s\".format(np.mean(list_elapsed)))\n\n\ndef test_ordered_pvarray_gnd_shadow_casting_tolerance():\n \"\"\"It seems that there are roundoff errors when running shadow casting\n on some computers, test that this case works.\"\"\"\n\n params = {'axis_azimuth': 0,\n 'gcr': 0.3,\n 'n_pvrows': 3,\n 'pvrow_height': 1.8,\n 'pvrow_width': 1.98,\n 'solar_azimuth': 263.99310644558074,\n 'solar_zenith': 73.91658668648401,\n 
'surface_azimuth': 270.0,\n 'surface_tilt': 51.98206680806641}\n pvarray_w_direct_shading = OrderedPVArray.transform_from_dict_of_scalars(\n params)\n\n # Check that 3 shadows on ground\n assert (pvarray_w_direct_shading.ground.list_segments[0]\n .shaded_collection.n_surfaces) == 5\n # Check that there is no shading on the center pv row\n pvrow = pvarray_w_direct_shading.pvrows[1]\n assert (pvrow.front.list_segments[0]\n .shaded_collection.n_surfaces) == 0\n\n\ndef test_coords_ground_shadows():\n\n # Create base params\n params = {\n 'axis_azimuth': 0,\n 'n_pvrows': 2,\n 'pvrow_height': 2.5,\n 'pvrow_width': 2.,\n 'gcr': 0.4,\n 'cut': {0: {'front': 5}, 1: {'back': 3}}\n }\n\n # Timeseries parameters for testing\n solar_zenith = np.array([20., 45.])\n solar_azimuth = np.array([70., 200.])\n surface_tilt = np.array([10., 70.])\n surface_azimuth = np.array([90., 270.])\n\n # Plot simple ordered pv array\n ordered_pvarray = OrderedPVArray(**params)\n ordered_pvarray.fit(solar_zenith, solar_azimuth, surface_tilt,\n surface_azimuth)\n\n expected_gnd_shadow_coords = [\n [([-1.89924929, 0.19163641], [0., 0.]),\n ([0.18914857, 1.51846431], [0., 0.])],\n [([3.10075071, 5.19163641], [0., 0.]),\n ([5.18914857, 6.51846431], [0., 0.])]\n ]\n gnd_shadow_coords = [shadow.coords.as_array\n for shadow in ordered_pvarray.ts_ground.shadows]\n\n np.testing.assert_almost_equal(\n expected_gnd_shadow_coords, gnd_shadow_coords)\n\n\ndef test_coords_cut_points():\n\n # Create base params\n params = {\n 'axis_azimuth': 0,\n 'n_pvrows': 2,\n 'pvrow_height': 2.5,\n 'pvrow_width': 2.,\n 'gcr': 0.4,\n 'cut': {0: {'front': 5}, 1: {'back': 3}}\n }\n\n # Timeseries parameters for testing\n solar_zenith = np.array([20., 45.])\n solar_azimuth = np.array([70., 200.])\n surface_tilt = np.array([10., 70.])\n surface_azimuth = np.array([90., 270.])\n\n # Plot simple ordered pv array\n ordered_pvarray = OrderedPVArray(**params)\n ordered_pvarray.fit(solar_zenith, solar_azimuth, surface_tilt,\n surface_azimuth)\n\n expected_cut_point_coords = [\n [[14.17820455, -0.90992559], [0., 0.]],\n [[19.17820455, 4.09007441], [0., 0.]]]\n\n cut_pt_coords = [cut_point.as_array\n for cut_point in\n ordered_pvarray.ts_ground.cut_point_coords]\n np.testing.assert_almost_equal(\n expected_cut_point_coords, cut_pt_coords)\n\n\ndef test_ordered_pvarray_from_dict_w_direct_shading():\n \"\"\"Test that can successfully create ordered pvarray from parameters dict,\n and that the axis azimuth convention works correctly (via normal vector),\n and check that ground surfaces make sense.\n Came from direct shading case where ground shadows not correctly created\n \"\"\"\n # Specify array parameters\n params = {\n 'n_pvrows': 3,\n 'pvrow_height': 1,\n 'pvrow_width': 1,\n 'axis_azimuth': 0.,\n 'gcr': 0.4,\n 'rho_front_pvrow': 0.01,\n 'rho_back_pvrow': 0.03,\n 'solar_zenith': 74,\n 'solar_azimuth': 229,\n 'surface_tilt': 50,\n 'surface_azimuth': 270\n }\n pvarray = OrderedPVArray.transform_from_dict_of_scalars(params)\n\n # Test that ground is created successfully\n assert isinstance(pvarray.ground, PVGround)\n np.testing.assert_equal(pvarray.ground.length,\n MAX_X_GROUND - MIN_X_GROUND)\n np.testing.assert_equal(pvarray.pvrows[0].length, 2)\n np.testing.assert_equal(pvarray.pvrows[1].length, 2)\n np.testing.assert_equal(pvarray.pvrows[2].length, 2)\n\n # Test the front and back sides\n assert len(pvarray.pvrows) == 3\n np.testing.assert_array_equal(\n pvarray.pvrows[0].front.n_vector, -pvarray.pvrows[0].back.n_vector)\n 
np.testing.assert_allclose(pvarray.pvrows[1].front.shaded_length,\n 0.05979874)\n assert pvarray.gcr == params['gcr']\n assert np.abs(pvarray.rotation_vec) == params['surface_tilt']\n assert pvarray.pvrows[0].front.n_vector[0] < 0\n distance_between_pvrows = \\\n pvarray.pvrows[1].centroid.x - pvarray.pvrows[0].centroid.x\n assert distance_between_pvrows == 2.5\n\n\ndef test_ordered_pvarray_direct_shading():\n \"\"\"Test that direct shading is calculated correctly in the following\n 5 situations:\n - PV rows tilted to the left and front side shading\n - PV rows tilted to the right and front side shading\n - PV rows tilted to the left and back side shading\n - PV rows tilted to the right and back side shading\n - no shading\n \"\"\"\n # Base params\n params = {\n 'n_pvrows': 3,\n 'pvrow_height': 1,\n 'pvrow_width': 1,\n 'axis_azimuth': 0.,\n 'gcr': 0.5\n }\n # Timeseries inputs\n df_inputs = pd.DataFrame({\n 'solar_zenith': [70., 80., 80., 70., 10.],\n 'solar_azimuth': [270., 90., 270., 90., 90.],\n 'surface_tilt': [45., 45., 45., 45., 45.],\n 'surface_azimuth': [270., 270., 90., 90., 90.]})\n\n # Initialize and fit pv array\n pvarray = OrderedPVArray.init_from_dict(params)\n # Fit pv array to timeseries data\n pvarray.fit(df_inputs.solar_zenith, df_inputs.solar_azimuth,\n df_inputs.surface_tilt, df_inputs.surface_azimuth)\n\n expected_ts_front_shading = [0.24524505, 0., 0., 0.24524505, 0.]\n expected_ts_back_shading = [0., 0.39450728, 0.39450728, 0., 0.]\n\n # Test that timeseries shading calculated correctly\n np.testing.assert_allclose(expected_ts_front_shading,\n pvarray.shaded_length_front)\n np.testing.assert_allclose(expected_ts_back_shading,\n pvarray.shaded_length_back)\n"
]
| [
[
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.testing.assert_equal",
"numpy.testing.assert_almost_equal",
"pandas.DataFrame",
"numpy.testing.assert_array_equal",
"matplotlib.pyplot.subplots",
"numpy.mean",
"numpy.where",
"numpy.abs",
"matplotlib.pyplot.show"
]
]
|
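The test module above exercises pvfactors' OrderedPVArray through the numpy.testing assertions listed in the apis column. A minimal sketch of the pattern those tests repeat, reusing the parameter values from test_plot_ordered_pvarray and the 200.0 ground length asserted in test_ordered_pvarray_cuts_for_pvrow_view; only calls that appear in the tests themselves are used here:

import numpy as np
from pvfactors.geometry import OrderedPVArray

# Parameter values copied from test_plot_ordered_pvarray above
params = {'n_pvrows': 3, 'pvrow_height': 2.5, 'pvrow_width': 2.,
          'surface_azimuth': 90., 'axis_azimuth': 0., 'surface_tilt': 20.,
          'gcr': 0.4, 'solar_zenith': 20., 'solar_azimuth': 90.,
          'rho_ground': 0.2, 'rho_front_pvrow': 0.01, 'rho_back_pvrow': 0.03}

pvarray = OrderedPVArray.transform_from_dict_of_scalars(params)

# Integer counts are checked exactly; geometric lengths are floats, which is
# why the tests compare them with assert_allclose / assert_almost_equal
assert len(pvarray.pvrows) == 3
np.testing.assert_allclose(pvarray.ground.length, 200.0)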
md12g12/pymeasure | [
"f5b693543c4c4a4fdc2350e9fda00140a74bc50d"
]
| [
"pymeasure/instruments/keithley/keithley2600.py"
]
| [
"# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2020 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\nimport logging\nimport time\nimport numpy as np\nfrom pymeasure.instruments import Instrument\nfrom pymeasure.instruments.validators import truncated_range, strict_discrete_set\n\n# Setup logging\nlog = logging.getLogger(__name__)\nlog.addHandler(logging.NullHandler())\n\n\nclass Keithley2600(Instrument):\n \"\"\"Represents the Keithley 2600 series (channel A and B) SourceMeter\"\"\"\n\n def __init__(self, adapter, **kwargs):\n super(Keithley2600, self).__init__(\n adapter,\n \"Keithley 2600 SourceMeter\",\n **kwargs\n )\n self.ChA = Channel(self, 'a')\n self.ChB = Channel(self, 'b')\n\n @property\n def error(self):\n \"\"\" Returns a tuple of an error code and message from a\n single error. \"\"\"\n err = self.ask('print(errorqueue.next())')\n err = err.split('\\t')\n # Keithley Instruments Inc. 
sometimes on startup\n        # if tab delimited message is greater than one, grab first two as code, message\n        # otherwise, assign code & message to returned error\n        if len(err) > 1:\n            err = (int(float(err[0])), err[1])\n            code = err[0]\n            message = err[1].replace('\"', '')\n        else:\n            code = message = err[0]\n        log.info(\"ERROR %s,%s - len %s\" % (str(code), str(message), str(len(err))))\n        return (code, message)\n\n    def check_errors(self):\n        \"\"\" Logs any system errors reported by the instrument.\n        \"\"\"\n        code, message = self.error\n        while code != 0:\n            t = time.time()\n            log.info(\"Keithley 2600 reported error: %d, %s\" % (code, message))\n            code, message = self.error\n            if (time.time() - t) > 10:\n                log.warning(\"Timed out for Keithley 2600 error retrieval.\")\n\nclass Channel(object):\n\n    def __init__(self, instrument, channel):\n        self.instrument = instrument\n        self.channel = channel\n\n    def ask(self, cmd):\n        return float(self.instrument.ask('print(smu%s.%s)' % (self.channel, cmd)))\n\n    def write(self, cmd):\n        self.instrument.write('smu%s.%s' % (self.channel, cmd))\n\n    def values(self, cmd, **kwargs):\n        \"\"\" Reads a set of values from the instrument through the adapter,\n        passing on any key-word arguments.\n        \"\"\"\n        return self.instrument.values('print(smu%s.%s)' % (self.channel, cmd), **kwargs)\n\n    def binary_values(self, cmd, header_bytes=0, dtype=np.float32):\n        return self.instrument.binary_values('print(smu%s.%s)' % (self.channel, cmd), header_bytes, dtype)\n\n    def check_errors(self):\n        return self.instrument.check_errors()\n\n    source_output = Instrument.control(\n        'source.output', 'source.output=%d',\n        \"\"\"Property controlling the channel output state (ON or OFF)\n        \"\"\",\n        validator=strict_discrete_set,\n        values={'OFF': 0, 'ON': 1},\n        map_values=True\n    )\n\n    source_mode = Instrument.control(\n        'source.func', 'source.func=%d',\n        \"\"\"Property controlling the channel source function (Voltage or Current)\n        \"\"\",\n        validator=strict_discrete_set,\n        values={'voltage': 1, 'current': 0},\n        map_values=True\n    )\n\n    measure_nplc = Instrument.control(\n        'measure.nplc', 'measure.nplc=%f',\n        \"\"\" Property controlling the nplc value \"\"\",\n        validator=truncated_range,\n        values=[0.001, 25]\n    )\n\n    ###############\n    # Current (A) #\n    ###############\n    current = Instrument.measurement(\n        'measure.i()',\n        \"\"\" Reads the current in Amps \"\"\"\n    )\n\n    source_current = Instrument.control(\n        'source.leveli', 'source.leveli=%f',\n        \"\"\" Property controlling the applied source current \"\"\",\n        validator=truncated_range,\n        values=[-1.5, 1.5]\n    )\n\n    compliance_current = Instrument.control(\n        'source.limiti', 'source.limiti=%f',\n        \"\"\" Property controlling the source compliance current \"\"\",\n        validator=truncated_range,\n        values=[-1.5, 1.5]\n    )\n\n    source_current_range = Instrument.control(\n        'source.rangei', 'source.rangei=%f',\n        \"\"\"Property controlling the source current range \"\"\",\n        validator=truncated_range,\n        values=[-1.5, 1.5]\n    )\n\n    current_range = Instrument.control(\n        'measure.rangei', 'measure.rangei=%f',\n        \"\"\"Property controlling the measurement current range \"\"\",\n        validator=truncated_range,\n        values=[-1.5, 1.5]\n    )\n\n    ###############\n    # Voltage (V) #\n    ###############\n    voltage = Instrument.measurement(\n        'measure.v()',\n        \"\"\" Reads the voltage in Volts \"\"\"\n    )\n\n    source_voltage = Instrument.control(\n        'source.levelv', 'source.levelv=%f',\n        \"\"\" Property controlling the applied source voltage \"\"\",\n        validator=truncated_range,\n        values=[-200, 
200]\n    )\n\n    compliance_voltage = Instrument.control(\n        'source.limitv', 'source.limitv=%f',\n        \"\"\" Property controlling the source compliance voltage \"\"\",\n        validator=truncated_range,\n        values=[-200, 200]\n    )\n\n    source_voltage_range = Instrument.control(\n        'source.rangev', 'source.rangev=%f',\n        \"\"\"Property controlling the source voltage range \"\"\",\n        validator=truncated_range,\n        values=[-200, 200]\n    )\n\n    voltage_range = Instrument.control(\n        'measure.rangev', 'measure.rangev=%f',\n        \"\"\"Property controlling the measurement voltage range \"\"\",\n        validator=truncated_range,\n        values=[-200, 200]\n    )\n\n    ####################\n    # Resistance (Ohm) #\n    ####################\n    resistance = Instrument.measurement(\n        'measure.r()',\n        \"\"\" Reads the resistance in Ohms \"\"\"\n    )\n\n    wires_mode = Instrument.control(\n        'sense', 'sense=%d',\n        \"\"\"Property controlling the resistance measurement mode: 4 wires or 2 wires\"\"\",\n        validator=strict_discrete_set,\n        values={'4': 1, '2': 0},\n        map_values=True\n    )\n\n    #######################\n    # Measurement Methods #\n    #######################\n\n    def measure_voltage(self, nplc=1, voltage=21.0, auto_range=True):\n        \"\"\" Configures the measurement of voltage.\n        :param nplc: Number of power line cycles (NPLC) from 0.001 to 25\n        :param voltage: Upper limit of voltage in Volts, from -200 V to 200 V\n        :param auto_range: Enables auto_range if True, else uses the set voltage\n        \"\"\"\n        log.info(\"%s is measuring voltage.\" % self.channel)\n        self.write('measure.v()')\n        self.write('measure.nplc=%f' % nplc)\n        if auto_range:\n            self.write('measure.autorangev=1')\n        else:\n            self.voltage_range = voltage\n        self.check_errors()\n\n    def measure_current(self, nplc=1, current=1.05e-4, auto_range=True):\n        \"\"\" Configures the measurement of current.\n        :param nplc: Number of power line cycles (NPLC) from 0.001 to 25\n        :param current: Upper limit of current in Amps, from -1.5 A to 1.5 A\n        :param auto_range: Enables auto_range if True, else uses the set current\n        \"\"\"\n        log.info(\"%s is measuring current.\" % self.channel)\n        self.write('measure.i()')\n        self.write('measure.nplc=%f' % nplc)\n        if auto_range:\n            self.write('measure.autorangei=1')\n        else:\n            self.current_range = current\n        self.check_errors()\n\n    def auto_range_source(self):\n        \"\"\" Configures the source to use an automatic range.\n        \"\"\"\n        if self.source_mode == 'current':\n            self.write('source.autorangei=1')\n        else:\n            self.write('source.autorangev=1')\n\n    def apply_current(self, current_range=None, compliance_voltage=0.1):\n        \"\"\" Configures the instrument to apply a source current, and\n        uses an auto range unless a current range is specified.\n        The compliance voltage is also set.\n        :param compliance_voltage: A float in the correct range for a\n        :attr:`~.Keithley2600.compliance_voltage`\n        :param current_range: A :attr:`~.Keithley2600.current_range` value or None\n        \"\"\"\n        log.info(\"%s is sourcing current.\" % self.channel)\n        self.source_mode = 'current'\n        if current_range is None:\n            self.auto_range_source()\n        else:\n            self.source_current_range = current_range\n        self.compliance_voltage = compliance_voltage\n        self.check_errors()\n\n    def apply_voltage(self, voltage_range=None,\n                      compliance_current=0.1):\n        \"\"\" Configures the instrument to apply a source voltage, and\n        uses an auto range unless a voltage range is specified.\n        The compliance current is also set.\n        :param compliance_current: A float in the correct range for a\n        :attr:`~.Keithley2600.compliance_current`\n        :param voltage_range: A 
:attr:`~.Keithley2600.voltage_range` value or None\n        \"\"\"\n        log.info(\"%s is sourcing voltage.\" % self.channel)\n        self.source_mode = 'voltage'\n        if voltage_range is None:\n            self.auto_range_source()\n        else:\n            self.source_voltage_range = voltage_range\n        self.compliance_current = compliance_current\n        self.check_errors()\n\n    def ramp_to_voltage(self, target_voltage, steps=30, pause=0.1):\n        \"\"\" Ramps to a target voltage from the set voltage value over\n        a certain number of linear steps, each separated by a pause duration.\n        :param target_voltage: A voltage in Volts\n        :param steps: An integer number of steps\n        :param pause: A pause duration in seconds to wait between steps \"\"\"\n        voltages = np.linspace(self.source_voltage, target_voltage, steps)\n        for voltage in voltages:\n            self.source_voltage = voltage\n            time.sleep(pause)\n\n    def ramp_to_current(self, target_current, steps=30, pause=0.1):\n        \"\"\" Ramps to a target current from the set current value over\n        a certain number of linear steps, each separated by a pause duration.\n        :param target_current: A current in Amps\n        :param steps: An integer number of steps\n        :param pause: A pause duration in seconds to wait between steps \"\"\"\n        currents = np.linspace(self.source_current, target_current, steps)\n        for current in currents:\n            self.source_current = current\n            time.sleep(pause)\n\n    def shutdown(self):\n        \"\"\" Ensures that the current or voltage is turned to zero\n        and disables the output. \"\"\"\n        log.info(\"Shutting down channel %s.\" % self.channel)\n        if self.source_mode == 'current':\n            self.ramp_to_current(0.0)\n        else:\n            self.ramp_to_voltage(0.0)\n        self.source_output = 'OFF'"
]
| [
[
"numpy.linspace"
]
]
|
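numpy.linspace is the only numpy call in the driver above: ramp_to_voltage and ramp_to_current use it to step the source level linearly from the current setting to the target. A usage sketch, with the caveats that the GPIB address is a hypothetical placeholder and that importing Keithley2600 from the keithley subpackage is assumed to work as it does for other pymeasure drivers:

from pymeasure.instruments.keithley import Keithley2600

smu = Keithley2600("GPIB::26")  # hypothetical VISA resource string
smu.ChA.apply_voltage(compliance_current=0.1)      # source voltage, auto-ranged
smu.ChA.source_output = 'ON'
smu.ChA.ramp_to_voltage(1.0, steps=30, pause=0.1)  # 30 linspace steps, 0.1 s apart
print(smu.ChA.current)  # queries print(smua.measure.i()) on the instrument
smu.ChA.shutdown()      # ramps the source back to zero and turns the output OFF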
moiexpositoalonsolab/deepbiosphere | [
"a12e59c40d2c29b5428e4969ef8c3a0cb457e387"
]
| [
"deepbiosphere/scripts/GEOCLEF_Run.py"
]
| [
"import copy\nimport pickle\nimport pandas as pd\nimport argparse\nimport time\nimport numpy as np\nimport socket\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.tensorboard import SummaryWriter\nimport itertools\nimport gc\nimport csv\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.sampler import SubsetRandomSampler\nimport math\nfrom tqdm import tqdm\n\nfrom deepbiosphere.scripts import GEOCLEF_CNN as cnn\nfrom deepbiosphere.scripts import GEOCLEF_Dataset as Dataset\nfrom deepbiosphere.scripts import GEOCLEF_Loss as losses\nfrom deepbiosphere.scripts import GEOCLEF_Utils as utils\nfrom deepbiosphere.scripts import GEOCLEF_Config as config\n\n\n\ndef better_split_train_test(full_dat):\n# shuffle = np.random.permutation(np.arange(len(dset)))\n# split = int(len(idxs)*split_amt) \n# test = set()\n# total = len(dset)\n# i = 0\n# while len(test) <= split:\n# test.update(full_dat.obs[shuffle[i], dataset.ids_idx])\n# i += 1 \n# test_idx = []\n# train_idx = []\n# for idx in np.arange(len(full_dat)):\n# test_idx.append(idx) if full_dat.obs[idx,0] in test else train_idx.append(idx)\n# train_sampler = SubsetRandomSampler(train_idx)\n# valid_sampler = SubsetRandomSampler(test_idx)\n test_idx = full_dat.test\n train_idx = full_dat.train\n train_sampler = SubsetRandomSampler(train_idx)\n valid_sampler = SubsetRandomSampler(test_idx)\n return train_sampler, valid_sampler, {'train': train_idx, 'test' : test_idx}\n\n\ndef old_split_train_test(full_dat, split_amt):\n '''grab split_amt% of labeled data for holdout testing'''\n idxs = np.random.permutation(len(full_dat))\n split = int(len(idxs)*split_amt)\n training_idx, test_idx = idxs[:split], idxs[split:]\n train_sampler = SubsetRandomSampler(training_idx)\n valid_sampler = SubsetRandomSampler(test_idx)\n return train_sampler, valid_sampler, {'train': training_idx, 'test' : test_idx}\n\n\n \n\ndef check_mem():\n '''Grabs all in-scope tensors '''\n for obj in gc.get_objects():\n try:\n if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):\n print(type(obj), obj.size(), obj.device)\n except: pass\n\n\n\ndef setup_dataset(observation, base_dir, organism, region, normalize, altitude, dataset, threshold, num_species, inc_latlon, pretrained_dset):\n '''grab and setup train or test dataset'''\n# print(observation, base_dir, organism, region, normalize, altitude, dataset, threshold, num_species, :%slon)\n if dataset == 'satellite_only':\n return Dataset.HighRes_Satellite_Images_Only(base_dir, organism, region, observation, altitude, threshold, num_species, pretrained_dset)\n \n elif dataset == 'satellite_rasters_image':\n# (base_dir,organism, region, observation, threshold, topk)\n return Dataset.HighRes_Satellite_Rasters_LowRes(base_dir, organism, region, normalize, observation, altitude, threshold, num_species, inc_latlon)\n elif dataset == 'satellite_rasters_point':\n return Dataset.HighRes_Satellite_Rasters_Point(base_dir, organism, region, observation, altitude, normalize, threshold,num_species, inc_latlon, pretrained_dset)\n elif dataset == 'rasters_image':\n return Dataset.Bioclim_Rasters_Image(base_dir, organism, region, normalize, observation, threshold,num_species, inc_latlon)\n elif dataset == 'rasters_point':\n return Dataset.Bioclim_Rasters_Point(base_dir, organism, region, normalize, observation, threshold, num_species, inc_latlon)\n \n elif dataset == 'satellite_rasters_sheet':\n return Dataset.HighRes_Satellite_Rasters_Sheet(base_dir, organism, region, normalize, 
observation, altitude, threshold, num_species)\n else: \n raise NotImplementedError\n\n \ndef setup_model(model, train_dataset, pretrained, batch_norm, arch_type):\n\n num_specs = train_dataset.num_specs\n num_fams = train_dataset.num_fams\n num_gens = train_dataset.num_gens\n print(\"----- model ----\")\n print(\"model name is \", model)\n if model == 'RandomForest':\n raise NotImplementedError\n elif model == 'SVM':\n raise NotImplementedError\n # some flavor of convnet architecture\n elif model == 'ResNet_18':\n return cnn.ResNet_18(pretrained, arch_type, num_specs, num_gens, num_fams, train_dataset.base_dir)\n elif model == 'ResNet_34':\n return cnn.ResNet_34(pretrained, arch_type, num_specs, num_gens, num_fams, train_dataset.base_dir) \n elif model == 'TResNet_M':\n return cnn.TResNet_M(pretrained, num_specs, num_gens, num_fams, train_dataset.base_dir)\n elif model == 'TResNet_L':\n return cnn.TResNet_L(pretrained, num_specs, num_gens, num_fams, train_dataset.base_dir)\n elif model == 'VGG_11':\n return cnn.VGG_11(pretrained, batch_norm, num_specs, num_fams, num_gens, arch_type, train_dataset.base_dir)\n elif model == 'VGG_16':\n return cnn.VGG_16(pretrained, batch_norm, num_specs, num_fams, num_gens, arch_type, train_dataset.base_dir)\n elif model == 'FlatNet':\n return cnn.FlatNet(species=num_specs, families=num_fams, genera=num_gens, num_channels=train_dataset.channels)\n elif model == 'Joint_VGG11_MLP':\n return cnn.Joint_VGG11_MLP(num_specs, num_gens, num_fams, train_dataset.base_dir, batch_norm, arch_type, pretrained, train_dataset.num_rasters)\n elif model == 'Joint_VGG16_MLP':\n return cnn.Joint_VGG16_MLP(num_specs, num_gens, num_fams, train_dataset.base_dir, batch_norm, arch_type, pretrained, train_dataset.num_rasters)\n elif model == 'Joint_ResNet_18':\n return cnn.Joint_ResNet_18(pretrained, num_specs, num_gens, num_fams, train_dataset.num_rasters)\n elif model == 'Joint_TResNet_M':\n return cnn.Joint_TResNet_M(pretrained, num_specs, num_gens, num_fams, train_dataset.num_rasters, train_dataset.base_dir)\n elif model == 'Joint_TResNet_L':\n return cnn.Joint_TResNet_L(pretrained, num_specs, num_gens, num_fams, train_dataset.num_rasters, train_dataset.base_dir) \n elif model == 'MLP_Family':\n return cnn.MLP_Family(families=num_fams, env_rasters=train_dataset.num_rasters)\n elif model == 'MLP_Family_Genus':\n return cnn.MLP_Family_Genus(families=num_fams, genera=num_gens, env_rasters=train_dataset.num_rasters) \n elif model == 'MLP_Family_Genus_Species':\n return cnn.MLP_Family_Genus_Species(families=num_fams, genera=num_gens, species=num_specs, env_rasters=train_dataset.num_rasters)\n elif model == 'Old_MLP_Family_Genus_Species':\n return cnn.Old_MLP_Family_Genus_Species(families=num_fams, genera=num_gens, species=num_specs, env_rasters=train_dataset.num_rasters) \n \n elif model == 'SpecOnly':\n return cnn.SpecOnly(species=num_specs, num_channels=train_dataset.channels)\n else: \n exit(1), \"if you reach this, you got a real problem bucko\"\n\n \ndef setup_dataloader(dataset, dtype,batch_size, processes, sampler, model, joint_collate_fn=None):\n\n if joint_collate_fn is None:\n if dtype == 'satellite_rasters_point':\n collate_fn = joint_raster_collate_fn\n else:\n collate_fn = joint_collate_fn\n else:\n collate_fn = joint_collate_fn\n dataloader = DataLoader(dataset, batch_size, pin_memory=False, num_workers=processes, collate_fn=collate_fn, sampler=sampler, drop_last=False)\n return dataloader\n\n\n \ndef setup_loss(observation, dataset, loss, unweighted, device, loss_type, 
model):\n    spec_loss, gen_loss, fam_loss = None, None, None\n    if loss == 'none':\n        return None, None, None\n    if loss == 'BrierAll':\n        spec_loss = losses.BrierAll(loss_type)\n        gen_loss = losses.BrierAll(loss_type)\n        fam_loss = losses.BrierAll(loss_type)\n    elif loss == 'BrierPresenceOnly':\n        spec_loss = losses.BrierPresenceOnly(loss_type)\n        gen_loss = losses.BrierPresenceOnly(loss_type)\n        fam_loss = losses.BrierPresenceOnly(loss_type)\n    elif loss == 'MultiLabelMarginLoss':\n        spec_loss = losses.BrierAll(loss_type)\n        gen_loss = losses.BrierAll(loss_type)\n        fam_loss = losses.BrierAll(loss_type)\n    elif loss == 'AsymmetricLoss':\n        spec_loss = losses.AsymmetricLoss()\n        gen_loss = losses.AsymmetricLoss()\n        fam_loss = losses.AsymmetricLoss()\n    elif loss == 'AsymmetricLossOptimized':\n        spec_loss = losses.AsymmetricLossOptimized()\n        gen_loss = losses.AsymmetricLossOptimized()\n        fam_loss = losses.AsymmetricLossOptimized()\n    # unweighted is True if model loss is to be unweighted\n    # and False if model loss is to be weighted\n    if not unweighted:\n        spec_freq = Dataset.freq_from_dict(dataset.spec_freqs)\n        gen_freq = Dataset.freq_from_dict(dataset.gen_freqs)\n        fam_freq = Dataset.freq_from_dict(dataset.fam_freqs)\n        spec_freq = 1.0 / torch.tensor(spec_freq, dtype=torch.float, device=device)\n        gen_freq = 1.0 / torch.tensor(gen_freq, dtype=torch.float, device=device)\n        fam_freq = 1.0 / torch.tensor(fam_freq, dtype=torch.float, device=device)\n        if loss == 'BCEWithLogits':\n            spec_loss = torch.nn.BCEWithLogitsLoss(spec_freq, reduction=loss_type)\n            gen_loss = torch.nn.BCEWithLogitsLoss(gen_freq, reduction=loss_type)\n            fam_loss = torch.nn.BCEWithLogitsLoss(fam_freq, reduction=loss_type)\n        elif loss == 'CrossEntropyPresenceOnly':\n            spec_loss = losses.CrossEntropyPresenceOnly(spec_freq, type=loss_type)\n            gen_loss = losses.CrossEntropyPresenceOnly(gen_freq, type=loss_type)\n            fam_loss = losses.CrossEntropyPresenceOnly(fam_freq, type=loss_type)\n    else:\n        if loss == 'BCEWithLogits':\n            spec_loss = torch.nn.BCEWithLogitsLoss(reduction=loss_type)\n            gen_loss = torch.nn.BCEWithLogitsLoss(reduction=loss_type)\n            fam_loss = torch.nn.BCEWithLogitsLoss(reduction=loss_type)\n        elif loss == 'CrossEntropyPresenceOnly':\n            # num_specs / num_gens / num_fams were undefined in this scope; use the dataset's class counts\n            spec_loss = losses.CrossEntropyPresenceOnly(torch.ones(dataset.num_specs, dtype=torch.float, device=device), reduction=loss_type)\n            gen_loss = losses.CrossEntropyPresenceOnly(torch.ones(dataset.num_gens, dtype=torch.float, device=device), reduction=loss_type)\n            fam_loss = losses.CrossEntropyPresenceOnly(torch.ones(dataset.num_fams, dtype=torch.float, device=device), reduction=loss_type)\n\n    if model == 'MLP_Family':\n        gen_loss = None\n        spec_loss = None\n    elif model == 'MLP_Family_Genus':\n        spec_loss = None\n    if model == 'SpecOnly':\n        fam_loss = None\n        gen_loss = None\n\n    return spec_loss, gen_loss, fam_loss\n\n\ndef clean_gpu(device):\n    if device is not None:\n        print(\"cleaning gpu\")\n        torch.cuda.empty_cache()\n\n# def single_collate_fn(batch): \n#     # batch is a list of tuples of (composite_label <np array [3]>, images <np array [6, 256, 256]>) \n#     labs, img = zip(*batch) \n#     print(labs[0][0], labs[0], labs)\n#     lbs = [torch.tensor(l[0], dtype=torch.long) for l in labs] \n#     img = [i.astype(np.uint8, copy=False) for i in img]\n#     imgs = [torch.from_numpy(i) for i in img]\n#     print(torch.stack(lbs).shape)\n#     return torch.stack(lbs), torch.stack(imgs) \n\ndef joint_collate_fn(batch):\n    # batch is a list of tuples of (specs_label, gens_label, fams_label, images) \n    all_specs = []\n    all_gens = []\n    all_fams = []\n    imgs = []\n    #(specs_label, 
gens_label, fams_label, images) \n \n for (spec, gen, fam, img, _) in batch:\n specs_tens = torch.zeros(num_specs)\n specs_tens[spec] += 1\n all_specs.append(specs_tens)\n\n gens_tens = torch.zeros(num_gens)\n gens_tens[gen] += 1\n all_gens.append(gens_tens)\n\n fams_tens = torch.zeros(num_fams)\n fams_tens[fam] += 1\n all_fams.append(fams_tens)\n imgs.append(img)\n return torch.stack(all_specs), torch.stack(all_gens), torch.stack(all_fams), torch.from_numpy(np.stack(imgs))\n\ndef joint_raster_collate_fn(batch):\n # batch is a list of tuples of (specs_label, gens_label, fams_label, images, env_rasters) \n all_specs = []\n all_gens = []\n all_fams = []\n imgs = []\n rasters = []\n #(specs_label, gens_label, fams_label, images, env_rasters) \n for (spec, gen, fam, img, raster, _) in batch:\n specs_tens = torch.zeros(num_specs)\n specs_tens[spec] += 1\n all_specs.append(specs_tens)\n\n gens_tens = torch.zeros(num_gens)\n gens_tens[gen] += 1\n all_gens.append(gens_tens)\n\n fams_tens = torch.zeros(num_fams)\n fams_tens[fam] += 1\n all_fams.append(fams_tens)\n imgs.append(img)\n rasters.append(raster)\n return torch.stack(all_specs), torch.stack(all_gens), torch.stack(all_fams), torch.from_numpy(np.stack(imgs)), torch.from_numpy(np.stack(rasters))\n\n# def joint_rasteronly_collate_fn(batch):\n# # batch is a list of tuples of (specs_label, gens_label, fams_label, images, env_rasters) \n# all_specs = []\n# all_gens = []\n# all_fams = []\n# rasters = []\n# #(specs_label, gens_label, fams_label, images, env_rasters) \n# for (spec, gen, fam, raster) in batch:\n# specs_tens = torch.zeros(num_specs)\n# specs_tens[spec] += 1\n# all_specs.append(specs_tens)\n\n# gens_tens = torch.zeros(num_gens)\n# gens_tens[gen] += 1\n# all_gens.append(gens_tens)\n\n# fams_tens = torch.zeros(num_fams)\n# fams_tens[fam] += 1\n# all_fams.append(fams_tens)\n# rasters.append(raster)\n# return torch.stack(all_specs), torch.stack(all_gens), torch.stack(all_fams), torch.from_numpy(np.stack(rasters))\n\n \n \n \ndef test_batch(test_loader, tb_writer, device, net, observation, epoch, loss, model, dataset):\n if model == 'SpecOnly':\n if observation == 'single' or observation == 'single_single':\n return test_single_speconly_batch(test_loader, tb_writer, device, net, epoch)\n else:\n return test_joint_speconly_batch(test_loader, tb_writer, device, net, epoch)\n elif model == 'MLP_Family':\n if observation == 'single' or observation == 'single_single':\n return test_single_obs_fam(test_loader, tb_writer, device, net, epoch)\n else:\n return test_joint_obs_fam(test_loader, tb_writer, device, net, epoch)\n elif model == 'MLP_Family_Genus':\n if observation == 'single' or observation == 'single_single':\n return test_single_obs_rastersonly_famgen(test_loader, tb_writer, device, net, epoch)\n else:\n return test_joint_obs_rastersonly_famgen(test_loader, tb_writer, device, net, epoch)\n\n elif dataset == 'satellite_rasters_point':\n if observation == 'single' or observation == 'single_single':\n return test_single_obs_rasters_batch(test_loader, tb_writer, device, net, epoch)\n else:\n return test_joint_obs_rasters_batch(test_loader, tb_writer, device, net, epoch)\n else:\n if observation == 'single' or observation == 'single_single':\n return test_single_obs_batch(test_loader, tb_writer, device, net, epoch)\n else:\n return test_joint_obs_batch(test_loader, tb_writer, device, net, epoch)\n \n \ndef test_single_obs_batch(test_loader, tb_writer, device, net, epoch):\n with tqdm(total=len(test_loader), unit=\"batch\") as prog:\n all_accs 
= []\n        all_spec = []\n        all_gen = []\n        all_fam = []\n        for i, (specs_label, gens_label, fams_label, loaded_imgs) in enumerate(test_loader):\n\n            specs_label = specs_label.to(device)\n            gens_label = gens_label.to(device)\n            fams_label = fams_label.to(device)\n            batch = loaded_imgs.to(device)\n            (outputs, genus, family) = net(batch.float())\n            spec_accs = utils.topk_acc(outputs, specs_label, topk=(30,1), device=device) # magic no from CELF2020\n            gens_accs = utils.topk_acc(genus, gens_label, topk=(30,1), device=device) # magic no from CELF2020\n            fam_accs = utils.topk_acc(family, fams_label, topk=(30,1), device=device) # magic no from CELF2020\n            prog.set_description(\"top 30: {acc0} top1: {acc1}\".format(acc0=spec_accs[0], acc1=spec_accs[1]))\n            all_spec.append(spec_accs)\n            all_gen.append(gens_accs)\n            all_fam.append(fam_accs)\n            if tb_writer is not None:\n                tb_writer.add_scalar(\"test/30_spec_accuracy\", spec_accs[0], epoch)\n                tb_writer.add_scalar(\"test/1_spec_accuracy\", spec_accs[1], epoch)\n\n                tb_writer.add_scalar(\"test/30_gen_accuracy\", gens_accs[0], epoch)\n                tb_writer.add_scalar(\"test/1_gen_accuracy\", gens_accs[1], epoch)\n\n                tb_writer.add_scalar(\"test/30_fam_accuracy\", fam_accs[0], epoch)\n                tb_writer.add_scalar(\"test/1_fam_accuracy\", fam_accs[1], epoch)\n\n            prog.update(1)\n        prog.close()\n    return all_spec, all_gen, all_fam\n\n\n#TODO: fix to work with recall_per_example\ndef test_joint_obs_rasters_batch(test_loader, tb_writer, device, net, epoch):\n\n    allspecrec = []\n    allspecprec = []\n    allspecacc = []\n    allspecrectop1 = []\n    allspecprectop1 = []\n    allgenacc = []\n    allfamacc = []\n\n    allspec = []\n    allgen = []\n    allfam = []\n    net.eval()\n    sampler = test_loader.sampler\n    dataset = test_loader.dataset\n    with tqdm(total=len(sampler), unit=\"example\") as prog:\n        for i, idx in enumerate(sampler):\n            (specs_label, gens_label, fams_label, all_specs, all_gens, all_fams, loaded_imgs, env_rasters) = dataset.infer_item(idx)\n            imgs = torch.from_numpy(np.expand_dims(loaded_imgs, axis=0)).to(device)\n            env_rasters = torch.from_numpy(np.expand_dims(env_rasters, axis=0)).to(device)\n            (outputs, gens, fams) = net(imgs.float(), env_rasters.float())\n\n            spec_weight = dataset.spec_freqs[specs_label]\n            all_specs = torch.tensor(all_specs, device=device)\n            all_gens = torch.tensor(all_gens, device=device)\n            all_fams = torch.tensor(all_fams, device=device)\n            specrec, specrectop1, _, _ = utils.recall_per_example(all_specs, outputs, specs_label, spec_weight, device)\n            specprec, specprectop, _, _ = utils.precision_per_example(all_specs, outputs, specs_label, spec_weight, device)\n            specacc, _, _ = utils.accuracy_per_example(all_specs, outputs)\n            genacc, _, _ = utils.accuracy_per_example(all_gens, gens)\n            famacc, _, _ = utils.accuracy_per_example(all_fams, fams)\n\n            allspecrec.append(specrec)\n            allspecprec.append(specprec)\n            allspecacc.append(specacc)\n            allspecrectop1.append(specrectop1)\n            allspecprectop1.append(specprectop)\n            allgenacc.append(genacc)\n            allfamacc.append(famacc)\n            prog.set_description(\"mean recall across batch: {acc0}\".format(acc0=specrec))\n            prog.update(1)\n            allspec.append((specrec, specprec, specacc, specrectop1, specprectop))\n            allfam.append(famacc)\n            allgen.append(genacc)\n    print(\"species recall {}, precision {}, and accuracy {}\".format(mean(allspecrec), mean(allspecprec), mean(allspecacc)))\n    if tb_writer is not None:\n        tb_writer.add_scalar(\"test/avg_fam_accuracy\", mean(allfamacc) * 100, epoch)\n        tb_writer.add_scalar(\"test/avg_spec_accuracy\", mean(allspecacc) * 100, epoch)\n        
tb_writer.add_scalar(\"test/avg_gen_accuracy\", mean(allgenacc) * 100, epoch) \n tb_writer.add_scalar(\"test/avg_spec_prec_top1\", mean(allspecprectop1) * 100, epoch)\n tb_writer.add_scalar(\"test/avg_spec_rec_top1\", mean(allspecrectop1) * 100, epoch)\n tb_writer.add_scalar(\"test/avg_spec_recall\", mean(allspecrec) * 100, epoch)\n tb_writer.add_scalar(\"test/avg_spec_precision\", mean(allspecprec) * 100, epoch) \n\n prog.close()\n net.train()\n return allfam, allgen, allspec\n \n\n \n \ndef test_single_obs_rasters_batch(test_loader, tb_writer, device, net, epoch):\n with tqdm(total=len(test_loader), unit=\"batch\") as prog:\n allspec = []\n allgen = []\n allfam = []\n for i, (specs_label, gens_label, fams_label, imgs, env_rasters) in enumerate(test_loader):\n imgs = imgs.to(device)\n env_rasters = env_rasters.to(device)\n specs_lab = specs_label.to(device) \n gens_label = gens_label.to(device)\n fams_label = fams_label.to(device)\n (outputs, gens, fams) = net(imgs.float(), env_rasters.float()) \n spec_accs = utils.topk_acc(outputs, specs_label, topk=(30,1), device=device) # magic no from CELF2020\n gens_accs = utils.topk_acc(gens, gens_label, topk=(30,1), device=device) # magic no from CELF2020\n fam_accs = utils.topk_acc(fams, fams_label, topk=(30,1), device=device) # magic no from CELF2020\n prog.set_description(\"top 30: {acc0} top1: {acc1}\".format(acc0=spec_accs[0], acc1=spec_accs[1]))\n all_spec.append(spec_accs)\n all_gen.append(gens_accs)\n all_fam.append(fam_accs)\n if tb_writer is not None:\n tb_writer.add_scalar(\"test/30_spec_accuracy\", spec_accs[0], epoch)\n tb_writer.add_scalar(\"test/1_spec_accuracy\", spec_accs[1], epoch) \n\n tb_writer.add_scalar(\"test/30_gen_accuracy\", gens_accs[0], epoch)\n tb_writer.add_scalar(\"test/1_gen_accuracy\", gens_accs[1], epoch) \n\n tb_writer.add_scalar(\"test/30_fam_accuracy\", fam_accs[0], epoch)\n tb_writer.add_scalar(\"test/1_fam_accuracy\", fam_accs[1], epoch) \n allspec.append(totspec_accs)\n allgen.append(totgen_accs)\n allfam.append(totfam_accs)\n prog.close()\n return allfam, allgen, allspec\n\ndef test_joint_obs_fam(test_loader, tb_writer, device, net, epoch):\n means = []\n all_accs = []\n mean_accs = []\n all_frec = [] \n all_tf = [] \n sampler = test_loader.sampler\n dataset = test_loader.dataset\n with tqdm(total=len(sampler), unit=\"example\") as prog:\n for i, idx in enumerate(sampler):\n # return (specs_label, gens_label, fams_label, all_spec, all_gen, all_fam, images)\n (_, _, fams_label, _, _, all_fams, loaded_imgs) = dataset.infer_item(idx) \n loaded_imgs = torch.from_numpy(np.expand_dims(loaded_imgs, axis=0)).to(device)\n fams = net(loaded_imgs.float()) \n weight = dataset.fam_freqs[fams_label]\n famrec, famtop1 = utils.recall_per_example(fams, all_fams, fams_label, weight) # magic no from CELF2020 \n all_frec.append(famrec)\n all_tf.append(famtop1) \n prog.set_description(\"mean recall across batch: {acc0}\".format(acc0=famrec))\n prog.update(1) \n all_accs.append(famrec)\n mean_accs.append(famtop1)\n means.append(famrec * 100)\n if tb_writer is not None:\n tb_writer.add_scalar(\"test/avg_fam_recall\", mean(all_frec) * 100, epoch)\n tb_writer.add_scalar(\"test/avg_fam_top1_recall\", mean(all_tf) * 100, epoch) \n prog.close()\n return means, all_accs, mean_accs\n\ndef test_single_obs_fam(test_loader, tb_writer, device, net, epoch):\n with tqdm(total=len(test_loader), unit=\"batch\") as prog:\n means = []\n all_accs = []\n mean_accs = []\n for i, (_, _, fams_label, env_rasters) in enumerate(test_loader):\n env_rasters 
= env_rasters.to(device)\n fams_label = fams_label.to(device)\n fams = net(env_rasters.float()) \n fam_accs = utils.topk_acc(fams, fams_label, topk=(30,1), device=device) # magic no from CELF2020\n prog.set_description(\"top 30: {acc0} top1: {acc1}\".format(acc0=fam_accs[0], acc1=fam_accs[1]))\n prog.update(1) \n if tb_writer is not None:\n tb_writer.add_scalar(\"test/30_fam_accuracy\", fam_accs[0], epoch)\n tb_writer.add_scalar(\"test/1_fam_accuracy\", fam_accs[1], epoch) \n all_accs.append(fam_accs)\n mean_accs.append(fam_accs)\n means.append(fam_accs.mean())\n prog.close()\n return means, all_accs, mean_accs\n\ndef test_joint_obs_rastersonly_all(test_loader, tb_writer, device, net, epoch):\n with tqdm(total=len(test_loader), unit=\"batch\") as prog:\n means = []\n all_accs = []\n mean_accs = []\n for i, (specs_label, gens_label, fams_label, env_rasters) in enumerate(test_loader):\n env_rasters = env_rasters.to(device)\n fams_label = fams_label.to(device)\n gens_label = gens_label.to(device)\n specs_label = specs_label.to(device)\n fams, gens, specs = net(env_rasters.float()) \n genaccs, totgen_accs = utils.num_corr_matches(gens, gens_label) # magic no from CELF2020 \n famaccs, totfam_accs = utils.num_corr_matches(fams, fams_label) # magic no from CELF2020 \n specaccs, totspec_accs = utils.num_corr_matches(specs, specs_label) # magic no from CELF2020 \n #TODO: add other accuracy metrics??\n prog.set_description(\"mean accuracy across batch: {acc0}\".format(acc0=specaccs.mean()))\n prog.update(1) \n if tb_writer is not None:\n tb_writer.add_scalar(\"test/avg_fam_accuracy\", famaccs.mean(), epoch)\n tb_writer.add_scalar(\"test/avg_gen_accuracy\", genaccs.mean(), epoch)\n tb_writer.add_scalar(\"test/avg_spec_accuracy\", specaccs.mean(), epoch)\n all_accs.append(totfam_accs)\n mean_accs.append(totgen_accs)\n means.append(totspec_accs)\n prog.close()\n return means, all_accs, mean_accs\n\ndef mean(lst): \n return sum(lst) / len(lst) \n\ndef test_joint_obs_rastersonly_famgen(test_loader, tb_writer, device, net, epoch):\n means = []\n all_accs = []\n mean_accs = []\n all_grec = []\n all_frec = []\n all_tg = []\n all_tf = []\n sampler = test_loader.sampler\n dataset = test_loader.dataset\n with tqdm(total=len(sampler), unit=\"example\") as prog:\n for i, idx in enumerate(sampler):\n # return (specs_label, gens_label, fams_label, all_spec, all_gen, all_fam, images)\n (_, gens_label, fams_label, _, all_gens, all_fams, env_rasters) = dataset.infer_item(idx) \n env_rasters= torch.from_numpy(np.expand_dims(env_rasters, axis=0)).to(device)\n fams, gens = net(env_rasters.float()) \n fam_weight = dataset.fam_freqs[fams_label]\n gen_weight = dataset.gen_freqs[gens_label]\n famrec, famtop1 = utils.recall_per_example(fams, all_fams, fams_label, fam_weight) # magic no from CELF2020 \n genrec, gentop1 = utils.recall_per_example(gens, all_gens, gens_label, gen_weight) # magic no from CELF2020 \n all_grec.append(genrec)\n all_frec.append(famrec)\n all_tg.append(gentop1)\n all_tf.append(famtop1)\n prog.set_description(\"mean recall across batch: {acc0}\".format(acc0=genrec))\n prog.update(1) \n all_accs.append((famrec, genrec))\n mean_accs.append((famtop1, gentop1))\n means.append((famrec * 100, genrec * 100))\n if tb_writer is not None:\n tb_writer.add_scalar(\"test/avg_fam_recall\", mean(all_frec) * 100, epoch)\n tb_writer.add_scalar(\"test/avg_fam_top1_recall\", mean(all_tf) * 100, epoch) \n tb_writer.add_scalar(\"test/avg_gen_recall\", mean(all_grec) * 100, epoch)\n 
tb_writer.add_scalar(\"test/avg_gen_top1_recall\", mean(all_tg) * 100, epoch) \n prog.close()\n return means, all_accs, mean_accs \n \n \ndef test_single_obs_rastersonly_famgen(test_loader, tb_writer, device, net, epoch):\n with tqdm(total=len(test_loader), unit=\"batch\") as prog:\n means = []\n all_accs = []\n mean_accs = []\n for i, (_, gens_label, fams_label, env_rasters) in enumerate(test_loader):\n env_rasters = env_rasters.to(device)\n fams_label = fams_label.to(device)\n gens_label = gens_label.to(device)\n fams, gens = net(env_rasters.float()) \n gens_accs = utils.topk_acc(gens, gens_label, topk=(30,1), device=device) # magic no from CELF2020\n fam_accs = utils.topk_acc(fams, fams_label, topk=(30,1), device=device) # magic no from CELF2020\n\n prog.set_description(\"top 30: {acc0} top1: {acc1}\".format(acc0=gens_accs[0], acc1=gens_accs[1]))\n\n prog.update(1) \n if tb_writer is not None:\n\n tb_writer.add_scalar(\"test/30_gen_accuracy\", gens_accs[0], epoch)\n tb_writer.add_scalar(\"test/1_gen_accuracy\", gens_accs[1], epoch) \n\n tb_writer.add_scalar(\"test/30_fam_accuracy\", fam_accs[0], epoch)\n tb_writer.add_scalar(\"test/1_fam_accuracy\", fam_accs[1], epoch) \n all_accs.append(fam_accs)\n mean_accs.append(gens_accs)\n means.append(gens_accs.mean())\n prog.close()\n return means, all_accs, mean_accs\n\ndef test_joint_obs_batch(test_loader, tb_writer, device, net, epoch):\n \n allspecrec = []\n allspecprec = []\n allspecacc = []\n allspecrectop1 = []\n allspecprectop1 = []\n allgenacc = []\n allfamacc = []\n \n allspec = []\n allgen = []\n allfam = []\n net.eval() \n sampler = test_loader.sampler\n dataset = test_loader.dataset\n with tqdm(total=len(sampler), unit=\"example\") as prog:\n for i, idx in enumerate(sampler):\n (specs_label, gens_label, fams_label, all_specs, all_gens, all_fams, loaded_imgs) = dataset.infer_item(idx) \n # what you use for recreating labels if trianing from restart\n# ob = dataset.occs.iloc[idx]\n# sp = ob.species\n# # reconstruct true label using class mapping from model\n# all_specs = [dataset.spec_dict[s] for s in ob.all_specs_name]\n# specs_label = dataset.spec_dict[ob.species] \n\n batch = torch.from_numpy(np.expand_dims(loaded_imgs, axis=0)).to(device)\n (outputs, gens, fams) = net(batch.float())\n spec_weight = dataset.spec_freqs[specs_label]\n all_specs = torch.tensor(all_specs, device=device)\n all_gens = torch.tensor(all_gens, device=device)\n all_fams = torch.tensor(all_fams, device=device) \n specrec, specrectop1, _, _ = utils.recall_per_example(all_specs, outputs, specs_label, spec_weight, device)\n specprec, specprectop, _, _ = utils.precision_per_example(all_specs, outputs, specs_label, spec_weight, device)\n specacc, _, _ = utils.accuracy_per_example(all_specs, outputs) \n genacc, _, _ = utils.accuracy_per_example(all_gens, gens) \n famacc, _, _ = utils.accuracy_per_example(all_fams, fams) \n \n allspecrec.append(specrec)\n allspecprec.append(specprec)\n allspecacc.append(specacc)\n allspecrectop1.append(specrectop1)\n allspecprectop1.append(specprectop)\n allgenacc.append(genacc)\n allfamacc.append(famacc)\n prog.set_description(\"mean recall across batch: {acc0}\".format(acc0=specrec))\n prog.update(1) \n allspec.append((specrec, specprec, specacc, specrectop1, specprectop))\n allfam.append(famacc)\n allgen.append(genacc)\n print(\"species recall {}, precision {}, and accuracy {}\".format(mean(allspecrec), mean(allspecprec), mean(allspecacc)))\n if tb_writer is not None:\n tb_writer.add_scalar(\"test/avg_fam_accuracy\", 
mean(allfamacc) * 100, epoch)\n tb_writer.add_scalar(\"test/avg_spec_accuracy\", mean(allspecacc) * 100, epoch) \n tb_writer.add_scalar(\"test/avg_gen_accuracy\", mean(allgenacc) * 100, epoch) \n tb_writer.add_scalar(\"test/avg_spec_prec_top1\", mean(allspecprectop1) * 100, epoch)\n tb_writer.add_scalar(\"test/avg_spec_rec_top1\", mean(allspecrectop1) * 100, epoch)\n tb_writer.add_scalar(\"test/avg_spec_recall\", mean(allspecrec) * 100, epoch)\n tb_writer.add_scalar(\"test/avg_spec_precision\", mean(allspecprec) * 100, epoch) \n net.train() \n prog.close()\n return allfam, allgen, allspec\n\ndef test_joint_speconly_batch(test_loader, tb_writer, device, net, epoch):\n\n means = []\n all_accs = []\n mean_accs = []\n all_spec = []\n all_sp1 = []\n sampler = test_loader.sampler\n dataset = test_loader.dataset\n with tqdm(total=len(sampler), unit=\"example\") as prog:\n for i, idx in enumerate(sampler):\n # specs label is top1, all_spec is all species\n (specs_label, _, _, all_spec, _, _, loaded_imgs) = dataset.infer_item(idx)\n batch = torch.from_numpy(np.expand_dims(loaded_imgs, axis=0)).to(device)\n outputs = net(batch.float()) \n # recall, top1_recall\n spec_weight = dataset.spec_freqs[specs_label]\n specrec, spectop1 = utils.recall_per_example(outputs, all_spec, specs_label, spec_weight) # magic no from CELF2020\n prog.set_description(\"mean recall across batch: {acc0}\".format(acc0=specrec))\n all_spec.append(specrec)\n all_sp1.append(spectop1)\n prog.update(1) \n all_accs.append(specrec)\n mean_accs.append(spectop1)\n means.append(specrec * 100)\n if tb_writer is not None:\n tb_writer.add_scalar(\"test/avg_spec_recall\", mean(all_spec) * 100, epoch)\n tb_writer.add_scalar(\"test/avg_spec_top1_recall\", mean(all_sp1) * 100, epoch) \n \n prog.close()\n return means, all_accs, mean_accs\n \ndef test_single_speconly_batch(test_loader, tb_writer, device, net, epoch):\n with tqdm(total=len(test_loader), unit=\"batch\") as prog:\n means = []\n all_accs = []\n mean_accs = []\n for i, (specs_label, _, _, loaded_imgs) in enumerate(test_loader):\n batch = loaded_imgs.to(device)\n specs_lab = specs_label.to(device) \n outputs = net(batch.float()) \n spec_accs = utils.topk_acc(outputs, specs_label, topk=(30,1), device=device) # magic no from CELF2020\n prog.set_description(\"top 30: {acc0} top1: {acc1}\".format(acc0=spec_accs[0], acc1=spec_accs[1]))\n\n prog.update(1) \n if tb_writer is not None:\n tb_writer.add_scalar(\"test/30_spec_accuracy\", spec_accs[0], epoch)\n tb_writer.add_scalar(\"test/1_spec_accuracy\", spec_accs[1], epoch) \n\n all_accs.append(spec_accs)\n mean_accs.append(spec_accs)\n means.append(spec_accs.mean())\n prog.close()\n return means, all_accs, mean_accs\n\n\n\ndef train_batch(dataset, train_loader, device, optimizer, net, spec_loss, gen_loss, fam_loss, tb_writer, step, model, nepoch, epoch, loss):\n tot_loss_meter = []\n spec_loss_meter = []\n gen_loss_meter = []\n fam_loss_meter = [] \n with tqdm(total=len(train_loader), unit=\"batch\") as prog:\n for i, ret in enumerate(train_loader): \n specophs = nepoch\n genpoch = nepoch * 2\n fampoch = nepoch \n\n # mixed data model MLP of environmental rasters + cnn of satellite imagery data\n if dataset == 'satellite_rasters_point':\n\n (specs_lab, gens_lab, fams_lab, batch, rasters) = ret\n if loss == 'all':\n tot_loss, loss_spec, loss_gen, loss_fam = forward_one_example_rasters(specs_lab, gens_lab, fams_lab, batch, rasters, optimizer, net, spec_loss, gen_loss, fam_loss, device, 'all')\n elif loss == 'cumulative':\n if epoch < 
fampoch:\n # family only\n tot_loss, loss_spec, loss_gen, loss_fam = forward_one_example_rasters(specs_lab, gens_lab, fams_lab, batch, rasters, optimizer, net, spec_loss, gen_loss, fam_loss, device, 'family')\n elif epoch >= fampoch and epoch < genpoch:\n # family and genus\n tot_loss, loss_spec, loss_gen, loss_fam = forward_one_example_rasters(specs_lab, gens_lab, fams_lab, batch, rasters, optimizer, net, spec_loss, gen_loss, fam_loss, device, 'fam_gen') \n else:\n # all three losses once epoch >= genpoch\n tot_loss, loss_spec, loss_gen, loss_fam = forward_one_example_rasters(specs_lab, gens_lab, fams_lab, batch, rasters, optimizer, net, spec_loss, gen_loss, fam_loss, device, 'all') \n elif loss == 'sequential':\n \n if epoch < fampoch:\n # family only\n tot_loss, loss_spec, loss_gen, loss_fam = forward_one_example_rasters(specs_lab, gens_lab, fams_lab, batch, rasters, optimizer, net, spec_loss, gen_loss, fam_loss, device, 'family')\n elif epoch >= fampoch and epoch < genpoch:\n # genus\n tot_loss, loss_spec, loss_gen, loss_fam = forward_one_example_rasters(specs_lab, gens_lab, fams_lab, batch, rasters, optimizer, net, spec_loss, gen_loss, fam_loss, device, 'genus') \n else:\n # spec only\n tot_loss, loss_spec, loss_gen, loss_fam = forward_one_example_rasters(specs_lab, gens_lab, fams_lab, batch, rasters, optimizer, net, spec_loss, gen_loss, fam_loss, device, 'species')\n elif loss == 'just_fam':\n tot_loss, loss_spec, loss_gen, loss_fam = forward_one_example_rasters(specs_lab, gens_lab, fams_lab, batch, rasters, optimizer, net, spec_loss, gen_loss, fam_loss, device, 'family')\n elif loss == 'fam_gen':\n tot_loss, loss_spec, loss_gen, loss_fam = forward_one_example_rasters(specs_lab, gens_lab, fams_lab, batch, rasters, optimizer, net, spec_loss, gen_loss, fam_loss, device, 'fam_gen')\n # cnn model that goes straight from the cnn to the species output layer\n elif model == 'SpecOnly':\n tot_loss, loss_spec, loss_gen, loss_fam = forward_one_example_rasters(specs_lab, gens_lab, fams_lab, batch, rasters, optimizer, net, spec_loss, gen_loss, fam_loss, device, 'species')\n else: # loss is custom\n tot_loss, loss_spec, loss_gen, loss_fam = forward_one_example_rasters(specs_lab, gens_lab, fams_lab, batch, rasters, optimizer, net, spec_loss, gen_loss, fam_loss, device, 'all') \n elif model == 'SpecOnly':\n (specs_lab, gens_lab, fams_lab, batch) = ret\n tot_loss, loss_spec = forward_one_example_speconly(specs_lab, batch, optimizer, net, spec_loss, device)\n loss_fam, loss_gen = None, None\n elif model == 'MLP_Family':\n (specs_lab, gens_lab, fams_lab, batch) = ret\n tot_loss, loss_fam = forward_one_example_speconly(fams_lab, batch, optimizer, net, fam_loss, device)\n loss_spec, loss_gen = None, None\n elif model == 'MLP_Family_Genus':\n (specs_lab, gens_lab, fams_lab, batch) = ret\n tot_loss, loss_gen, loss_fam = forward_one_example_famgen(fams_lab, gens_lab, batch, optimizer, net, fam_loss, gen_loss, device)\n loss_spec = None\n # if dataset != 'satellite_rasters_point':\n else:\n (specs_lab, gens_lab, fams_lab, batch) = ret\n if loss == 'all':\n tot_loss, loss_spec, loss_gen, loss_fam = forward_one_example(specs_lab, gens_lab, fams_lab, batch, optimizer, net, spec_loss, gen_loss, fam_loss, device, 'all')\n elif loss == 'cumulative':\n if epoch < fampoch:\n # family only\n tot_loss, loss_spec, 
loss_gen, loss_fam = forward_one_example(specs_lab, gens_lab, fams_lab, batch, optimizer, net, spec_loss, gen_loss, fam_loss, device, 'family')\n elif epoch >= fampoch and epoch < genpoch:\n # family and genus\n tot_loss, loss_spec, loss_gen, loss_fam = forward_one_example(specs_lab, gens_lab, fams_lab, batch, optimizer, net, spec_loss, gen_loss, fam_loss, device, 'fam_gen')\n else:\n # all 3 \n tot_loss, loss_spec, loss_gen, loss_fam = forward_one_example(specs_lab, gens_lab, fams_lab, batch, optimizer, net, spec_loss, gen_loss, fam_loss, device, 'all')\n \n elif loss == 'sequential':\n if epoch < fampoch:\n # family only\n tot_loss, loss_spec, loss_gen, loss_fam = forward_one_example(specs_lab, gens_lab, fams_lab, batch, optimizer, net, spec_loss, gen_loss, fam_loss, device, 'family')\n elif epoch >= fampoch and epoch < genpoch:\n # genus only\n tot_loss, loss_spec, loss_gen, loss_fam = forward_one_example(specs_lab, gens_lab, fams_lab, batch, optimizer, net, spec_loss, gen_loss, fam_loss, device, 'genus')\n else:\n # species only\n tot_loss, loss_spec, loss_gen, loss_fam = forward_one_example(specs_lab, gens_lab, fams_lab, batch, optimizer, net, spec_loss, gen_loss, fam_loss, device, 'species')\n \n elif loss == 'just_fam':\n tot_loss, loss_spec, loss_gen, loss_fam = forward_one_example(specs_lab, gens_lab, fams_lab, batch, optimizer, net, spec_loss, gen_loss, fam_loss, device, 'family')\n elif loss == 'fam_gen':\n tot_loss, loss_spec, loss_gen, loss_fam = forward_one_example(specs_lab, gens_lab, fams_lab, batch, optimizer, net, spec_loss, gen_loss, fam_loss, device, 'fam_gen')\n elif model == 'SpecOnly':\n tot_loss, loss_spec = forward_one_example_speconly(specs_lab, batch, optimizer, net, spec_loss, device)\n loss_gen, loss_fam = None, None\n else: # loss is None or a custom option:
no staged schedule, just compute all three losses\n tot_loss, loss_spec, loss_gen, loss_fam = forward_one_example(specs_lab, gens_lab, fams_lab, batch, optimizer, net, spec_loss, gen_loss, fam_loss, device, 'all')\n \n if tb_writer is not None:\n if model == 'SpecOnly':\n tb_writer.add_scalar(\"train/tot_loss\", tot_loss, step)\n tb_writer.add_scalar(\"train/spec_loss\", loss_spec.item(), step)\n tot_loss_meter.append(tot_loss.item()) \n spec_loss_meter.append(loss_spec.item())\n elif model == 'MLP_Family':\n tb_writer.add_scalar(\"train/tot_loss\", tot_loss, step)\n tb_writer.add_scalar(\"train/fam_loss\", loss_fam.item(), step)\n tot_loss_meter.append(tot_loss.item()) \n fam_loss_meter.append(loss_fam.item())\n elif model == 'MLP_Family_Genus':\n tb_writer.add_scalar(\"train/tot_loss\", tot_loss, step)\n tb_writer.add_scalar(\"train/fam_loss\", loss_fam.item(), step)\n tb_writer.add_scalar(\"train/gen_loss\", loss_gen.item(), step)\n tot_loss_meter.append(tot_loss.item()) \n gen_loss_meter.append(loss_gen.item())\n fam_loss_meter.append(loss_fam.item()) \n else:\n tb_writer.add_scalar(\"train/tot_loss\", tot_loss, step)\n tb_writer.add_scalar(\"train/spec_loss\", loss_spec.item(), step)\n tb_writer.add_scalar(\"train/fam_loss\", loss_fam.item(), step)\n tb_writer.add_scalar(\"train/gen_loss\", loss_gen.item(), step)\n tot_loss_meter.append(tot_loss.item()) \n spec_loss_meter.append(loss_spec.item())\n gen_loss_meter.append(loss_gen.item())\n fam_loss_meter.append(loss_fam.item()) \n prog.set_description(\"loss: {tot_loss}\".format(tot_loss=tot_loss))\n prog.update(1) \n step += 1\n \n return tot_loss_meter, spec_loss_meter, gen_loss_meter, fam_loss_meter, step \n\n\ndef forward_one_example_rasters(specs_lab, gens_lab, fams_lab, batch, rasters, optimizer, net, spec_loss, gen_loss, fam_loss, device, calculated):\n batch = batch.to(device)\n rasters = rasters.to(device)\n specs_lab = specs_lab.to(device) \n gens_lab = gens_lab.to(device)\n fams_lab = fams_lab.to(device)\n optimizer.zero_grad()\n (specs, gens, fams) = net(batch.float(), rasters.float()) \n loss_spec = spec_loss(specs, specs_lab) \n loss_gen = gen_loss(gens, gens_lab) \n loss_fam = fam_loss(fams, fams_lab) \n if calculated == 'species':\n total_loss = loss_spec\n elif calculated == 'family':\n total_loss = loss_fam\n elif calculated == 'genus':\n total_loss = loss_gen\n elif calculated == 'fam_gen':\n total_loss = loss_gen + loss_fam\n else:\n total_loss = loss_spec + loss_gen + loss_fam\n total_loss.backward()\n optimizer.step()\n return total_loss, loss_spec, loss_gen, loss_fam\n\n \ndef forward_one_example_speconly(specs_lab, batch, optimizer, net, spec_loss, device):\n batch = batch.to(device)\n # unwrap the label tensor before moving it to the device\n specs_lab = specs_lab[0]\n specs_lab = specs_lab.to(device) \n optimizer.zero_grad()\n specs = net(batch.float()) \n loss_spec = spec_loss(specs, specs_lab) \n total_loss = loss_spec\n total_loss.backward()\n optimizer.step()\n return total_loss, loss_spec\n\ndef forward_one_example_famgen(fams_lab, gens_lab, batch, optimizer, net, fams_loss, gens_loss, device):\n batch = batch.to(device)\n fams_lab = fams_lab.to(device)\n gens_lab = gens_lab.to(device)\n optimizer.zero_grad()\n fams, gens = net(batch.float()) \n loss_fam = fams_loss(fams, fams_lab) \n loss_gen = gens_loss(gens, gens_lab) \n total_loss = loss_fam + loss_gen\n total_loss.backward()\n optimizer.step()\n return total_loss, loss_gen, 
loss_fam\n\ndef forward_one_example(specs_lab, gens_lab, fams_lab, batch, optimizer, net, spec_loss, gen_loss, fam_loss, device, calculated):\n\n batch = batch.to(device)\n specs_lab = specs_lab.to(device)\n gens_lab = gens_lab.to(device)\n fams_lab = fams_lab.to(device)\n optimizer.zero_grad()\n (specs, gens, fams) = net(batch.float()) \n loss_spec = spec_loss(specs, specs_lab) \n loss_gen = gen_loss(gens, gens_lab) \n loss_fam = fam_loss(fams, fams_lab)\n if calculated == 'species':\n total_loss = loss_spec\n elif calculated == 'family':\n total_loss = loss_fam\n elif calculated == 'genus':\n total_loss = loss_gen\n elif calculated == 'fam_gen':\n total_loss = loss_gen + loss_fam\n else:\n total_loss = loss_spec + loss_gen + loss_fam\n total_loss.backward()\n optimizer.step()\n return total_loss, loss_spec, loss_gen, loss_fam\n\n\n\ndef train_model(ARGS, params):\n\n print(\"torch version {}\".format(torch.__version__))\n# print(params.params.device, ARGS.device, \" hello3\")\n print(\"number of devices visible: {dev}\".format(dev=torch.cuda.device_count()))\n device = torch.device(\"cuda:{dev}\".format(dev=ARGS.device) if ARGS.device >= 0 else \"cpu\")\n print('using device: {device}'.format(device=device))\n if ARGS.device >= 0:\n torch.cuda.set_device(device)\n print(\"current device: {dev} current device name: {name}\".format(dev=torch.cuda.current_device(), name=torch.cuda.get_device_name(torch.cuda.current_device())))\n print(\"current host: {host}\".format(host=socket.gethostname()))\n batch_size=params.params.batch_size\n n_epochs=ARGS.epoch\n # load observation data\n print(\"loading data\")\n datick = time.time()\n train_dataset = setup_dataset(params.params.observation, ARGS.base_dir, params.params.organism, params.params.region, params.params.normalize, params.params.no_altitude, params.params.dataset, params.params.threshold,ARGS.num_species, ARGS.inc_latlon)\n if not ARGS.toy_dataset:\n tb_writer = SummaryWriter(comment=\"_lr-{}_mod-{}_reg-{}_obs-{}_dat-{}org-{}_loss-{}_norm-{}_exp_id-{}\".format(params.params.lr, params.params.model, params.params.region, params.params.observation, params.params.dataset, params.params.organism, params.params.loss, params.params.normalize, params.params.exp_id))\n\n else:\n tb_writer = None\n# train_dataset.obs = train_dataset.obs[:params.batch_size*2]\n print(\"setting up network\")\n # global so can access in collate_fn easily\n global num_specs \n num_specs = train_dataset.num_specs\n global num_fams\n num_fams = train_dataset.num_fams\n global num_gens\n num_gens = train_dataset.num_gens \n start_epoch = None\n step = None \n # pretrained, batch_norm, arch_type):\n net = setup_model(params.params.model, train_dataset, params.params.pretrained, params.params.batch_norm, params.params.arch_type)\n net.to(device)\n optimizer = optim.Adam(net.parameters(), lr=params.params.lr)\n if ARGS.from_scratch or not ARGS.load_from_config:\n start_epoch = 0\n step = 0 \n train_samp, test_samp, idxs = better_split_train_test(train_dataset) \n else:\n net_load = params.get_recent_model(device=device)\n net.load_state_dict(net_load['model_state_dict'])\n net.to(device)\n optimizer = optim.Adam(net.parameters(), lr=params.params.lr)\n optimizer.load_state_dict(net_load['optimizer_state_dict'])\n start_epoch = net_load['epoch']\n step = net_load['step']\n print(\"loading model from epoch {}\".format(start_epoch))\n train_idx, test_idx = train_dataset.train, train_dataset.test\n train_samp = SubsetRandomSampler(train_idx)\n test_samp = 
SubsetRandomSampler(test_idx) \n idxs = {'train' : train_idx, 'test' : test_idx}\n desi = params.get_most_recent_des()\n train_dataset.inv_spec = desi['inv_spec']\n train_dataset.spec_dict = desi['spec_dict']\n train_dataset.gen_dict = desi['gen_dict']\n train_dataset.fam_dict = desi['fam_dict'] \n\n \n print(\"setting up loss\") \n spec_loss, gen_loss, fam_loss = setup_loss(params.params.observation, train_dataset, params.params.loss, params.params.unweighted, device, params.params.loss_type, params.params.model) \n print(\"setting up dataset\")\n if ARGS.toy_dataset:\n\n test_dataset = copy.deepcopy(train_dataset)\n train_dataset.obs = train_dataset.obs[:100]\n test_dataset.obs = test_dataset.obs[100:200]\n train_loader = setup_dataloader(train_dataset, params.params.dataset, batch_size, ARGS.processes, SubsetRandomSampler(np.arange(100)), ARGS.model, joint_collate_fn)\n test_loader = setup_dataloader(test_dataset, params.params.dataset, batch_size, ARGS.processes, SubsetRandomSampler(np.arange(100)), ARGS.model, joint_collate_fn)\n \n else:\n if params.params.dataset == 'satellite_rasters_point':\n collate_fn = joint_raster_collate_fn\n else:\n collate_fn = joint_collate_fn\n \n train_loader = setup_dataloader(train_dataset, params.params.dataset, batch_size, ARGS.processes, train_samp, ARGS.model, collate_fn)\n test_loader = setup_dataloader(train_dataset, params.params.dataset, batch_size, ARGS.processes, test_samp, ARGS.model, collate_fn)\n \n\n \n datock = time.time()\n dadiff = datock - datick\n print(\"loading data took {dadiff} seconds\".format(dadiff=dadiff))\n print(\"number of channels are {}\".format(train_dataset.channels))\n num_batches = math.ceil(len(train_dataset) / batch_size)\n print(\"batch size is {batch_size} and size of dataset is {lens} and num batches is {num_batches}\\n\".format(batch_size=batch_size, lens=len(train_dataset), num_batches=num_batches))\n print(\"starting training\") \n all_time_loss = []\n all_time_sp_loss = []\n all_time_gen_loss = []\n all_time_fam_loss = [] \n \n if params.params.loss == 'sequential' or params.params.loss == 'cumulative':\n tot_epoch = n_epochs\n n_epochs = n_epochs * 3\n print(\"total number of epochs: {} number of epochs of each loss type: {}\".format(n_epochs, tot_epoch))\n else:\n tot_epoch = n_epochs\n epoch = start_epoch\n while epoch < n_epochs:\n print(\"starting training for epoch {}\".format(epoch))\n tick = time.time()\n net.train()\n print(\"before batch\")\n \n tot_loss_meter, spec_loss_meter, gen_loss_meter, fam_loss_meter, step = train_batch(params.params.dataset, train_loader, device, optimizer, net, spec_loss, gen_loss, fam_loss, tb_writer, step, params.params.model, tot_epoch, epoch, params.params.loss)\n print('after batch')\n #TODO change back!!!\n if not ARGS.toy_dataset:\n\n if params.params.model == 'SpecOnly':\n all_time_loss.append(np.stack(tot_loss_meter))\n all_time_sp_loss.append(np.stack(spec_loss_meter))\n all_time_gen_loss = []\n all_time_fam_loss = []\n elif params.params.model == 'MLP_Family':\n all_time_loss.append(np.stack(tot_loss_meter))\n all_time_sp_loss = []\n all_time_gen_loss = []\n all_time_fam_loss.append(np.stack(fam_loss_meter))\n elif params.params.model == 'MLP_Family_Genus':\n all_time_loss.append(np.stack(tot_loss_meter))\n all_time_sp_loss = []\n all_time_gen_loss.append(np.stack(gen_loss_meter))\n all_time_fam_loss.append(np.stack(fam_loss_meter))\n else:\n all_time_loss.append(np.stack(tot_loss_meter))\n all_time_sp_loss.append(np.stack(spec_loss_meter))\n 
all_time_gen_loss.append(np.stack(gen_loss_meter))\n all_time_fam_loss.append(np.stack(fam_loss_meter))\n nets_path=params.build_abs_nets_path(epoch)\n print('nets path ', nets_path)\n torch.save({\n 'epoch': epoch,\n 'model_state_dict': net.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'step' : step\n }, nets_path) \n\n # test\n net.eval()\n all_accs = []\n print(\"testing model\")\n with torch.no_grad():\n means, all_accs, mean_accs = test_batch(test_loader, tb_writer, device, net, params.params.observation, epoch, params.params.loss, params.params.model, params.params.dataset)\n\n#TODO uncomment below line!!!\n if not ARGS.toy_dataset:\n desiderata = {\n 'all_loss': all_time_loss,\n 'spec_loss': all_time_sp_loss,\n 'gen_loss': all_time_gen_loss,\n 'fam_loss': all_time_fam_loss,\n 'means': means,\n 'all_accs': all_accs,\n 'mean_accs': mean_accs,\n 'splits' : idxs,\n 'batch_size': batch_size,\n 'inv_spec' : train_dataset.inv_spec, \n 'spec_dict' : train_dataset.spec_dict, \n 'gen_dict' : train_dataset.gen_dict, \n 'fam_dict': train_dataset.fam_dict,\n 'image_means': train_dataset.dataset_means\n }\n desiderata_path = params.build_abs_desider_path(epoch)\n with open(desiderata_path, 'wb') as f:\n pickle.dump(desiderata, f)\n tock = time.time()\n diff = ( tock-tick)/60\n print (\"epoch {} took {} minutes\".format(epoch, diff))\n epoch += 1\n if not ARGS.toy_dataset:\n tb_writer.close()\n\nif __name__ == \"__main__\":\n np.testing.suppress_warnings()\n args = ['load_from_config','lr', 'epoch', 'device', 'toy_dataset', 'loss', 'processes', 'exp_id', 'base_dir', 'region', 'organism', 'seed', 'observation', 'batch_size', 'model', 'normalize', 'unweighted', 'no_alt', 'from_scratch', 'dataset', 'threshold', 'loss_type', 'num_species', 'batch_norm', 'pretrained', 'arch_type', 'inc_latlon','pretrained_dset']\n ARGS = config.parse_known_args(args) \n config.setup_main_dirs(ARGS.base_dir)\n if ARGS.epoch < 0:\n raise TypeError(\"must have a value set for the number of epochs to run the model\")\n print('epoch', ARGS.epoch)\n print('load from config ', ARGS.load_from_config)\n if ARGS.load_from_config is not None:\n params = config.Run_Params(ARGS.base_dir, ARGS)\n else:\n params = config.Run_Params(ARGS.base_dir, ARGS)\n params.setup_run_dirs(ARGS.base_dir)\n train_model(ARGS, params)\n"
]
| [
[
"torch.zeros",
"torch.cuda.current_device",
"torch.stack",
"torch.is_tensor",
"torch.no_grad",
"torch.cuda.device_count",
"numpy.testing.suppress_warnings",
"torch.cuda.set_device",
"torch.cuda.empty_cache",
"numpy.stack",
"torch.tensor",
"torch.utils.data.DataLoader",
"torch.nn.BCEWithLogitsLoss",
"numpy.arange",
"torch.ones",
"torch.utils.data.sampler.SubsetRandomSampler",
"numpy.expand_dims"
]
]
|
yunai2384/ADIOS2 | [
"c88fd748720dfdfb0d7f8a529d7838ea86ecfa65"
]
| [
"examples/experimental/runtimeconfig/hello/helloBPWriterXML_nompi.py"
]
| [
"#\n# Distributed under the OSI-approved Apache License, Version 2.0. See\n# accompanying file Copyright.txt for details.\n#\n# helloBPWriterXML_nompi.py serial non-MPI version of helloBPWriter.py\n# Created on: Feb 2, 2017\n# Author: William F Godoy [email protected]\n\nimport numpy\nimport adios2\n\n\n# User data\nmyArray = numpy.array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.])\nNx = myArray.size\n\n# ADIOS config file, debug mode\nadios = adios2.ADIOS(\"helloBPWriter.xml\", adios2.DebugON)\n\n# ADIOS IO, name must be the same as in helloBPWriter.xml for runtime settings\nbpIO = adios.DeclareIO(\"BPFile_N2N\")\n\n# ADIOS local array: Variable name, shape, start, offset\nioArray = bpIO.DefineVariable(\n \"bpArray\", [], [], [Nx], adios2.ConstantDims)\n\n# ADIOS Engine\nbpFileWriter = bpIO.Open(\"npArray.bp\", adios2.OpenModeWrite)\nbpFileWriter.Write(ioArray, myArray)\nbpFileWriter.Close()\n"
]
| [
[
"numpy.array"
]
]
|
fito-jaeuklee/Python-NMEA-data-analysis | [
"3aaa2637fefb8284833ce351c722ba3228138425"
]
| [
"testboard.py"
]
| [
"import numpy as np\nimport matplotlib.pyplot as plt\n\nfig = plt.figure()\nax = fig.add_subplot(1, 1, 1)\n\n# Major ticks every 20, minor ticks every 5\nmajor_ticks = np.arange(0, 101, 20)\nminor_ticks = np.arange(0, 101, 5)\n\nax.set_xticks(major_ticks)\nax.set_xticks(minor_ticks, minor=True)\nax.set_yticks(major_ticks)\nax.set_yticks(minor_ticks, minor=True)\n\n# And a corresponding grid\nax.grid(which='both')\n\n# Or if you want different settings for the grids:\n# ax.grid(which='major', alpha=1)\n\nplt.show()"
]
| [
[
"matplotlib.pyplot.show",
"numpy.arange",
"matplotlib.pyplot.figure"
]
]
|
Manny27nyc/chainercv | [
"1db5572aa4bac6c40fd811c51dd63f1b6ad57dcf"
]
| [
"tests/links_tests/model_tests/light_head_rcnn_tests/test_light_head_rcnn.py"
]
| [
"import numpy as np\nimport unittest\n\nimport chainer\nfrom chainer import testing\nfrom chainer.testing import attr\n\nfrom chainercv.links.model.light_head_rcnn import LightHeadRCNN\nfrom chainercv.utils import assert_is_detection_link\nfrom chainercv.utils import generate_random_bbox\n\n\ndef _random_array(xp, shape):\n return xp.array(\n np.random.uniform(-1, 1, size=shape), dtype=np.float32)\n\n\nclass DummyExtractor(chainer.Link):\n\n def __init__(self, feat_stride):\n super(DummyExtractor, self).__init__()\n self.feat_stride = feat_stride\n\n def __call__(self, x):\n _, _, H, W = x.shape\n rpn_features = _random_array(\n self.xp, (1, 8, H // self.feat_stride, W // self.feat_stride))\n roi_features = _random_array(\n self.xp, (1, 8, H // self.feat_stride, W // self.feat_stride))\n return rpn_features, roi_features\n\n\nclass DummyHead(chainer.Chain):\n\n def __init__(self, n_class):\n super(DummyHead, self).__init__()\n self.n_class = n_class\n\n def __call__(self, x, rois, roi_indices):\n n_roi = len(rois)\n cls_locs = chainer.Variable(\n _random_array(self.xp, (n_roi, self.n_class * 4)))\n # For each bbox, the score for a selected class is\n # overwhelmingly higher than the scores for the other classes.\n score_idx = np.random.randint(\n low=0, high=self.n_class, size=(n_roi,))\n scores = self.xp.zeros((n_roi, self.n_class), dtype=np.float32)\n scores[np.arange(n_roi), score_idx] = 100\n scores = chainer.Variable(scores)\n\n return cls_locs, scores\n\n\nclass DummyRegionProposalNetwork(chainer.Chain):\n\n def __init__(self, n_anchor_base, n_roi):\n super(DummyRegionProposalNetwork, self).__init__()\n self.n_anchor_base = n_anchor_base\n self.n_roi = n_roi\n\n def __call__(self, x, img_size, scale):\n B, _, H, W = x.shape\n n_anchor = self.n_anchor_base * H * W\n\n rpn_locs = _random_array(self.xp, (B, n_anchor, 4))\n rpn_cls_scores = _random_array(self.xp, (B, n_anchor, 2))\n rois = self.xp.asarray(generate_random_bbox(\n self.n_roi, img_size, 16, min(img_size)))\n roi_indices = self.xp.zeros((len(rois),), dtype=np.int32)\n anchor = self.xp.asarray(generate_random_bbox(\n n_anchor, img_size, 16, min(img_size)))\n return (chainer.Variable(rpn_locs),\n chainer.Variable(rpn_cls_scores), rois, roi_indices, anchor)\n\n\nclass DummyLightHeadRCNN(LightHeadRCNN):\n\n def __init__(\n self, n_anchor_base, feat_stride, n_fg_class, n_roi,\n min_size, max_size, loc_normalize_mean, loc_normalize_std,\n ):\n super(DummyLightHeadRCNN, self).__init__(\n DummyExtractor(feat_stride),\n DummyRegionProposalNetwork(n_anchor_base, n_roi),\n DummyHead(n_fg_class + 1),\n mean=np.array([[[100]], [[122.5]], [[145]]]),\n min_size=min_size,\n max_size=max_size,\n loc_normalize_mean=loc_normalize_mean,\n loc_normalize_std=loc_normalize_std,\n )\n\n\nclass TestLightHeadRCNN(unittest.TestCase):\n\n def setUp(self):\n self.n_anchor_base = 6\n self.feat_stride = 4\n n_fg_class = 4\n self.n_class = n_fg_class + 1\n self.n_roi = 24\n self.link = DummyLightHeadRCNN(\n n_anchor_base=self.n_anchor_base,\n feat_stride=self.feat_stride,\n n_fg_class=n_fg_class,\n n_roi=self.n_roi,\n min_size=600,\n max_size=1000,\n loc_normalize_mean=(0., 0., 0., 0.),\n loc_normalize_std=(0.1, 0.1, 0.2, 0.2),\n )\n\n def check_call(self):\n xp = self.link.xp\n\n x1 = chainer.Variable(_random_array(xp, (1, 3, 600, 800)))\n scales = chainer.Variable(xp.array([1.], dtype=np.float32))\n roi_cls_locs, roi_scores, rois, roi_indices = self.link(x1, scales)\n\n self.assertIsInstance(roi_cls_locs, chainer.Variable)\n 
self.assertIsInstance(roi_cls_locs.array, xp.ndarray)\n self.assertEqual(roi_cls_locs.shape, (self.n_roi, self.n_class * 4))\n\n self.assertIsInstance(roi_scores, chainer.Variable)\n self.assertIsInstance(roi_scores.array, xp.ndarray)\n self.assertEqual(roi_scores.shape, (self.n_roi, self.n_class))\n\n self.assertIsInstance(rois, xp.ndarray)\n self.assertEqual(rois.shape, (self.n_roi, 4))\n\n self.assertIsInstance(roi_indices, xp.ndarray)\n self.assertEqual(roi_indices.shape, (self.n_roi,))\n\n def test_call_cpu(self):\n self.check_call()\n\n @attr.gpu\n def test_call_gpu(self):\n self.link.to_gpu()\n self.check_call()\n\n def test_predict_cpu(self):\n assert_is_detection_link(self.link, self.n_class - 1)\n\n @attr.gpu\n def test_predict_gpu(self):\n self.link.to_gpu()\n assert_is_detection_link(self.link, self.n_class - 1)\n\n\[email protected](\n {'in_shape': (3, 100, 100), 'expected_shape': (3, 200, 200)},\n {'in_shape': (3, 200, 50), 'expected_shape': (3, 400, 100)},\n {'in_shape': (3, 400, 100), 'expected_shape': (3, 400, 100)},\n {'in_shape': (3, 300, 600), 'expected_shape': (3, 200, 400)},\n {'in_shape': (3, 600, 900), 'expected_shape': (3, 200, 300)}\n)\nclass TestLightHeadRCNNPrepare(unittest.TestCase):\n\n min_size = 200\n max_size = 400\n\n def setUp(self):\n self.link = DummyLightHeadRCNN(\n n_anchor_base=1,\n feat_stride=16,\n n_fg_class=21,\n n_roi=1,\n min_size=self.min_size,\n max_size=self.max_size,\n loc_normalize_mean=(0., 0., 0., 0.),\n loc_normalize_std=(0.1, 0.1, 0.2, 0.2),\n )\n\n def check_prepare(self):\n x = _random_array(np, self.in_shape)\n out = self.link.prepare(x)\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape, self.expected_shape)\n\n def test_prepare_cpu(self):\n self.check_prepare()\n\n @attr.gpu\n def test_prepare_gpu(self):\n self.link.to_gpu()\n self.check_prepare()\n\n\ntesting.run_module(__name__, __file__)\n"
]
| [
[
"numpy.array",
"numpy.random.uniform",
"numpy.random.randint",
"numpy.arange"
]
]
|
PengJingchao/DFNet | [
"49e83501f81515aebca211351e315896da7afc54"
]
| [
"modules/dataset/rgb_t_dataset_separate.py"
]
| [
"import sys\nimport numpy as np\nfrom PIL import Image\n\nimport torch\nimport torch.utils.data as data\nimport matplotlib.pyplot as plt\nfrom ..utils import *\n\nimport matplotlib.patches as patches\n\nimport os\nfrom ..sample_generator import *\n\nimport sys\n# from pretrain_options import *\n\nfrom ..img_cropper import *\n\n\nclass RGB_T_Dataset(data.Dataset):\n def __init__(self, rgb_img_dir, rgb_img_list, t_img_dir, t_img_list, gt, receptive_field, opts):\n\n self.rgb_img_list = np.array([os.path.join(rgb_img_dir, img) for img in rgb_img_list])\n self.t_img_list = np.array([os.path.join(t_img_dir, img) for img in t_img_list])\n self.gt = gt\n\n self.batch_frames = opts['batch_frames']\n self.batch_pos = opts['batch_pos']\n self.batch_neg = opts['batch_neg']\n\n self.overlap_pos = opts['overlap_pos']\n self.overlap_neg = opts['overlap_neg']\n\n self.crop_size = opts['img_size']\n self.padding = opts['padding']\n\n self.index = np.random.permutation(len(self.rgb_img_list))\n self.pointer = 0\n\n image = Image.open(self.rgb_img_list[0]).convert('RGB')\n self.scene_generator = SampleGenerator('gaussian', image.size, trans_f=1.5, scale_f=1.2, valid=True)\n self.pos_generator = SampleGenerator('gaussian', image.size, 0.1, 1.2, 1.1, True)\n self.neg_generator = SampleGenerator('uniform', image.size, 1, 1.2, 1.1, True)\n\n self.receptive_field = receptive_field\n\n self.interval = opts['frame_interval']\n self.img_crop_model = imgCropper(opts['padded_img_size'])\n self.img_crop_model.eval()\n self.use_gpu = opts['use_gpu']\n if opts['use_gpu']:\n self.img_crop_model.gpuEnable()\n\n def __iter__(self):\n return self\n\n def __next__(self):\n\n next_pointer = min(self.pointer + self.batch_frames, len(self.rgb_img_list))\n idx = self.index[self.pointer:next_pointer]\n if len(idx) < self.batch_frames:\n self.index = np.random.permutation(len(self.rgb_img_list))\n next_pointer = self.batch_frames - len(idx)\n idx = np.concatenate((idx, self.index[:next_pointer]))\n self.pointer = next_pointer\n\n n_pos = self.batch_pos\n n_neg = self.batch_neg\n\n scenes_rgb = []\n scenes_t = []\n total_pos_rois = []\n total_neg_rois = []\n for i, (rgb_img_path, t_img_path, bbox) in enumerate(\n zip(self.rgb_img_list[idx], self.t_img_list[idx], self.gt[idx])):\n image_rgb = Image.open(rgb_img_path).convert('RGB')\n image_rgb = np.asarray(image_rgb)\n image_t = Image.open(t_img_path).convert('RGB')\n image_t = np.asarray(image_t)\n\n ishape = image_rgb.shape\n pos_examples = gen_samples(SampleGenerator('gaussian', (ishape[1], ishape[0]), 0.1, 1.2, 1.1, False), bbox,\n n_pos, overlap_range=self.overlap_pos)\n neg_examples = gen_samples(SampleGenerator('uniform', (ishape[1], ishape[0]), 1, 1.2, 1.1, False), bbox,\n n_neg, overlap_range=self.overlap_neg)\n\n # compute padded sample\n padded_x1 = (neg_examples[:, 0] - neg_examples[:, 2] * (self.padding - 1.) / 2.).min()\n padded_y1 = (neg_examples[:, 1] - neg_examples[:, 3] * (self.padding - 1.) / 2.).min()\n padded_x2 = (neg_examples[:, 0] + neg_examples[:, 2] * (self.padding + 1.) / 2.).max()\n padded_y2 = (neg_examples[:, 1] + neg_examples[:, 3] * (self.padding + 1.) / 2.).max()\n padded_scene_box = np.asarray((padded_x1, padded_y1, padded_x2 - padded_x1, padded_y2 - padded_y1))\n\n jitter_scale = 1.1 ** np.clip(3. 
* np.random.randn(1, 1), -2, 2)\n crop_img_size = (padded_scene_box[2:4] * ((self.crop_size, self.crop_size) / bbox[2:4])).astype('int64') * \\\n jitter_scale[0][0]\n cropped_image, cur_image_var = self.img_crop_model.crop_image(image_rgb,\n np.reshape(padded_scene_box, (1, 4)),\n crop_img_size)\n cropped_image_t, cur_image_var_t = self.img_crop_model.crop_image(image_t,\n np.reshape(padded_scene_box, (1, 4)),\n crop_img_size)\n cropped_image = cropped_image - 128.\n cropped_image_t = cropped_image_t - 128.\n if self.use_gpu:\n cropped_image = cropped_image.data.cpu()\n cur_image_var = cur_image_var.cpu()\n cropped_image_t = cropped_image_t.data.cpu()\n cur_image_var_t = cur_image_var_t.cpu()\n # cropped_image_4channel = torch.cat((cropped_image, cropped_image_t), 1)\n scenes_rgb.append(cropped_image)\n scenes_t.append(cropped_image_t)\n ## get current frame and heatmap\n\n rel_bbox = np.copy(bbox)\n rel_bbox[0:2] -= padded_scene_box[0:2]\n\n jittered_obj_size = jitter_scale[0][0] * float(self.crop_size)\n\n batch_num = np.zeros((pos_examples.shape[0], 1))\n pos_rois = np.copy(pos_examples)\n pos_rois[:, 0:2] -= np.repeat(np.reshape(padded_scene_box[0:2], (1, 2)), pos_rois.shape[0], axis=0)\n pos_rois = samples2maskroi(pos_rois, self.receptive_field, (jittered_obj_size, jittered_obj_size),\n bbox[2:4], self.padding)\n pos_rois = np.concatenate((batch_num, pos_rois), axis=1)\n\n batch_num = np.zeros((neg_examples.shape[0], 1))\n neg_rois = np.copy(neg_examples)\n neg_rois[:, 0:2] -= np.repeat(np.reshape(padded_scene_box[0:2], (1, 2)), neg_rois.shape[0], axis=0)\n neg_rois = samples2maskroi(neg_rois, self.receptive_field, (jittered_obj_size, jittered_obj_size),\n bbox[2:4], self.padding)\n neg_rois = np.concatenate((batch_num, neg_rois), axis=1)\n\n total_pos_rois.append(torch.from_numpy(np.copy(pos_rois).astype('float32')))\n total_neg_rois.append(torch.from_numpy(np.copy(neg_rois).astype('float32')))\n\n return scenes_rgb, scenes_t, total_pos_rois, total_neg_rois\n\n next = __next__\n\n def extract_regions(self, image, samples):\n regions = np.zeros((len(samples), self.crop_size, self.crop_size, 3), dtype='uint8')\n for i, sample in enumerate(samples):\n regions[i] = crop_image(image, sample, self.crop_size, self.padding, True)\n\n regions = regions.transpose(0, 3, 1, 2)\n regions = regions.astype('float32') - 128.\n return regions\n\n\nclass RegionExtractor:\n def __init__(self, image, samples, crop_size, padding, batch_size, shuffle=False):\n\n self.image = np.asarray(image)\n self.samples = samples\n self.crop_size = crop_size\n self.padding = padding\n self.batch_size = batch_size\n self.shuffle = shuffle\n\n self.index = np.arange(len(samples))\n self.pointer = 0\n\n self.mean = self.image.mean(0).mean(0).astype('float32')\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.pointer == len(self.samples):\n self.pointer = 0\n raise StopIteration\n else:\n next_pointer = min(self.pointer + self.batch_size, len(self.samples))\n index = self.index[self.pointer:next_pointer]\n self.pointer = next_pointer\n\n regions = self.extract_regions(index)\n regions = torch.from_numpy(regions)\n return regions\n\n next = __next__\n\n def extract_regions(self, index):\n regions = np.zeros((len(index), self.crop_size, self.crop_size, 3), dtype='uint8')\n for i, sample in enumerate(self.samples[index]):\n regions[i] = crop_image(self.image, sample, self.crop_size, self.padding)\n\n regions = regions.transpose(0, 3, 1, 2).astype('float32')\n regions = regions - 128.\n return regions\n"
]
| [
[
"numpy.concatenate",
"numpy.asarray",
"numpy.zeros",
"numpy.reshape",
"numpy.copy",
"numpy.random.randn",
"torch.from_numpy"
]
]
|
auroua/NPENASv1 | [
"65bdece174f0da2f9a3c716b86859abba077d279"
]
| [
"nas_lib/models_darts/darts_ops.py"
]
| [
"import torch\nimport torch.nn as nn\n\nOPS = {\n 'none': lambda C, stride, affine: Zero(stride),\n 'avg_pool_3x3': lambda C, stride, affine: nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False),\n 'max_pool_3x3': lambda C, stride, affine: nn.MaxPool2d(3, stride=stride, padding=1),\n 'skip_connect': lambda C, stride, affine: Identity() if stride == 1 else FactorizedReduce(C, C, affine=affine),\n 'sep_conv_3x3': lambda C, stride, affine: SepConv(C, C, 3, stride, 1, affine=affine),\n 'sep_conv_5x5': lambda C, stride, affine: SepConv(C, C, 5, stride, 2, affine=affine),\n 'sep_conv_7x7': lambda C, stride, affine: SepConv(C, C, 7, stride, 3, affine=affine),\n 'dil_conv_3x3': lambda C, stride, affine: DilConv(C, C, 3, stride, 2, 2, affine=affine),\n 'dil_conv_5x5': lambda C, stride, affine: DilConv(C, C, 5, stride, 4, 2, affine=affine),\n 'conv_7x1_1x7': lambda C, stride, affine: nn.Sequential(\n nn.ReLU(inplace=False),\n nn.Conv2d(C, C, (1,7), stride=(1, stride), padding=(0, 3), bias=False),\n nn.Conv2d(C, C, (7,1), stride=(stride, 1), padding=(3, 0), bias=False),\n nn.BatchNorm2d(C, affine=affine)\n ),\n}\n\n\nclass ReLUConvBN(nn.Module):\n def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):\n super(ReLUConvBN, self).__init__()\n self.op = nn.Sequential(\n nn.ReLU(inplace=False),\n nn.Conv2d(C_in, C_out, kernel_size, stride=stride, padding=padding, bias=False),\n nn.BatchNorm2d(C_out, affine=affine)\n )\n\n def forward(self, x):\n return self.op(x)\n\n\nclass DilConv(nn.Module):\n def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, affine=True):\n super(DilConv, self).__init__()\n self.op = nn.Sequential(\n nn.ReLU(inplace=False),\n nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=C_in, bias=False),\n nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),\n nn.BatchNorm2d(C_out, affine=affine),\n )\n\n def forward(self, x):\n return self.op(x)\n\n\nclass SepConv(nn.Module):\n def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):\n super(SepConv, self).__init__()\n self.op = nn.Sequential(\n nn.ReLU(inplace=False),\n nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride, padding=padding, groups=C_in, bias=False),\n nn.Conv2d(C_in, C_in, kernel_size=1, padding=0, bias=False),\n nn.BatchNorm2d(C_in, affine=affine),\n nn.ReLU(inplace=False),\n nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=1, padding=padding, groups=C_in, bias=False),\n nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),\n nn.BatchNorm2d(C_out, affine=affine),\n )\n\n def forward(self, x):\n return self.op(x)\n\n\nclass Identity(nn.Module):\n def __init__(self):\n super(Identity, self).__init__()\n\n def forward(self, x):\n return x\n\n\nclass Zero(nn.Module):\n def __init__(self, stride):\n super(Zero, self).__init__()\n self.stride = stride\n\n def forward(self, x):\n if self.stride == 1:\n return x.mul(0.)\n return x[:, :, ::self.stride, ::self.stride].mul(0.)\n\n\nclass FactorizedReduce(nn.Module):\n def __init__(self, C_in, C_out, affine=True):\n super(FactorizedReduce, self).__init__()\n assert C_out % 2 == 0\n self.relu = nn.ReLU(inplace=False)\n self.conv_1 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False)\n self.conv_2 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False)\n self.bn = nn.BatchNorm2d(C_out, affine=affine)\n\n def forward(self, x):\n x = self.relu(x)\n out = torch.cat([self.conv_1(x), self.conv_2(x[:,:,1:,1:])], 
dim=1)\n out = self.bn(out)\n return out\n\n"
]
| [
[
"torch.nn.MaxPool2d",
"torch.nn.AvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d"
]
]
|
OMaraLab/Rh_NHCs_NMR | [
"fff030d004864765adc6dc4fe2b39051fcbd2790"
]
| [
"orbitals/Rh2_CSe_DMAD_CF3_dppm2/Shift_tensorify.py"
]
| [
"import numpy as np\nimport matplotlib.pyplot as plt\n\nwith open(\"Rh2_CSe_Ring_dppm2.out\") as f:\n lines = f.readlines()\n for i, line in enumerate(lines):\n if \"Orbital pair contributions to the principal shielding components of Nuc=2Se (ppm)\" in line:\n pair_start = i+2\n if \" Nucleus 3Cl:\" in line:\n pair_end = i -1\n pairlines = lines[pair_start:pair_end]\n MO_is = []\n MO_js = []\n ISOs = []\n iso_pairs = []\n for line in pairlines:\n toks = line.split()\n MO_i = int(toks[0])\n MO_is.append(MO_i)\n MO_j = int(toks[1])\n MO_js.append(MO_j)\n DIA_x = float(toks[2])\n DIA_y = float(toks[3])\n DIA_z = float(toks[4])\n PARA_x = float(toks[5])\n PARA_y = float(toks[6])\n PARA_z = float(toks[7])\n TOTAL_x = float(toks[8])\n TOTAL_y = float(toks[9])\n TOTAL_z = float(toks[10])\n ISO = float(toks[11])\n ISOs.append(ISO)\n ANISO = float(toks[12])\n iso_pairs.append([MO_i,MO_j,ISO])\n \n MO_is = np.asarray(MO_is)\n print(MO_is)\n MO_i_max = MO_is.max()\n MO_js = np.asarray(MO_js)\n MO_j_max = MO_js.max()\n ISOs = np.asarray(ISOs)\n ISO_min = ISOs.min()\n\n\n\n data = np.zeros((MO_i_max+1, MO_j_max+1))\n for pair in iso_pairs:\n i = pair[0]\n j = pair[1]\n val = pair[2]\n if val < -10:\n print(f\"{i} {j} {val}\")\n\n data[i,j] = val\n print(data.min())\n plt.matshow(data, cmap='bwr', vmin=-700, vmax=700)\n plt.axvline(x=MO_i_max, color=\"green\", linestyle=\"dashed\")\n\n plt.show()\n \n\n\n \n\n\n"
]
| [
[
"matplotlib.pyplot.matshow",
"numpy.asarray",
"numpy.zeros",
"matplotlib.pyplot.show",
"matplotlib.pyplot.axvline"
]
]
|
smitexx/umucv | [
"875ab90b77fc189a87cef4f16cd090218a574962"
]
| [
"code/shape/trebol5.py"
]
| [
"#!/usr/bin/env python\n\n# Paso 5: Finalmente comparamos los invariantes de los contornos \n# encontrados en la imagen con el modelo y señalamos\n# los que son muy parecidos\n \n\n# ./trebol4.py --dev=dir:../../images/cards.png\n\nimport cv2 as cv\nfrom umucv.stream import autoStream\nimport numpy as np\n# import necesario\nfrom umucv.util import putText\n\n\nfrom numpy.fft import fft\n\n\ndef invar(c, wmax=10):\n x,y = c.T\n z = x+y*1j\n f = fft(z)\n fa = abs(f)\n\n s = fa[1] + fa[-1]\n\n v = np.zeros(2*wmax+1)\n v[:wmax] = fa[2:wmax+2]\n v[wmax:] = fa[-wmax-1:]\n\n \n if fa[-1] > fa[1]:\n v[:-1] = v[-2::-1]\n v[-1] = fa[1]\n\n return v / s\n\n\ndef binarize(gray):\n _, r = cv.threshold(gray, 128, 255, cv.THRESH_BINARY+cv.THRESH_OTSU)\n return r\n\ndef extractContours(image):\n g = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n if black:\n g = 255-g\n b = binarize(g) \n contours, _ = cv.findContours(b.copy(), cv.RETR_CCOMP, cv.CHAIN_APPROX_NONE)[-2:]\n contours = [ c.reshape(-1,2) for c in contours ]\n contours = sorted(contours, key=cv.contourArea, reverse=True)\n return contours\n\ndef razonable(c):\n return 100**2 >= cv.contourArea(c) >= 10**2\n\ndef orientation(x):\n return cv.contourArea(x.astype(np.float32),oriented=True) >= 0\n\n\nblack = True\n\nshcont = False\n\nmodel = extractContours(cv.imread('../../images/shapes/trebol.png'))[0]\n\nmodelaux = np.zeros([200,200], np.uint8)\ncv.drawContours(modelaux, [model], -1, 255, cv.FILLED)\ncv.imshow('model', modelaux)\n\ninvmodel = invar(model)\n\n\nfor (key,frame) in autoStream():\n\n if key == ord('c'):\n shcont = not shcont\n\n contours = extractContours(frame)\n \n ok = [c for c in contours if razonable(c) and not orientation(c)]\n\n # seleccionamos los contornos con un descriptr muy parecido al del modelo \n found = [c for c in ok if np.linalg.norm(invar(c)-invmodel) < 0.15 ]\n\n\n if shcont:\n result = np.zeros_like(frame)\n cp = [c for c in contours if orientation(c) ]\n cn = [c for c in contours if not orientation(c) ]\n \n cv.drawContours(result, cp, contourIdx=-1, color=(255,128,128), thickness=1, lineType=cv.LINE_AA)\n cv.drawContours(result, cn, -1, (128,128,255), 1)\n else:\n result = frame\n # en este modo de visualización mostramos solo los detectados\n cv.drawContours(result, found, -1, (0,255,0), cv.FILLED)\n \n # y en ambos modos mostramos la similitud y el área\n for c in found:\n info = '{:.2f} {}'.format(np.linalg.norm(invar(c)-invmodel),cv.contourArea(c))\n putText(result ,info,c.mean(axis=0).astype(int))\n\n cv.imshow('shape recognition',result)\n\ncv.destroyAllWindows()\n\n# puedes añadir un trackbar para controlar el umbral de detección\n# se pueden evitar la repetición de operaciones en putText\n\n\n"
]
| [
[
"numpy.zeros_like",
"numpy.fft.fft",
"numpy.zeros"
]
]
|
TanKinh/CycleGan-handwriting_generation | [
"df974b8ae1927f2dbe84345da7c3f3a8a567f138"
]
| [
"data/prepare_data/prepare_casia.py"
]
| [
"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport argparse\nimport sys\nimport numpy as np\nimport scipy.misc\nimport os\nimport glob\nfrom PIL import Image\nfrom PIL import ImageDraw\nfrom PIL import ImageFont\nimport json\nimport collections\nimport random\n\nprev = 0\n\ndef draw_single_char(ch, font, canvas_size=128, x_offset=0, y_offset=0):\n img = Image.new(\"L\", (canvas_size, canvas_size), 255)\n draw = ImageDraw.Draw(img)\n draw.text((x_offset, y_offset), ch, 0, font=font)\n return img\n\n\ndef resize_image(img):\n # pad to square\n pad_size = int(abs(img.shape[0]-img.shape[1]) / 2)\n if img.shape[0] < img.shape[1]:\n pad_dims = ((pad_size, pad_size), (0, 0))\n else:\n pad_dims = ((0, 0), (pad_size, pad_size))\n img = np.lib.pad(img, pad_dims, mode='constant', constant_values=255)\n # resize\n img = scipy.misc.imresize(img, (128, 128))\n\n assert img.shape == (128, 128)\n return img\n\n\ndef main(path, source_path, ratioA, ratioB, percentage, font_size, offset):\n global prev\n source_font = ImageFont.truetype(source_path, size=font_size)\n f = open(path, \"rb\")\n directory, name = os.path.split(path)\n random.seed(20171201)\n charlist = []\n bitmaplist = []\n sourcelist = []\n tmp = []\n filename = os.path.basename(path).split('.')[0]\n datafolder = os.path.join(os.path.normpath(directory + os.sep + os.pardir),\n\t\t\t 'datasets',\n str.join('_', [name.split('.')[0], str(font_size), str(offset), str(ratioA)]))\n\n print(datafolder)\n if not os.path.exists(datafolder):\n os.makedirs(datafolder)\n trainA_path = os.path.join(datafolder, 'trainA_0.5')\n trainB_path = os.path.join(datafolder, 'trainB_0.5')\n testA_path = os.path.join(datafolder, 'testA_0.5')\n testB_path = os.path.join(datafolder, 'testB_0.5')\n folders = [trainA_path,trainB_path, testA_path, testB_path]\n for folder in folders:\n if not os.path.exists(folder):\n os.mkdir(folder)\n while True:\n tmp = f.read(4)\n if len(tmp) is 0:\n break\n else:\n sample_size = np.fromstring(tmp, dtype=np.uint32).item()\n tag_code = np.fromstring(f.read(2), dtype=np.uint16).newbyteorder().item()\n width = np.fromstring(f.read(2), dtype=np.uint16).item()\n height = np.fromstring(f.read(2), dtype=np.uint16).item()\n bitmap = np.fromstring(f.read(width * height), dtype=np.uint8)\n bitmap = bitmap.reshape([height, width])\n bitmap = resize_image(bitmap)\n if (random.randrange(100) <= percentage):\n bitmaplist.append(bitmap)\n ch = bytearray.fromhex(str(hex(tag_code))[2:]).decode('gb2312')\n charlist.append(ch)\n source_img = draw_single_char(ch, font = source_font, x_offset=offset, y_offset=offset)\n sourcelist.append(source_img)\n\n print(\"Number of images: {}\".format(len(sourcelist)))\n arr = np.arange(len(charlist))\n np.random.shuffle(arr)\n ntrainA = np.floor(float(ratioA) * len(charlist))\n ntrainB = np.floor(float(ratioB) * len(charlist))\n for i, x in enumerate(np.arange(len(arr))):\n ch = charlist[arr[x]]\n print(ord(ch),' ',ch)\n bitmap = bitmaplist[arr[x]]\n source_img = sourcelist[arr[x]]\n if arr[x]<=ntrainA and arr[x]<=ntrainB:\n scipy.misc.imsave(os.path.join(trainA_path, str(ord(ch)) + '.png'), bitmap)\n scipy.misc.imsave(os.path.join(trainB_path, str(ord(ch)) + '.png'), source_img)\n elif arr[x]>ntrainA and arr[x]<=ntrainB:\n scipy.misc.imsave(os.path.join(testA_path, str(ord(ch)) + '.png'), bitmap)\n scipy.misc.imsave(os.path.join(trainB_path, str(ord(ch)) + '.png'), source_img)\n elif arr[x]<=ntrainA and arr[x]>ntrainB:\n 
scipy.misc.imsave(os.path.join(trainA_path, str(ord(ch)) + '.png'), bitmap)\n            scipy.misc.imsave(os.path.join(testB_path, str(ord(ch)) + '.png'), source_img)\n        else:\n            scipy.misc.imsave(os.path.join(testA_path, str(ord(ch)) + '.png'), bitmap)\n            scipy.misc.imsave(os.path.join(testB_path, str(ord(ch)) + '.png'), source_img)\n    prev += len(arr)\n\n\nif __name__ == '__main__':\n    # python data/prepare_data/prepare_casia.py --source data/sources/1252-c.gnt --font data/fonts/simhei.ttf --fontSize 116 --offset 6 --percent 100 --ratioA 0.9 --ratioB 0.9\n    parser = argparse.ArgumentParser(description=\"Preprocessing data\")\n    parser.add_argument('--source', dest='source', help=\"input file(s) to process\")\n    parser.add_argument('--font', dest='font', help=\"font to process\")\n    parser.add_argument('--ratioA', dest='ratioA', type=float, default='0.7', help='the split ratio of the training and test data')\n    parser.add_argument('--ratioB', dest='ratioB', type=float, default='0.7', help='the split ratio of the training and test data')\n    parser.add_argument('--percent', dest='percent', type=int, default='50', help='the wanted percentage of dataset')\n    parser.add_argument('--fontSize', dest='fontSize', type=int, default='128', help='the wanted size of font character')\n    parser.add_argument('--offset', dest='offset', type=int, default='0', help='the x and y offset of font character image')\n    args = parser.parse_args()\n\n    print(args.source, args.font, args.ratioA, args.ratioB, args.percent, args.fontSize, args.offset)\n    main(args.source, args.font, args.ratioA, args.ratioB, args.percent, args.fontSize, args.offset)\n\n"
]
| [
[
"numpy.lib.pad",
"numpy.fromstring",
"numpy.random.shuffle"
]
]
|
garagonc/optimization-framework | [
"1ca57699d6a3f2f98dcaea96430e75c3f847b49f"
]
| [
"prediction/FUR_Incremental_OneDay.py"
]
| [
"import datetime\nimport time\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\nimport random as rn\nimport os\n\nimport keras\nfrom keras import Input\nfrom keras.models import Sequential, Model\nfrom keras.layers import concatenate\nfrom keras.layers import Dense\nfrom keras.layers import LSTM, Dropout\nfrom keras.callbacks import EarlyStopping\nfrom keras.callbacks import ReduceLROnPlateau\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.models import load_model\nfrom keras import regularizers\nimport keras as k\nos.environ['PYTHONHASHSEED'] = '0'\nnp.random.seed(42)\nrn.seed(12345)\n# Restricting operation to 1 thread for reproducible results.\nsession_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)\nfrom keras import backend as K\n# Setting the graph-level random seed.\ntf.set_random_seed(1234)\nsess = tf.Session(graph=tf.get_default_graph(), config=session_conf)\nK.set_session(sess)\n\n\n\ndef read_data(filename):\n df = pd.read_csv(filename, header=None)\n df.columns = ['Time', 'PV']\n df['Time'] = pd.to_datetime(df[\"Time\"], errors='coerce')\n df.index = df[\"Time\"]\n df = df.drop(columns=['Time'])\n print(df.head())\n\n return df\n\ndef pre_processing_data(real_file, hist_file):\n\n df = pd.read_csv(real_file, header=None)\n df.columns = ['Time', 'Values']\n df['Time'] = pd.to_datetime(df[\"Time\"], errors='coerce')\n df.index = df[\"Time\"]\n df = df.drop(columns=['Time'])\n print(\"read csv\")\n print(df.head())\n\n #Changing Frequency of Data to Minutes\n df = df.resample('T').mean()\n \n #checking for null values and if any, replacing them with last valid observation\n df.isnull().sum()\n df.Values.fillna(method='pad', inplace=True) \n data = df.values.reshape(-1, 1) \n flat_list = [item for sublist in data for item in sublist]\n #Quantile Normalization\n s = pd.Series(flat_list)\n quant = s.quantile(0.75)\n Xmin = np.amin(data)\n Xmax = quant\n X_std = (data - Xmin) / (Xmax - Xmin) \n max = 1\n min = 0\n X_scaled = X_std * (max - min) + min\n\n hist_data = []\n start_date_hist = datetime.datetime.strptime(\"2016-01-01 00:00:00\", \"%Y-%m-%d %H:%M:%S\")\n with open(hist_file, \"r\") as f:\n data = f.readlines()\n data.insert(0, data[-1])\n for v in data:\n hist_data.append([start_date_hist.strftime(\"%Y-%m-%d %H:%M:%S\"), float(v)])\n start_date_hist += datetime.timedelta(hours=1)\n\n hd = pd.DataFrame(hist_data, columns=['Time', 'Values'])\n hd['Time'] = pd.to_datetime(hd[\"Time\"], errors='coerce')\n hd.index = hd[\"Time\"]\n hd = hd.drop(columns=['Time'])\n print(hd.head(20))\n\n data = hd.values.reshape(-1, 1)\n Xmin = np.amin(data)\n Xmax = np.amax(data)\n X_std = (data - Xmin) / (Xmax - Xmin)\n max = 1\n min = 0\n X_scaled_hist = X_std * (max - min) + min\n\n return X_scaled, df, X_scaled_hist, hd\n\ndef train_model(realXtrain, histXtrain, Ytrain, model, input_size_real, input_size_hist, hidden_size, batch_size,\n output_size, Num_Epochs):\n \n #Creating LSTM's structure\n if model is None:\n print(\"Training the model..........\")\n real_input = Input(batch_shape=(batch_size, input_size_real, 1), name=\"real\")\n real_features = LSTM(hidden_size, stateful=True, return_sequences=True)(real_input)\n\n hist_input = Input(batch_shape=(batch_size, input_size_hist, 1), name=\"hist\")\n hist_features = LSTM(hidden_size, stateful=True, return_sequences=True)(hist_input)\n\n x = concatenate([real_features, hist_features], axis=1)\n x = Dropout(0.3)(x)\n x = LSTM(hidden_size, stateful=True)(x)\n output_layer = 
Dense(output_size)(x)\n\n        model = Model(inputs=[real_input, hist_input], outputs=output_layer)\n\n        model.summary()\n\n        adam = k.optimizers.Adam(lr=0.01)\n        model.compile(loss=\"mean_squared_error\", optimizer=adam,\n                      metrics=[\"mean_squared_error\"])\n    \n    \n    # define reduceLROnPlateau and early stopping callback\n    reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.2,\n                                  patience=3, min_lr=0.001)\n    earlystop = EarlyStopping(monitor='loss', min_delta=0.0001, patience=3, verbose=1, mode='auto')\n    \n    # define the checkpoint\n    filepath = \"model.h5\"\n    checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=0, save_best_only=True, mode='min')\n    \n    callbacks_list = [reduce_lr,earlystop,checkpoint]\n    \n    #Training a stateful LSTM\n    for i in range(Num_Epochs):\n        print(\"Epoch {:d}/{:d}\".format(i+1, Num_Epochs))\n        model.fit({\"real\": realXtrain, \"hist\": histXtrain}, Ytrain, batch_size=Batch_Size, epochs=1, verbose=2, callbacks=callbacks_list, shuffle=False)\n        model.reset_states()\n    \n    return model\n\n\ndef predict_model(model, realXtest, histXtest, Batch_Size):\n    \n    #Predicting for the test data\n    start_time = time.clock()\n    pred = model.predict({\"real\": realXtest, \"hist\": histXtest},batch_size=Batch_Size)\n    end_time = time.clock()\n    time_taken = end_time - start_time\n\n    return pred[0], time_taken\n\ndef find_nearest_hour_index(t):\n    start_date_hist = datetime.datetime.strptime(\"2016-01-01 00:00:00\", \"%Y-%m-%d %H:%M:%S\")\n    if t.minute > 30:\n        t = t.replace(year=2016, minute=0, second=0, microsecond=0) + datetime.timedelta(hours=1)\n    else:\n        t = t.replace(year=2016, minute=0, second=0, microsecond=0)\n    index = int((t - start_date_hist).total_seconds()/3600)\n    return index\n\n\ndef incremental_algorithm(X_scaled, df, X_scaled_hist, Hist_input_size, look_back, Hidden_Size, Batch_Size, Num_Epochs):\n\n    num_features = 1\n    prediction_horizon = 1440\n    nb_samples = X_scaled.shape[0] - look_back - prediction_horizon\n    x_train_reshaped = np.zeros((nb_samples, look_back, num_features))\n    y_train_reshaped = np.zeros((nb_samples, prediction_horizon))\n    print(\"----\", X_scaled.shape[0])\n    print(\"initial X\",x_train_reshaped.shape)\n    print(\"initial Y\",y_train_reshaped.shape)\n    \n    train_time = []\n    prediction_time = []\n    prediction_error = []\n    prediction_median = []\n    prediction_std = []\n\n    for i in range(nb_samples):\n        start_date_index = find_nearest_hour_index(datetime.datetime.strptime(str(df.index[i]), \"%Y-%m-%d %H:%M:%S\"))\n\n        end_date_index = start_date_index + Hist_input_size\n        histXtrain = X_scaled_hist[start_date_index:end_date_index]\n        if end_date_index >= len(X_scaled_hist):\n            histXtrain = histXtrain + X_scaled_hist[0:len(X_scaled_hist)-end_date_index]\n\n        histXtrain = np.reshape(histXtrain, (1,) + histXtrain.shape)\n        print(\"hist shape \"+str(histXtrain.shape))\n        y_position = i + look_back\n        y_position_end = y_position + prediction_horizon\n        x_train_reshaped[i] = X_scaled[i:y_position]\n        y__re = X_scaled[y_position:y_position_end]\n        y_train_reshaped[i] = [item for sublist in y__re for item in sublist]\n        realXtrain = np.reshape(x_train_reshaped[i], (1,) + x_train_reshaped[i].shape)\n        ytrain = np.reshape(y_train_reshaped[i], (1,) + y_train_reshaped[i].shape)\n\n        print(\"realX train shape : \"+str(realXtrain.shape))\n\n        start_time = time.clock()\n        if i == 0:\n            trained_model = train_model(realXtrain, histXtrain, ytrain, None, look_back, Hist_input_size, Hidden_Size, Batch_Size,\n                                        
prediction_horizon, Num_Epochs)\n    else:\n        trained_model = train_model(realXtrain, histXtrain, ytrain, trained_model, look_back, Hist_input_size, Hidden_Size, Batch_Size,\n                                    prediction_horizon, Num_Epochs)\n    end_time = time.clock()\n    time_taken = end_time - start_time\n    predicted_value, predTime = predict_model(trained_model, realXtrain, histXtrain, Batch_Size)\n    error = abs(ytrain[0] - predicted_value)\n    error_median = np.median(error)\n    error_std = np.std(error)\n    error_mean = np.mean(error)\n    prediction_median.append(error_median)\n    prediction_std.append(error_std)\n    prediction_error.append(error_mean)\n    train_time.append(time_taken)\n    prediction_time.append(predTime)\n    print(\"The iteration is **** \", i)\n\n    return prediction_error, prediction_median, train_time, prediction_time\n\ndef post_processing_data(df, prediction_error, prediction_median, train_time, prediction_time):\n\n    pred_new_df = df[1440:] # instead of 24 now 1440\n    new_df_date = pred_new_df[-len(pred_new_df):]\n    test_act = new_df_date.reset_index()\n    test_act = test_act.drop('Values', axis=1)\n    \n    #Adding datetime to prediction error and changing to dataframe\n    test_predictions_date = pd.DataFrame(prediction_error)\n    test_predictions_date.columns = ['Values']\n    test_predictions_date['Time'] = test_act['Time']\n    \n    #Adding datetime to prediction error median and changing to dataframe\n    test_predictions_medianError = pd.DataFrame(prediction_median)\n    test_predictions_medianError.columns = ['Values']\n    test_predictions_medianError['Time'] = test_act['Time']\n    \n    print(\"Average Error is\", test_predictions_date['Values'].mean())\n    \n    #Writing predictions to a csv file\n    test_predictions_date.to_csv('MAE_House20.csv')\n    test_predictions_medianError.to_csv('MedianError_House20.csv')\n    \n    train_time_date = pd.DataFrame(train_time)\n    prediction_time_date = pd.DataFrame(prediction_time)\n    train_time_date.columns = ['Values']\n    prediction_time_date.columns = ['Values']\n    train_new_df = df[1:]\n    new_df_date = df[-len(train_new_df):]\n    train_act = new_df_date.reset_index()\n    train_act = train_act.drop('Values', axis=1)\n    train_time_date['Time'] = train_act['Time']\n    prediction_time_date['Time'] = test_act['Time']\n    \n    train_time_date.to_csv('TrainTime_BestModel_House20.csv')\n    prediction_time_date.to_csv('PredTime_BestModel_House20.csv')\n\n    \"\"\"\n    #Generating the Plots\n    f1 = plt.figure()\n    f2 = plt.figure()\n    f3 = plt.figure()\n    f1.set_size_inches(18.5, 10.5)\n    f2.set_size_inches(18.5, 10.5)\n    f3.set_size_inches(18.5, 10.5)\n    \n    ax1 = f1.add_subplot(111)\n    ax1.set_xlabel('Months')\n    ax1.set_ylabel('Error')\n    ax1.xaxis.set_major_formatter(mdates.DateFormatter('%b'))\n    ax1.xaxis_date()\n    fit1 = np.polyfit(np.arange(len(test_predictions_date.Time)),test_predictions_date.Values,1)\n    fit_fn1 = np.poly1d(fit1)\n    ax1.plot(test_predictions_date.Time,test_predictions_date.Values,'yo',label='Error')\n    ax1.plot(test_predictions_date.Time, fit_fn1(np.arange(len(test_predictions_date.Time))), '--k',label='Regression')\n    \n    ax2 = f2.add_subplot(111)\n    ax2.set_xlabel('Months')\n    ax2.set_ylabel('Time [Sec]')\n    ax2.xaxis.set_major_formatter(mdates.DateFormatter('%b'))\n    ax2.xaxis_date()\n    ax2.plot(train_time_date.Time,train_time_date.Values,label='Training Time')\n    \n    ax3 = f3.add_subplot(111)\n    ax3.set_xlabel('Months')\n    ax3.set_ylabel('Time [Sec]')\n    ax3.xaxis.set_major_formatter(mdates.DateFormatter('%b'))\n    ax3.xaxis_date()\n    ax3.plot(prediction_time_date.Time,prediction_time_date.Values,label='Prediction Time')\n    \n    ax1.legend(loc='upper left')\n    ax2.legend(loc='upper left')\n    ax3.legend(loc='upper left')\n    plt.show()\n    \"\"\"\n\nif __name__ == \"__main__\":\n    X_scaled, new_df, X_scaled_hist, new_hd = pre_processing_data(\"/usr/src/app/prediction/pv_data_house_20.csv\",\n                                                                  \"/usr/src/app/prediction/pv_data_bolzano_italy.txt\")\n    X_scaled, new_df = X_scaled[0:3000], new_df[0:3000]\n    #Defining Hyperparameters\n    Num_Timesteps = 1 # instead of 24 now lookback 1440\n    Hidden_Size = 48\n    Batch_Size = 1\n    Num_Epochs = 5\n    Hist_input_size = 24\n    \n    Error_List, Error_Median, Train_Time, Prediction_Time = incremental_algorithm(X_scaled, new_df, X_scaled_hist,\n                                                                                  Hist_input_size,\n                                                                                  Num_Timesteps, Hidden_Size,\n                                                                                  Batch_Size, Num_Epochs)\n    post_processing_data(new_df, Error_List, Error_Median, Train_Time, Prediction_Time)"
]
| [
[
"tensorflow.set_random_seed",
"pandas.to_datetime",
"numpy.reshape",
"numpy.zeros",
"tensorflow.get_default_graph",
"numpy.random.seed",
"pandas.DataFrame",
"numpy.median",
"numpy.mean",
"tensorflow.ConfigProto",
"numpy.amin",
"numpy.std",
"numpy.amax",
"pandas.Series",
"pandas.read_csv"
]
]
|
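For context on the row above, its `apis` column pairs the source with the library calls it makes. Below is a minimal sketch, not taken from the dataset, of how the listed numpy/pandas calls combine to log per-iteration absolute errors the way the script's `incremental_algorithm`/`post_processing_data` pair does; the array values and the output file name are hypothetical stand-ins.

```python
import numpy as np
import pandas as pd

actual = np.array([1.0, 2.0, 3.0])      # hypothetical ground-truth values
predicted = np.array([1.1, 1.8, 3.3])   # hypothetical model outputs

error = np.abs(actual - predicted)      # per-sample absolute error
error_mean = np.mean(error)             # reported as MAE in the script
error_median = np.median(error)
error_std = np.std(error)

# Attach timestamps and persist, mirroring the post-processing step.
df = pd.DataFrame({"Values": error})
df["Time"] = pd.to_datetime(["2018-01-01", "2018-01-02", "2018-01-03"])
df.to_csv("errors.csv", index=False)
```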
akhil-rana/SlideLoader | [
"61719008c89fc7d758fa198234869fb22628ebeb"
]
| [
"spritemaker.py"
]
| [
"from pathlib import Path\nfrom PIL import Image\nimport numpy as np\nimport sys\nimport os\n\n\ndef createSpritesheet(datasetPath, labelsNames, width, height):\n # print(Path(__file__).parent.absolute())\n # Constants\n TRAINING_PATH = datasetPath+'/spritesheet/'\n # SPRITE_SIZE = 60\n print(TRAINING_PATH, file=sys.stderr)\n\n # Initialization\n x_data = []\n y_data = []\n # final_image = np.array([])\n y_offset = 0\n imageCount = 0\n imageFiles = []\n for image_file in Path(TRAINING_PATH).glob(\"**/*.jpg\"):\n imageCount += 1\n imageFiles.append(image_file)\n for image_file in Path(TRAINING_PATH).glob(\"**/*.png\"):\n imageCount += 1\n imageFiles.append(image_file)\n for image_file in Path(TRAINING_PATH).glob(\"**/*.jpeg\"):\n imageCount += 1\n imageFiles.append(image_file)\n\n print(imageCount, file=sys.stderr)\n new_im = Image.new('RGB', (width*height, imageCount))\n\n labels = [0]*(len(labelsNames))\n # print(len(sys.argv))\n\n # Load the training sprite by looping over every image file\n for image_file in imageFiles:\n\n # Load the current image file\n src_image = Image.open(image_file)\n # make it smaller\n downsized = src_image.resize((width, height))\n\n # get 1px high version\n pixels = list(downsized.getdata())\n smoosh = Image.new('RGB', (width * height, 1))\n smoosh.putdata(pixels)\n\n # store image\n x_data.append(smoosh)\n folderName = str(image_file.parent.absolute(\n ))[-(len(str(image_file.parent.absolute()))-str(image_file.parent.absolute()).rindex('/')-1):]\n # print(folderName)\n # for i in image_file.stem:\n # print(i)\n # print(sys.argv[2])\n # Use image path to build our answer key\n for i in range(1, len(labelsNames)+1):\n if folderName == labelsNames[i-1]:\n y_data.append(i)\n labels[i-1] += 1\n\n print(labels)\n\n # randomize X and Y the same way before making data\n\n assert len(y_data) == len(x_data)\n p = np.random.permutation(len(y_data))\n npy = np.array(y_data)\n shuffled_y = npy[p].tolist()\n\n one_hot_y = []\n # Build the data image and 1-hot encoded answer array\n for idx in p:\n # build master sprite 1 pixel down at a time\n new_im.paste(x_data[idx], (0, y_offset))\n\n for i in range(1, len(labelsNames)+1):\n if shuffled_y[y_offset] == i:\n for j in range(1, len(labelsNames)+1):\n if j == i:\n one_hot_y.append(1)\n else:\n one_hot_y.append(0)\n # build 1-hot encoded answer key\n\n y_offset += 1\n\n # Save answers file (Y)\n newFile = open(datasetPath+'/spritesheet/labels.bin', \"wb\")\n newFileByteArray = bytearray(one_hot_y)\n bytesWrite = newFile.write(newFileByteArray)\n # should be num classes * original answer key size\n assert bytesWrite == ((len(labelsNames)) * len(y_data))\n\n # Save Data Sprite (X)\n # new_im = new_im.convert(\"RGBA\")\n\n # pixdata = new_im.load()\n\n # Clean the background noise, if color != white, then set to black.\n # change with your color\n\n # for y in range(new_im.size[1]):\n # for x in range(new_im.size[0]):\n # if pixdata[x, y][0] == 255:\n # pixdata[x, y] = (255, 255, 255)\n\n new_im.save(datasetPath+'/spritesheet/data.jpg')\n"
]
| [
[
"numpy.array"
]
]
|
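The core trick in `spritemaker.py` above is shuffling images and labels with one shared permutation so the spritesheet rows and the answer key stay aligned. A minimal sketch of that pattern under the same assumption; the toy data below is hypothetical, not image strips from the repo.

```python
import numpy as np

x_data = ["img0", "img1", "img2", "img3"]   # stand-ins for 1px-high image strips
y_data = [1, 2, 2, 1]

p = np.random.permutation(len(y_data))      # one permutation for both arrays
shuffled_y = np.array(y_data)[p].tolist()
shuffled_x = [x_data[i] for i in p]         # x rows follow the same order

assert len(shuffled_x) == len(shuffled_y)
```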
haimin777/models | [
"e888d8d22bb0a296ac26ce0c32e29a35f4bee4c4"
]
| [
"official/projects/qat/vision/tasks/retinanet_test.py"
]
| [
"# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for RetinaNet task.\"\"\"\n# pylint: disable=unused-import\nfrom absl.testing import parameterized\nimport orbit\nimport tensorflow as tf\n\nfrom official import vision\nfrom official.core import exp_factory\nfrom official.modeling import optimization\nfrom official.projects.qat.vision.tasks import retinanet\nfrom official.vision.configs import retinanet as exp_cfg\n\n\nclass RetinaNetTaskTest(parameterized.TestCase, tf.test.TestCase):\n\n @parameterized.parameters(\n ('retinanet_spinenet_mobile_coco_qat', True),\n ('retinanet_spinenet_mobile_coco_qat', False),\n )\n def test_retinanet_task(self, test_config, is_training):\n \"\"\"RetinaNet task test for training and val using toy configs.\"\"\"\n config = exp_factory.get_exp_config(test_config)\n # modify config to suit local testing\n config.task.model.input_size = [128, 128, 3]\n config.trainer.steps_per_loop = 1\n config.task.train_data.global_batch_size = 1\n config.task.validation_data.global_batch_size = 1\n config.task.train_data.shuffle_buffer_size = 2\n config.task.validation_data.shuffle_buffer_size = 2\n config.train_steps = 1\n\n task = retinanet.RetinaNetTask(config.task)\n model = task.build_model()\n metrics = task.build_metrics(training=is_training)\n\n strategy = tf.distribute.get_strategy()\n\n data_config = config.task.train_data if is_training else config.task.validation_data\n dataset = orbit.utils.make_distributed_dataset(strategy, task.build_inputs,\n data_config)\n iterator = iter(dataset)\n opt_factory = optimization.OptimizerFactory(config.trainer.optimizer_config)\n optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())\n\n if is_training:\n task.train_step(next(iterator), model, optimizer, metrics=metrics)\n else:\n task.validation_step(next(iterator), model, metrics=metrics)\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
]
| [
[
"tensorflow.distribute.get_strategy",
"tensorflow.test.main"
]
]
|
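The test above follows a common smoke-test recipe: shrink the config (input size, batch size, step counts) to toy values, then run a single train or validation step under parameterization. A minimal sketch of that recipe with a hypothetical stand-in model, not the RetinaNet task itself:

```python
import tensorflow as tf
from absl.testing import parameterized


class SmokeTest(parameterized.TestCase, tf.test.TestCase):

  @parameterized.parameters(True, False)
  def test_one_step(self, is_training):
    model = tf.keras.Sequential([tf.keras.layers.Dense(2)])
    x = tf.random.uniform([1, 4])           # batch size 1, tiny input
    if is_training:
      with tf.GradientTape() as tape:
        loss = tf.reduce_mean(model(x))
      grads = tape.gradient(loss, model.trainable_variables)
      self.assertLen(grads, len(model.trainable_variables))
    else:
      self.assertEqual(model(x).shape, [1, 2])


if __name__ == '__main__':
  tf.test.main()
```

The point of the pattern is speed: the test only proves the train/eval plumbing runs end to end, not that the model converges.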
gmke/WesternMeteorPyLib | [
"f218ac5abc9c1a53fe3844039909f8efb929b50e"
]
| [
"wmpl/Utils/AtmosphereDensity.py"
]
| [
"\"\"\" Plots NRL MSISE atmosphere density model. \"\"\"\n\nfrom __future__ import print_function, division, absolute_import\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.optimize\n\nfrom wmpl.PythonNRLMSISE00.nrlmsise_00_header import *\nfrom wmpl.PythonNRLMSISE00.nrlmsise_00 import *\nfrom wmpl.Utils.TrajConversions import jd2Date, jd2LST\n\n\n\ndef atmDensPoly6th(ht, dens_co):\n \"\"\" Compute the atmosphere density using a 6th order polynomial. This is used in the ablation simulation\n for faster execution. \n\n Arguments:\n ht: [float] Height above sea level (m).\n dens_co: [list] Coeffs of the 6th order polynomial.\n\n Return: \n atm_dens: [float] Atmosphere neutral mass density in kg/m^3.\n \"\"\"\n\n # Compute the density\n rho_a = 1000*(10**(dens_co[0] \n + dens_co[1]*(ht/1000)\n + dens_co[2]*(ht/1000)**2 \n + dens_co[3]*(ht/1000)**3 \n + dens_co[4]*(ht/1000)**4 \n + dens_co[5]*(ht/1000)**5))\n\n return rho_a\n\n\n\ndef atmDensPoly(ht, dens_co):\n \"\"\" Compute the atmosphere density using a 7th order polynomial. This is used in the ablation simulation\n for faster execution. \n\n Arguments:\n ht: [float] Height above sea level (m).\n dens_co: [list] Coeffs of the 7th order polynomial.\n\n Return: \n atm_dens: [float] Atmosphere neutral mass density in kg/m^3. Note that the minimum set density is\n 10^-14 kg/m^3.\n \"\"\"\n\n # Compute the density (height is scaled to megameters to avoid overflows when raising it to the 6th power)\n rho_a = 10**(dens_co[0] \n + dens_co[1]*(ht/1e6) \n + dens_co[2]*(ht/1e6)**2 \n + dens_co[3]*(ht/1e6)**3 \n + dens_co[4]*(ht/1e6)**4 \n + dens_co[5]*(ht/1e6)**5\n + dens_co[6]*(ht/1e6)**6\n )\n\n # Set a minimum density\n if isinstance(rho_a, np.ndarray):\n rho_a[rho_a == 0] = 1e-14\n else:\n if rho_a == 0:\n rho_a = 1e-14\n\n return rho_a\n\n\n\ndef fitAtmPoly(lat, lon, height_min, height_max, jd):\n \"\"\" Fits a 7th order polynomial on the atmosphere mass density profile at the given location, time, and \n for the given height range.\n\n Arguments:\n lat: [float] Latitude in radians.\n lon: [float] Longitude in radians.\n height_min: [float] Minimum height in meters. E.g. 30000 or 60000 are good values.\n height_max: [float] Maximum height in meters. E.g. 120000 or 180000 are good values.\n jd: [float] Julian date.\n\n Return:\n dens_co: [list] Coeffs for the 7th order polynomial.\n \"\"\"\n\n # Generate a height array\n height_arr = np.linspace(height_min, height_max, 200)\n\n # Get atmosphere densities from NRLMSISE-00 (use log values for the fit)\n atm_densities = np.array([getAtmDensity(lat, lon, ht, jd) for ht in height_arr])\n atm_densities_log = np.log10(atm_densities)\n\n\n def atmDensPolyLog(height_arr, *dens_co):\n return np.log10(atmDensPoly(height_arr, dens_co))\n\n # Fit the 7th order polynomial\n dens_co, _ = scipy.optimize.curve_fit(atmDensPolyLog, height_arr, atm_densities_log, \\\n p0=np.zeros(7), maxfev=10000)\n\n return dens_co\n\n\n \n\n\ndef getAtmDensity(lat, lon, height, jd):\n \"\"\" For the given heights, returns the atmospheric density from NRLMSISE-00 model. 
\n    \n    More info: https://github.com/magnific0/nrlmsise-00/blob/master/nrlmsise-00.h\n\n    Arguments:\n        lat: [float] Latitude in radians.\n        lon: [float] Longitude in radians.\n        height: [float] Height in meters.\n        jd: [float] Julian date.\n\n    Return:\n        [float] Atmosphere density in kg/m^3.\n\n    \"\"\"\n\n\n    # Init the input array\n    inp = nrlmsise_input()\n\n\n    # Convert the given Julian date to datetime\n    dt = jd2Date(jd, dt_obj=True)\n\n    # Get the day of year\n    doy = dt.timetuple().tm_yday\n\n    # Get the second in day\n    midnight = dt.replace(hour=0, minute=0, second=0, microsecond=0)\n    sec = (dt - midnight).seconds\n\n    # Calculate the Local sidereal time (degrees)\n    lst, _ = jd2LST(jd, np.degrees(lon))\n\n\n    ### INPUT PARAMETERS ###\n    ##########################################################################################################\n    # Set year (no effect)\n    inp.year = 0\n\n    # Day of year\n    inp.doy = doy\n\n    # Seconds in a day\n    inp.sec = sec\n\n    # Altitude in kilometers\n    inp.alt = height/1000.0\n\n    # Geodetic latitude (deg)\n    inp.g_lat = np.degrees(lat)\n\n    # Geodetic longitude (deg)\n    inp.g_long = np.degrees(lon)\n\n    # Local apparent solar time (hours)\n    inp.lst = lst/15\n\n\n    # f107, f107A, and ap effects are neither large nor well established below 80 km and these parameters \n    # should be set to 150., 150., and 4. respectively.\n\n    # 81 day average of 10.7 cm radio flux (centered on DOY)\n    inp.f107A = 150\n\n    # Daily 10.7 cm radio flux for previous day\n    inp.f107 = 150\n\n    # Magnetic index (daily)\n    inp.ap = 4\n\n    ##########################################################################################################\n\n\n    # Init the flags array\n    flags = nrlmsise_flags()\n\n    # Set output in kilograms and meters\n    flags.switches[0] = 1\n\n    # Set all switches to ON\n    for i in range(1, 24):\n        flags.switches[i] = 1\n\n    \n    # Array containing the following magnetic values:\n    # 0 : daily AP\n    # 1 : 3 hr AP index for current time\n    # 2 : 3 hr AP index for 3 hrs before current time\n    # 3 : 3 hr AP index for 6 hrs before current time\n    # 4 : 3 hr AP index for 9 hrs before current time\n    # 5 : Average of eight 3 hr AP indices from 12 to 33 hrs prior to current time\n    # 6 : Average of eight 3 hr AP indices from 36 to 57 hrs prior to current time \n    aph = ap_array()\n\n    # Set all AP indices to 100\n    for i in range(7):\n        aph.a[i] = 100\n\n\n    # Init the output array\n    # OUTPUT VARIABLES:\n    # d[0] - HE NUMBER DENSITY(CM-3)\n    # d[1] - O NUMBER DENSITY(CM-3)\n    # d[2] - N2 NUMBER DENSITY(CM-3)\n    # d[3] - O2 NUMBER DENSITY(CM-3)\n    # d[4] - AR NUMBER DENSITY(CM-3) \n    # d[5] - TOTAL MASS DENSITY(GM/CM3) [includes d[8] in td7d]\n    # d[6] - H NUMBER DENSITY(CM-3)\n    # d[7] - N NUMBER DENSITY(CM-3)\n    # d[8] - Anomalous oxygen NUMBER DENSITY(CM-3)\n    # t[0] - EXOSPHERIC TEMPERATURE\n    # t[1] - TEMPERATURE AT ALT\n    out = nrlmsise_output()\n\n\n    # Evaluate the atmosphere with the given parameters\n    gtd7(inp, flags, out)\n\n\n    # Get the total mass density\n    atm_density = out.d[5]\n\n    return atm_density\n\n\n\ngetAtmDensity_vect = np.vectorize(getAtmDensity, excluded=['jd'])\n\n\n\n\nif __name__ == \"__main__\":\n\n    import datetime\n    from wmpl.Utils.TrajConversions import datetime2JD\n    \n    lat = 44.327234\n    lon = -81.372350\n    jd = datetime2JD(datetime.datetime.now())\n\n    # Density evaluation heights (m)\n    heights = np.linspace(60, 180, 100)*1000\n\n    atm_densities = []\n    for height in heights:\n        atm_density = getAtmDensity(np.radians(lat), np.radians(lon), height, jd)\n        atm_densities.append(atm_density)\n\n\n    plt.semilogx(atm_densities, heights/1000, zorder=3, label=\"NRLMSISE-00\")\n\n\n    # Fit the 7th order poly model\n    dens_co = fitAtmPoly(np.radians(lat), np.radians(lon), 60000, 180000, jd)\n\n    print(dens_co)\n\n    # Plot the fitted poly model\n    plt.semilogx(atmDensPoly(heights, dens_co), heights/1000, label=\"Poly fit\")\n\n    plt.legend()\n\n    plt.xlabel('Density (kg/m^3)')\n    plt.ylabel('Height (km)')\n\n    plt.grid()\n\n    plt.title('NRLMSISE-00')\n\n    # plt.savefig('atm_dens.png', dpi=300)\n\n    plt.show()"
]
| [
[
"matplotlib.pyplot.semilogx",
"numpy.vectorize",
"numpy.zeros",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"numpy.linspace",
"numpy.degrees",
"numpy.radians",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show",
"numpy.log10"
]
]
|
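The `fitAtmPoly`/`atmDensPoly` pair above models density as ten raised to a polynomial in scaled height, rho(h) = 10 ** (c0 + c1*(h/1e6) + ... + c6*(h/1e6)**6), with a floor of 1e-14 kg/m^3. A minimal sketch of evaluating that form; the coefficients below are made-up placeholders, not a real NRLMSISE-00 fit.

```python
import numpy as np

dens_co = np.array([-8.0, -1.0, 0.5, -0.1, 0.0, 0.0, 0.0])  # hypothetical coefficients

def atm_dens_poly(ht_m, co):
    s = ht_m / 1e6                                   # scale to megameters, as in atmDensPoly
    powers = np.power.outer(s, np.arange(len(co)))   # Vandermonde rows, shape [N, 7]
    rho = 10.0 ** (powers @ co)
    return np.maximum(rho, 1e-14)                    # density floor, as in the source

heights = np.linspace(60e3, 180e3, 5)                # meters
print(atm_dens_poly(heights, dens_co))
```

Scaling height to megameters before exponentiation is what keeps the sixth power from overflowing, which is the same design choice the source comments call out.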
mettalrose/text_processing | [
"52b47d7931a88cf8e0414aaaf0b8fccf339fd938"
]
| [
"metadata_processing/process_metadata_one_semester.py"
]
| [
"#!/usr/local/bin/python3\n# -*- coding: utf-8 -*-\n\n# DESCRIPTION: Given an excel file or files passed as arguments to the script,\n# create one csv file with all tabs for instructors in the original spreadsheet\n# The original spreadsheet needs a master tab, which is the first tab in\n# the original excel file.\n#\n# Usage example:\n# python process_metadata_one_semester.py --directory=../../../Metadata/Fall\\ 2018/ --master_student_file=../../../Metadata/Master_Student_metadata_legacy.xlsx --instructor_codes_file=../../../Metadata/Instructor_Codes.xlsx\n# The directory is where the multiple excel files are stored. \n# The master student file is the master spreadsheet with all metadata from previous semesters. \n# The instructor codes file is the spreadsheet where the instructor codes (and names) are stored.\n# A new csv file with a similar name to the original spreadsheet\n# will be created.\n\n\nimport argparse\nimport sys\nimport re\nimport os\nimport codecs\nimport pandas\nimport numpy\n\n# Define the way we retrieve arguments sent to the script.\nparser = argparse.ArgumentParser(description='Excel to CSV')\nparser.add_argument('--directory', action=\"store\", dest='dir', default='')\nparser.add_argument('--file', action=\"store\", dest='file', default='')\nparser.add_argument('--master_student_file', action=\"store\", dest='master_student', default='')\nparser.add_argument('--instructor_codes_file', action=\"store\", dest='instructor_codes', default='')\nargs = parser.parse_args()\n\ndef combine_tabs(filename):\n if '.xlsx' in filename or '.xls' in filename:\n print(\"Opening file \" + filename)\n data = pandas.ExcelFile(filename)\n tabs = data.sheet_names\n mastertab = pandas.read_excel(data, 0)\n #print(mastertab.head())\n frames = []\n\n # comment: get each tab in the excel file\n for i in range(1,len(tabs)):\n studentid = []\n tab = tabs[i]\n print(\"Getting data from \" + tab + \" tab.\")\n this_tab = pandas.read_excel(data, tab)\n #print(this_tab)\n # comment: add the tab name as a new column in the data\n this_tab[\"Instructor Last Name\"] = tab\n #print(this_tab)\n #print(this_tab.columns)\n # comment: add student ids to list of studdent ids\n\n for element in this_tab[\"Registrar ID\"].tolist():\n studentid.append(element)\n #print(\"instructor tab: \", len(studentid))\n\n # comment: get the same students from the master tab\n\n filterid = mastertab.loc[mastertab[\"Registrar ID\"].isin(studentid)]\n #print(filterid[\"Registrar ID\"])\n #print (len(studentid[,]))\n #print (\"mastertab: \", len (filterid[\"Registrar ID\"].tolist()))\n\n # comment: make sure tab and master are the same\n if len(studentid) == len(filterid[\"Registrar ID\"].tolist()):\n\n #print(\"they are the same\")\n # comment: add this tab to frames list to combine data\n frames.append(this_tab)\n else:\n print(\"There's a mismatch between instructor tab and master tab\")\n\n # comment: combine all data\n if len(frames) != 0:\n combined_data = pandas.concat(frames, sort=False)\n #print(combined_data)\n\n # comment: get list of student IDs\n allstudents = combined_data[\"Registrar ID\"].unique()\n print(\"There are \" + str(len(allstudents)) + \" students to process.\")\n new_frames = []\n # comment: for every student id\n for student in allstudents:\n print(\"Checking student rows.\")\n\n this_student_data = combined_data.loc[combined_data[\"Registrar ID\"] == student]\n\n #print(this_student_data)\n #print(\"*****\")\n #print(len(this_student_data))\n if len(this_student_data) > 1:\n major = \"\"\n college 
= \"\"\n for index,row in this_student_data.iterrows():\n major += row[\"Major\"] + \"; \"\n college += row[\"College\"] + \"; \"\n #print(row)\n #print(row[\"Major\"])\n new_row = this_student_data.iloc[[0]]\n\n else:\n new_row = this_student_data.iloc[[0]]\n #print (new_row)\n new_frames.append(new_row)\n\n return(new_frames)\n\ndef combine_recursive(directory):\n all_frames = []\n for dirpath, dirnames, files in os.walk(directory):\n for name in files:\n this_new_frame = combine_tabs(os.path.join(dirpath, name))\n if this_new_frame:\n all_frames += combine_tabs(os.path.join(dirpath, name))\n return(all_frames)\n\ndef process_new_data(output_frames, master_student_data, all_master, instructor_codes, output_filename):\n new_combined_data = pandas.concat(output_frames, sort=False)\n\n if len(output_frames) > 0:\n # get last (highest) student code from master student file\n last_student_code = master_student_data['Crow ID'].max()\n # very important step, so student ID works\n new_combined_data = new_combined_data.reset_index(drop = True)\n # get first student ID we should use\n start = last_student_code+1\n # add Crow IDs to new dataframe\n new_combined_data['New Crow ID'] = new_combined_data.index + start\n\n new_combined_data2 = pandas.merge(new_combined_data, instructor_codes,\n on='Instructor Last Name', how='left')\n new_combined_data3 = pandas.merge(new_combined_data2, master_student_data,\n on='Registrar ID', how='left')\n new_combined_data3[['Crow ID']] = new_combined_data3[['Crow ID']].fillna(0)\n new_combined_data3[['Crow ID']] = new_combined_data3[['Crow ID']].astype(int)\n\n df = new_combined_data3\n df['Crow_ID'] = numpy.where(df['Crow ID']==0, df['New Crow ID'], df['Crow ID'])\n\n # Delete the extra crow ID columns from the dataframe\n df = df.drop(\"New Crow ID\", axis=1)\n df = df.drop(\"Crow ID\", axis=1)\n df = df.rename(index=str, columns={'Crow_ID': 'Crow ID'})\n\n df = df.drop_duplicates()\n df.to_csv(output_filename, index = False)\n\n df['Acad Level'] = numpy.nan\n master_slice = df[['Catalog Nbr', 'Class Section', 'Registrar ID', 'First Name', 'Last Name', 'Acad Level', 'College', 'Major', 'Birth Country Code', 'Gender', 'TOEFL COMPI', 'TOEFL Listening', 'TOEFL Reading', 'TOEFL Writing', 'TOEFL Speaking', 'Crow ID', 'Instructor Code', 'Alternate Name', 'term', 'mode_of_course', 'length_of_course', 'institution']]\n\n new_master = pandas.concat([all_master, master_slice], sort = False)\n output_filename = re.sub(r'\\s+', r'_', output_filename)\n new_master.to_csv('master_' + output_filename, index = False)\n\n\nif args.master_student and args.instructor_codes:\n if '.xls' in args.master_student:\n master_student_file = pandas.ExcelFile(args.master_student)\n master_student_data = pandas.read_excel(master_student_file)\n elif '.csv' in args.master_student:\n master_student_data = pandas.read_csv(args.master_student)\n\n # get last (highest) section code from master student file\n last_section_code = master_student_data['Class Section'].max()\n\n instructor_codes_file = pandas.ExcelFile(args.instructor_codes)\n instructor_codes = pandas.read_excel(instructor_codes_file, 'UA')\n\n if args.dir and os.path.isdir(args.dir):\n output_filename = re.sub(r'\\.+\\/|\\.+\\\\', r'', args.dir)\n output_filename = re.sub(r'\\/|\\\\', r'_', output_filename)\n output_filename += '_processed.csv'\n output_filename = re.sub(r'_+', r'_', output_filename)\n output_frames = combine_recursive(args.dir)\n process_new_data(output_frames, master_student_data[['Registrar ID', 'Crow ID']], 
master_student_data, instructor_codes, output_filename)\n\n    elif args.file and os.path.isfile(args.file):\n        output_filename = re.sub(r'.+\\/|.+\\\\|\\.xlsx?', r'', args.file)\n        output_filename += '_processed.csv'\n        output_filename = re.sub(r'_+', r'_', output_filename)\n        output_frames = combine_tabs(args.file)\n        process_new_data(output_frames, master_student_data[['Registrar ID', 'Crow ID']], master_student_data, instructor_codes, output_filename)\n    else:\n        print('You need to supply a valid directory or filename')\nelse:\n    print('You need to supply a valid master student and instructor codes files')\n"
]
| [
[
"pandas.merge",
"pandas.read_excel",
"pandas.ExcelFile",
"numpy.where",
"pandas.concat",
"pandas.read_csv"
]
]
|
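The Crow-ID logic in `process_new_data` above continues numbering from the master file's maximum ID and keeps existing IDs via a left merge, with 0 as the "not found" sentinel. A minimal sketch of that pattern; the two frames below are hypothetical, not the project's spreadsheets.

```python
import numpy as np
import pandas as pd

master = pd.DataFrame({"Registrar ID": [11, 12], "Crow ID": [100, 101]})
new = pd.DataFrame({"Registrar ID": [12, 13]})          # 12 is returning, 13 is new

start = master["Crow ID"].max() + 1                     # first unused ID
new = new.reset_index(drop=True)                        # index drives new IDs
new["New Crow ID"] = new.index + start

merged = pd.merge(new, master, on="Registrar ID", how="left")
merged["Crow ID"] = merged["Crow ID"].fillna(0).astype(int)
merged["Crow ID"] = np.where(merged["Crow ID"] == 0,
                             merged["New Crow ID"], merged["Crow ID"])
print(merged.drop(columns="New Crow ID"))               # 12 keeps 101, 13 gets 103
```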
dosemeion/nni | [
"9b2bb11d8bafed4002b7f9eae98a39f2fead7c1a"
]
| [
"test/ut/compression/v2/test_iterative_pruner_torch.py"
]
| [
"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport random\nimport unittest\n\nimport torch\nimport torch.nn.functional as F\n\nfrom nni.algorithms.compression.v2.pytorch.pruning import (\n LinearPruner,\n AGPPruner,\n LotteryTicketPruner,\n SimulatedAnnealingPruner,\n AutoCompressPruner\n)\n\nfrom nni.algorithms.compression.v2.pytorch.utils import compute_sparsity_mask2compact, trace_parameters\n\n\nclass TorchModel(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = torch.nn.Conv2d(1, 5, 5, 1)\n self.bn1 = torch.nn.BatchNorm2d(5)\n self.conv2 = torch.nn.Conv2d(5, 10, 5, 1)\n self.bn2 = torch.nn.BatchNorm2d(10)\n self.fc1 = torch.nn.Linear(4 * 4 * 10, 100)\n self.fc2 = torch.nn.Linear(100, 10)\n\n def forward(self, x):\n x = F.relu(self.bn1(self.conv1(x)))\n x = F.max_pool2d(x, 2, 2)\n x = F.relu(self.bn2(self.conv2(x)))\n x = F.max_pool2d(x, 2, 2)\n x = x.view(-1, 4 * 4 * 10)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return F.log_softmax(x, dim=1)\n\n\ndef trainer(model, optimizer, criterion):\n model.train()\n for _ in range(10):\n input = torch.rand(10, 1, 28, 28)\n label = torch.Tensor(list(range(10))).type(torch.LongTensor)\n optimizer.zero_grad()\n output = model(input)\n loss = criterion(output, label)\n loss.backward()\n optimizer.step()\n\n\ndef get_optimizer(model):\n return trace_parameters(torch.optim.SGD)(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)\n\n\ncriterion = torch.nn.CrossEntropyLoss()\n\n\ndef evaluator(model):\n return random.random()\n\n\nclass IterativePrunerTestCase(unittest.TestCase):\n def test_linear_pruner(self):\n model = TorchModel()\n config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]\n pruner = LinearPruner(model, config_list, 'level', 3, log_dir='../../../logs')\n pruner.compress()\n _, pruned_model, masks, _, _ = pruner.get_best_result()\n sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list)\n assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82\n\n def test_agp_pruner(self):\n model = TorchModel()\n config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]\n pruner = AGPPruner(model, config_list, 'level', 3, log_dir='../../../logs')\n pruner.compress()\n _, pruned_model, masks, _, _ = pruner.get_best_result()\n sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list)\n assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82\n\n def test_lottery_ticket_pruner(self):\n model = TorchModel()\n config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]\n pruner = LotteryTicketPruner(model, config_list, 'level', 3, log_dir='../../../logs')\n pruner.compress()\n _, pruned_model, masks, _, _ = pruner.get_best_result()\n sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list)\n assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82\n\n def test_simulated_annealing_pruner(self):\n model = TorchModel()\n config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]\n pruner = SimulatedAnnealingPruner(model, config_list, evaluator, start_temperature=40, log_dir='../../../logs')\n pruner.compress()\n _, pruned_model, masks, _, _ = pruner.get_best_result()\n sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list)\n assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82\n\n def test_auto_compress_pruner(self):\n model = TorchModel()\n config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]\n admm_params = {\n 'trainer': trainer,\n 'traced_optimizer': get_optimizer(model),\n 'criterion': 
criterion,\n 'iterations': 10,\n 'training_epochs': 1\n }\n sa_params = {\n 'evaluator': evaluator,\n 'start_temperature': 40\n }\n pruner = AutoCompressPruner(model, config_list, 10, admm_params, sa_params=sa_params, log_dir='../../../logs')\n pruner.compress()\n _, pruned_model, masks, _, _ = pruner.get_best_result()\n sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list)\n print(sparsity_list)\n assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82\n\nif __name__ == '__main__':\n unittest.main()\n"
]
| [
[
"torch.nn.Linear",
"torch.rand",
"torch.nn.BatchNorm2d",
"torch.nn.functional.log_softmax",
"torch.nn.Conv2d",
"torch.nn.functional.max_pool2d",
"torch.nn.CrossEntropyLoss"
]
]
|
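Each test above asserts the same thing: the pruner's best result lands in a tolerance band around the 0.8 sparsity target. A minimal sketch of that check with a hand-made random mask instead of an NNI pruner; the layer shape and keep-threshold are hypothetical.

```python
import torch

conv = torch.nn.Conv2d(1, 5, 5)
mask = (torch.rand_like(conv.weight) > 0.8).float()  # keep roughly 20% of weights
with torch.no_grad():
    conv.weight.mul_(mask)                           # zero out the pruned weights

sparsity = float((conv.weight == 0).sum()) / conv.weight.numel()
assert 0.7 < sparsity < 0.9, sparsity                # band check, as in the tests above
```

The band (rather than an exact equality) absorbs rounding from masking a finite weight tensor, which is why the NNI tests use 0.78 < sparsity < 0.82 instead of sparsity == 0.8.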
Lifelong-ML/LASEM | [
"c4ec052c850e37f54bc3e6faf6b988a4c5239f10"
]
| [
"classification/model/cnn_darts_model.py"
]
| [
"import tensorflow as tf\nimport numpy as np\nfrom random import shuffle\n\nfrom utils.utils import get_value_of_valid_tensors, savemat_wrapper, savemat_wrapper_nested_list, count_trainable_var2\nfrom utils.utils import new_weight, new_bias, new_ELLA_KB_param, get_list_of_valid_tensors, data_x_add_dummy, data_x_and_y_add_dummy\nfrom utils.utils_nn import new_flexible_hardparam_cnn_fc_nets, new_darts_cnn_fc_net\nfrom utils.utils_df_nn import new_ELLA_flexible_cnn_deconv_tensordot_fc_net, new_darts_dfcnn_fc_net\nfrom classification.model.lifelong_model_frame import Lifelong_Model_Frame\n\n_tf_ver = tf.__version__.split('.')\n_up_to_date_tf = int(_tf_ver[0]) > 1 or (int(_tf_ver[0])==1 and int(_tf_ver[1]) >= 14)\nif _up_to_date_tf:\n _tf_tensor = tf.is_tensor\nelse:\n _tf_tensor = tf.contrib.framework.is_tensor\n\n\n########################################################\n#### DARTS (Differentiable Architecture Search) ####\n#### based Selective Sharing baseline model ####\n########################################################\nclass LL_HPS_CNN_DARTS_net(Lifelong_Model_Frame):\n def __init__(self, model_hyperpara, train_hyperpara):\n super().__init__(model_hyperpara, train_hyperpara)\n self.approx_order=model_hyperpara['darts_approx_order']\n self.conv_sharing = []\n\n def _possible_choices(input_subsets):\n list_subsets = []\n for c in [False, True]:\n for elem in input_subsets:\n list_subsets.append(elem+[c])\n return list_subsets\n\n self._possible_configs = [[]]\n for layer_cnt in range(self.num_conv_layers):\n self._possible_configs = _possible_choices(self._possible_configs)\n self.num_possible_configs = len(self._possible_configs)\n\n def _build_task_model(self, net_input, output_size, task_cnt, params=None, trainable=False):\n if params is None:\n params_shared_conv, params_TS_conv, params_fc = None, None, None\n else:\n params_shared_conv, params_TS_conv, params_fc = params['Shared_Conv'], params['TS_Conv'], params['FC']\n\n if params_TS_conv is not None:\n assert (len(params_TS_conv) == 2*self.num_conv_layers), \"Given trained parameters of conv doesn't match to the hyper-parameters!\"\n if params_fc is not None:\n assert (len(params_fc) == 2*self.num_fc_layers), \"Given trained parameters of fc doesn't match to the hyper-parameters!\"\n\n eval_net = []\n if (task_cnt==self.current_task) and self.task_is_new:\n ## DARTS-based Hybrid HPS\n with tf.name_scope('DARTS_HPS'):\n task_net, _, conv_TS_params, conv_select_params, fc_params = new_darts_cnn_fc_net(net_input, self.cnn_kernel_size, self.cnn_channels_size, self.cnn_stride_size, list(self.fc_size)+[output_size], cnn_activation_fn=self.hidden_act, cnn_shared_params=params_shared_conv, cnn_TS_params=params_TS_conv, select_params=None, fc_activation_fn=self.hidden_act, fc_params=params_fc, padding_type=self.padding_type, max_pool=self.max_pooling, pool_sizes=self.pool_size, dropout=self.dropout, dropout_prob=self.dropout_prob, input_size=self.input_size[0:2], trainable=trainable)\n self.conv_select_params = conv_select_params\n\n ## build network for evaluation\n for conf in self._possible_configs:\n net_tmp, _, _, _ = new_flexible_hardparam_cnn_fc_nets(net_input, self.cnn_kernel_size, self.cnn_channels_size, self.cnn_stride_size, list(self.fc_size)+[output_size], conf, cnn_activation_fn=self.hidden_act, shared_cnn_params=params_shared_conv, cnn_params=conv_TS_params, fc_activation_fn=self.hidden_act, fc_params=fc_params, max_pool=self.max_pooling, pool_sizes=self.pool_size, dropout=self.dropout, dropout_prob=self.dropout_prob, 
padding_type=self.padding_type, input_size=self.input_size[0:2], trainable=trainable, trainable_shared=trainable)\n eval_net.append(net_tmp[-1])\n else:\n ## Hybrid HPS with the learned configuration\n task_net, conv_TS_params, _, fc_params = new_flexible_hardparam_cnn_fc_nets(net_input, self.cnn_kernel_size, self.cnn_channels_size, self.cnn_stride_size, list(self.fc_size)+[output_size], self.conv_sharing[task_cnt], cnn_activation_fn=self.hidden_act, shared_cnn_params=params_shared_conv, cnn_params=params_TS_conv, fc_activation_fn=self.hidden_act, fc_params=params_fc, max_pool=self.max_pooling, pool_sizes=self.pool_size, dropout=self.dropout, dropout_prob=self.dropout_prob, padding_type=self.padding_type, input_size=self.input_size[0:2], trainable=trainable, trainable_shared=trainable)\n return task_net, eval_net, conv_TS_params, fc_params\n\n def _build_whole_model(self):\n for task_cnt, (num_classes, x_b) in enumerate(zip(self.output_sizes, self.x_batch)):\n if (task_cnt==self.current_task) and (self.task_is_new):\n param_to_reuse = {'Shared_Conv': self.shared_conv_params, 'TS_Conv': None, 'FC': None}\n else:\n param_to_reuse = {'Shared_Conv': self.shared_conv_params, 'TS_Conv': self.np_params[task_cnt]['TS_Conv'], 'FC': self.np_params[task_cnt]['FC']}\n task_net, eval_net, conv_TS_params, fc_params = self._build_task_model(x_b, num_classes, task_cnt, params=param_to_reuse, trainable=(task_cnt==self.current_task))\n\n self.task_models.append(task_net)\n self.conv_params.append(conv_TS_params)\n self.fc_params.append(fc_params)\n self.params.append(self._collect_trainable_variables())\n self.num_trainable_var += count_trainable_var2(self.params[-1]) if task_cnt < 1 else count_trainable_var2(self.params[-1]) - self.shared_conv_params_size\n\n if len(eval_net) > 0:\n self.darts_eval_models = eval_net\n\n #self.conv_trainable_param = get_list_of_valid_tensors(self.conv_params[self.current_task])\n #self.fc_trainable_param = get_list_of_valid_tensors(self.fc_params[self.current_task])\n #self.trainable_params = list(self.dfcnn_KB_trainable_param) + list(self.dfcnn_TS_trainable_param) + list(self.conv_trainable_param) + list(self.fc_trainable_param)\n\n def add_new_task(self, output_dim, curr_task_index, single_input_placeholder=False):\n self.conv_select_params, self.darts_eval_models = None, None\n self._shared_param_init()\n super().add_new_task(output_dim, curr_task_index, single_input_placeholder=single_input_placeholder)\n\n def _shared_param_init(self):\n shared_conv_init_val = self.np_params[0]['Shared_Conv'] if hasattr(self, 'np_params') else [None for _ in range(2*self.num_conv_layers)]\n self.shared_conv_params = []\n for layer_cnt in range(self.num_conv_layers):\n self.shared_conv_params.append(new_weight(shape=self.cnn_kernel_size[2*layer_cnt:2*(layer_cnt+1)]+self.cnn_channels_size[layer_cnt:layer_cnt+2], init_tensor=shared_conv_init_val[2*layer_cnt], trainable=True, name='Shared_Conv_W%d'%(layer_cnt)))\n self.shared_conv_params.append(new_bias(shape=[self.cnn_channels_size[layer_cnt+1]], init_tensor=shared_conv_init_val[2*layer_cnt+1], trainable=True, name='Shared_Conv_b%d'%(layer_cnt)))\n self.shared_conv_params_size = count_trainable_var2(self.shared_conv_params)\n\n def get_darts_selection_val(self, sess):\n return get_value_of_valid_tensors(sess, self.conv_select_params)\n\n def get_params_val(self, sess, use_npparams=True):\n selection_params_val = self.get_darts_selection_val(sess)\n if use_npparams:\n shared_conv_val = self.np_params[0]['Shared_Conv']\n TS_conv_val = 
[np_p['TS_Conv'] for np_p in self.np_params]\n fc_val = [np_p['FC'] for np_p in self.np_params]\n else:\n shared_conv_val = get_value_of_valid_tensors(sess, self.shared_conv_params)\n TS_conv_val = [get_value_of_valid_tensors(sess, cnn_TS_param) for cnn_TS_param in self.conv_params]\n fc_val = [get_value_of_valid_tensors(sess, fc_param) for fc_param in self.fc_params]\n\n parameters_val = {}\n parameters_val['DARTS_selection_param'] = savemat_wrapper(selection_params_val)\n parameters_val['shared_conv'] = savemat_wrapper(shared_conv_val)\n parameters_val['TS_conv'] = savemat_wrapper_nested_list(TS_conv_val)\n parameters_val['fc_weights'] = savemat_wrapper_nested_list(fc_val)\n return parameters_val\n\n def best_config(self, sess):\n ## return the index of appropriate sharing configuration (self._possible_configs) according to the value of DARTS selection parameters\n selection_val = self.get_darts_selection_val(sess)\n # argmax 0 -> task-specific / argmax 1 -> shared\n selected_config_index = 0\n for layer_cnt, (layer_select) in enumerate(selection_val):\n selected_config_index = selected_config_index + np.argmax(layer_select) * (2**layer_cnt)\n return selected_config_index\n\n def darts_learned_selection(self, sess):\n ## return the list of decision (T:shared/F:task-specific) of sharing in each layer according to the value of DARTS selection parameters\n ## for elements of self.conv_sharing (e.g. 'bottom2' : [TTFFF..])\n selection_val = self.get_darts_selection_val(sess)\n sharing_flags = []\n for layer_select in selection_val:\n sharing_flags.append(np.argmax(layer_select))\n return sharing_flags\n\n def define_eval(self):\n with tf.name_scope('Model_Eval'):\n mask = tf.reshape(tf.cast(tf.range(self.batch_size)<self.num_data_in_batch, dtype=tf.float32), [self.batch_size, 1])\n self.eval = [tf.nn.softmax(task_model[-1])*mask for task_model in self.task_models]\n self.pred = [tf.argmax(task_model[-1]*mask, 1) for task_model in self.task_models]\n if self.task_is_new:\n self.eval_for_new_task = [tf.nn.softmax(task_model)*mask for task_model in self.darts_eval_models]\n self.pred_for_new_task = [tf.argmax(task_model*mask, 1) for task_model in self.darts_eval_models]\n\n def _loss_func(self, y1, y2):\n return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.cast(y1, tf.int32), logits=y2))\n\n def define_loss(self):\n with tf.name_scope('Model_Loss'):\n self.loss = [self._loss_func(y_batch, task_model[-1]) for y_batch, task_model in zip(self.y_batch, self.task_models)]\n\n def define_accuracy(self):\n with tf.name_scope('Model_Accuracy'):\n mask = tf.cast(tf.range(self.batch_size)<self.num_data_in_batch, dtype=tf.float32)\n self.accuracy = [tf.reduce_sum(tf.cast(tf.equal(tf.argmax(task_model[-1], 1), tf.cast(y_batch, tf.int64)), tf.float32)*mask) for y_batch, task_model in zip(self.y_batch, self.task_models)]\n if self.task_is_new:\n self.accuracy_for_new_task = [tf.reduce_sum(tf.cast(tf.equal(tf.argmax(task_model, 1), tf.cast(self.y_batch[self.current_task], tf.int64)), tf.float32)*mask) for task_model in self.darts_eval_models]\n\n def define_opt(self):\n with tf.name_scope('Optimization'):\n self.grads = tf.gradients(self.loss[self.current_task], self.params[self.current_task])\n trainer = tf.train.RMSPropOptimizer(learning_rate=self.learn_rate/(1.0+self.epoch*self.learn_rate_decay))\n self.update = trainer.apply_gradients(list(zip(self.grads, self.params[self.current_task])))\n if self.task_is_new:\n if self.approx_order == 1:\n self.selection_grads = 
tf.gradients(self.loss[self.current_task], self.conv_select_params)\n elif self.approx_order == 2:\n #new_approx_params = [p-g*(self.learn_rate/(1.0+self.epoch*self.learn_rate_decay)) for (p, g) in zip(self.params[self.current_task], self.grads)]\n new_approx_params = [p-g*self.learn_rate for (p, g) in zip(self.params[self.current_task], self.grads)]\n new_shared_conv = new_approx_params[0:2*self.num_conv_layers]\n new_TS_conv = new_approx_params[2*self.num_conv_layers:4*self.num_conv_layers]\n new_fc = new_approx_params[4*self.num_conv_layers:]\n\n unrolled_model, _, _, _, _ = new_darts_cnn_fc_net(self.x_batch[self.current_task], self.cnn_kernel_size, self.cnn_channels_size, self.cnn_stride_size, list(self.fc_size)+[self.output_sizes[self.current_task]], cnn_activation_fn=self.hidden_act, cnn_shared_params=new_shared_conv, cnn_TS_params=new_TS_conv, select_params=self.conv_select_params, fc_activation_fn=self.hidden_act, fc_params=new_fc, padding_type=self.padding_type, max_pool=self.max_pooling, pool_sizes=self.pool_size, dropout=self.dropout, dropout_prob=self.dropout_prob, input_size=self.input_size[0:2])\n unrolled_loss = self._loss_func(self.y_batch[self.current_task], unrolled_model[-1])\n #self.selection_grads = tf.gradients(unrolled_loss, self.conv_select_params)\n selection_grads = tf.gradients(unrolled_loss, self.conv_select_params)\n dw = tf.gradients(unrolled_loss, new_approx_params)\n\n ## compute partial gradient approximating hessian\n ratios = [0.01/tf.norm(g) for g in dw]\n approx_params_upper = [p+g*r for (p, g, r) in zip(new_approx_params, dw, ratios)]\n upper_model, _, _, _, _ = new_darts_cnn_fc_net(self.x_batch[self.current_task], self.cnn_kernel_size, self.cnn_channels_size, self.cnn_stride_size, list(self.fc_size)+[self.output_sizes[self.current_task]], cnn_activation_fn=self.hidden_act, cnn_shared_params=approx_params_upper[0:2*self.num_conv_layers], cnn_TS_params=approx_params_upper[2*self.num_conv_layers:4*self.num_conv_layers], select_params=self.conv_select_params, fc_activation_fn=self.hidden_act, fc_params=approx_params_upper[4*self.num_conv_layers:], padding_type=self.padding_type, max_pool=self.max_pooling, pool_sizes=self.pool_size, dropout=self.dropout, dropout_prob=self.dropout_prob, input_size=self.input_size[0:2])\n upper_loss = self._loss_func(self.y_batch[self.current_task], upper_model[-1])\n upper_grad = tf.gradients(upper_loss, self.conv_select_params)\n\n approx_params_lower = [p-g*r for (p, g, r) in zip(new_approx_params, dw, ratios)]\n lower_model, _, _, _, _ = new_darts_cnn_fc_net(self.x_batch[self.current_task], self.cnn_kernel_size, self.cnn_channels_size, self.cnn_stride_size, list(self.fc_size)+[self.output_sizes[self.current_task]], cnn_activation_fn=self.hidden_act, cnn_shared_params=approx_params_lower[0:2*self.num_conv_layers], cnn_TS_params=approx_params_lower[2*self.num_conv_layers:4*self.num_conv_layers], select_params=self.conv_select_params, fc_activation_fn=self.hidden_act, fc_params=approx_params_lower[4*self.num_conv_layers:], padding_type=self.padding_type, max_pool=self.max_pooling, pool_sizes=self.pool_size, dropout=self.dropout, dropout_prob=self.dropout_prob, input_size=self.input_size[0:2])\n lower_loss = self._loss_func(self.y_batch[self.current_task], lower_model[-1])\n lower_grad = tf.gradients(lower_loss, self.conv_select_params)\n\n #self.selection_grads = [g-(self.learn_rate/(1.0+self.epoch*self.learn_rate_decay)/(2*r))*(u-l) for (g, r, u, l) in zip(selection_grads, ratios, upper_grad, lower_grad)]\n 
self.selection_grads = [g-(self.learn_rate/(2*r))*(u-l) for (g, r, u, l) in zip(selection_grads, ratios, upper_grad, lower_grad)]\n\n trainer2 = tf.train.RMSPropOptimizer(learning_rate=self.learn_rate/(1.0+self.epoch*self.learn_rate_decay))\n self.selection_update = trainer2.apply_gradients(list(zip(self.selection_grads, self.conv_select_params)))\n\n def convert_tfVar_to_npVar(self, sess):\n if not (self.num_tasks == 1 and self.task_is_new):\n orig_KB = list(self.np_params[0]['Shared_Conv']) ## copy of shared conv before training current task\n else:\n orig_KB = [None for _ in range(2*self.num_conv_layers)]\n\n def list_param_converter(list_of_params):\n converted_params = []\n for p in list_of_params:\n if type(p) == np.ndarray:\n converted_params.append(p)\n elif _tf_tensor(p):\n converted_params.append(sess.run(p))\n else:\n converted_params.append(p) ## append 'None' param\n return converted_params\n\n def double_list_param_converter(list_of_params):\n converted_params = []\n for task_params in list_of_params:\n converted_params.append(list_param_converter(task_params))\n return converted_params\n\n def post_process(layers_to_share, original_KB, updated_KB, updated_conv):\n for layer_cnt, (sharing_flag) in enumerate(layers_to_share):\n if sharing_flag:\n ### Sharing this layer -> use new KB, TS and generated conv (no action needed), and make conv param None\n updated_conv[self.current_task][2*layer_cnt], updated_conv[self.current_task][2*layer_cnt+1] = None, None\n else:\n ### Not sharing this layer -> roll back KB, make TS and generated conv None, and keep conv param (no action needed)\n updated_KB[2*layer_cnt], updated_KB[2*layer_cnt+1] = original_KB[2*layer_cnt], original_KB[2*layer_cnt+1]\n return updated_KB, updated_conv\n\n self.np_params = []\n if len(self.conv_sharing) < self.num_tasks:\n self.conv_sharing.append(self.darts_learned_selection(sess))\n np_shared = list_param_converter(self.shared_conv_params)\n np_TS = double_list_param_converter(self.conv_params)\n np_fc = double_list_param_converter(self.fc_params)\n\n np_shared, np_TS = post_process(self.conv_sharing[self.current_task], orig_KB, np_shared, np_TS)\n for t, f in zip(np_TS, np_fc):\n self.np_params.append({'Shared_Conv': np_shared, 'TS_Conv': t, 'FC': f} if len(self.np_params)< 1 else {'TS_Conv': t, 'FC': f})\n\n def _collect_trainable_variables(self):\n return_list = []\n for p in self.shared_conv_params:\n if p is not None:\n return_list.append(p)\n for p in self.conv_params[-1]:\n if p is not None:\n return_list.append(p)\n for p in self.fc_params[-1]:\n if p is not None:\n return_list.append(p)\n return return_list\n\n def train_one_epoch(self, sess, data_x, data_y, epoch_cnt, task_index, learning_indices=None, augment_data=False, dropout_prob=1.0):\n task_model_index = self.find_task_model(task_index)\n num_train = data_x.shape[0]\n if learning_indices is None:\n learning_indices = list(range(num_train))\n shuffle(learning_indices)\n\n for batch_cnt in range(num_train//self.batch_size):\n batch_train_x = data_x[learning_indices[batch_cnt*self.batch_size:(batch_cnt+1)*self.batch_size]]\n batch_train_y = data_y[learning_indices[batch_cnt*self.batch_size:(batch_cnt+1)*self.batch_size]]\n\n if self.task_is_new:\n ## Update architecture (selection param)\n sess.run(self.selection_update, feed_dict={self.model_input[task_model_index]: batch_train_x, self.true_output[task_model_index]: batch_train_y, self.epoch: epoch_cnt, self.dropout_prob: dropout_prob})\n\n ## Update NN weights\n sess.run(self.update, 
feed_dict={self.model_input[task_model_index]: batch_train_x, self.true_output[task_model_index]: batch_train_y, self.epoch: epoch_cnt, self.dropout_prob: dropout_prob})\n\n    def eval_one_task(self, sess, data_x, task_index, dropout_prob=1.0):\n        task_model_index = self.find_task_model(task_index)\n        num_data, num_classes = data_x.shape[0], self.output_sizes[task_model_index]\n        eval_output = np.zeros([num_data, num_classes], dtype=np.float32)\n\n        num_batch = num_data//self.batch_size\n        num_remains = num_data - self.batch_size*num_batch\n\n        if self.task_is_new and (self.current_task == task_model_index):\n            best_config = self.best_config(sess)\n            eval_func = self.eval_for_new_task[best_config]\n        else:\n            eval_func = self.eval[task_model_index]\n\n        for batch_cnt in range(num_batch):\n            eval_output[batch_cnt*self.batch_size:(batch_cnt+1)*self.batch_size] = sess.run(eval_func, feed_dict={self.model_input[task_model_index]: data_x[batch_cnt*self.batch_size:(batch_cnt+1)*self.batch_size], self.dropout_prob: dropout_prob, self.num_data_in_batch: self.batch_size})\n        if num_remains > 0:\n            temp_pred = sess.run(eval_func, feed_dict={self.model_input[task_model_index]: data_x_add_dummy(data_x[-num_remains:], self.batch_size), self.dropout_prob: dropout_prob, self.num_data_in_batch: num_remains})\n            eval_output[-num_remains:] = temp_pred[0:num_remains]\n        return eval_output\n\n    def infer_one_task(self, sess, data_x, task_index, dropout_prob=1.0):\n        task_model_index = self.find_task_model(task_index)\n        num_data = data_x.shape[0]\n        inferred_labels = np.zeros(num_data, dtype=np.int32)\n\n        num_batch = num_data//self.batch_size\n        num_remains = num_data - self.batch_size*num_batch\n\n        if self.task_is_new and (self.current_task == task_model_index):\n            best_config = self.best_config(sess)\n            pred_func = self.pred_for_new_task[best_config]\n        else:\n            pred_func = self.pred[task_model_index]\n\n        for batch_cnt in range(num_batch):\n            temp_pred = sess.run(pred_func, feed_dict={self.model_input[task_model_index]: data_x[batch_cnt*self.batch_size:(batch_cnt+1)*self.batch_size], self.dropout_prob: dropout_prob, self.num_data_in_batch: self.batch_size})\n            inferred_labels[batch_cnt*self.batch_size:(batch_cnt+1)*self.batch_size] = np.squeeze(temp_pred)\n        if num_remains > 0:\n            temp_pred = sess.run(pred_func, feed_dict={self.model_input[task_model_index]: data_x_add_dummy(data_x[-num_remains:], self.batch_size), self.dropout_prob: dropout_prob, self.num_data_in_batch: num_remains})\n            inferred_labels[-num_remains:] = np.squeeze(temp_pred[0:num_remains])\n        return inferred_labels\n\n    def compute_accuracy_one_task(self, sess, data_x, data_y, task_index, dropout_prob=1.0):\n        task_model_index = self.find_task_model(task_index)\n        num_data, accuracy = data_x.shape[0], 0.0\n\n        num_batch = num_data//self.batch_size\n        num_remains = num_data - self.batch_size*num_batch\n\n        if self.task_is_new and (self.current_task == task_model_index):\n            best_config = self.best_config(sess)\n            acc_func = self.accuracy_for_new_task[best_config]\n        else:\n            acc_func = self.accuracy[task_model_index]\n\n        for batch_cnt in range(num_batch):\n            accuracy += sess.run(acc_func, feed_dict={self.model_input[task_model_index]: data_x[batch_cnt*self.batch_size:(batch_cnt+1)*self.batch_size], self.true_output[task_model_index]: data_y[batch_cnt*self.batch_size:(batch_cnt+1)*self.batch_size], self.dropout_prob: dropout_prob, self.num_data_in_batch: self.batch_size})\n        if num_remains > 0:\n            tmp_x, tmp_y = data_x_and_y_add_dummy(data_x[-num_remains:], data_y[-num_remains:], self.batch_size)\n            accuracy += 
sess.run(acc_func, feed_dict={self.model_input[task_model_index]: tmp_x, self.true_output[task_model_index]: tmp_y, self.dropout_prob: dropout_prob, self.num_data_in_batch: num_remains})\n return float(accuracy)/float(num_data)\n\n\n########################################################\n#### DARTS (Differentiable Architecture Search) ####\n#### based Selective Sharing baseline model ####\n########################################################\nclass LL_DFCNN_DARTS_net(Lifelong_Model_Frame):\n def __init__(self, model_hyperpara, train_hyperpara):\n super().__init__(model_hyperpara, train_hyperpara)\n self.dfcnn_KB_size = model_hyperpara['cnn_KB_sizes']\n self.dfcnn_TS_size = model_hyperpara['cnn_TS_sizes']\n self.dfcnn_stride_size = model_hyperpara['cnn_deconv_stride_sizes']\n self.dfcnn_KB_reg_scale = model_hyperpara['regularization_scale'][1]\n self.dfcnn_TS_reg_scale = model_hyperpara['regularization_scale'][3]\n self.approx_order=model_hyperpara['darts_approx_order']\n self.conv_sharing = []\n\n def _possible_choices(input_subsets):\n list_subsets = []\n for c in [False, True]:\n for elem in input_subsets:\n list_subsets.append(elem+[c])\n return list_subsets\n\n self._possible_configs = [[]]\n for layer_cnt in range(self.num_conv_layers):\n self._possible_configs = _possible_choices(self._possible_configs)\n self.num_possible_configs = len(self._possible_configs)\n\n def _build_task_model(self, net_input, output_size, task_cnt, params=None, trainable=False):\n if params is None:\n params_KB, params_TS, params_conv, params_fc = None, None, None, None\n else:\n params_KB, params_TS, params_conv, params_fc = params['KB'], params['TS'], params['TS_Conv'], params['FC']\n\n if params_conv is not None:\n assert (len(params_conv) == 2*self.num_conv_layers), \"Given trained parameters of conv doesn't match to the hyper-parameters!\"\n if params_fc is not None:\n assert (len(params_fc) == 2*self.num_fc_layers), \"Given trained parameters of fc doesn't match to the hyper-parameters!\"\n\n eval_net = []\n if (task_cnt==self.current_task) and self.task_is_new:\n ## DARTS-based DF-CNN\n with tf.name_scope('DARTS_DFCNN'):\n task_net, _, dfcnn_TS_params, conv_params, conv_select_params, fc_params = new_darts_dfcnn_fc_net(net_input, self.cnn_kernel_size, self.cnn_channels_size, self.cnn_stride_size, list(self.fc_size)+[output_size], self.dfcnn_KB_size, self.dfcnn_TS_size, self.dfcnn_stride_size, cnn_activation_fn=self.hidden_act, dfcnn_TS_activation_fn=None, fc_activation_fn=self.hidden_act, dfcnn_KB_params=params_KB, dfcnn_TS_params=params_TS, cnn_TS_params=params_conv, select_params=None, fc_params=params_fc, KB_reg_type=self.KB_l2_reg, TS_reg_type=self.TS_l2_reg, padding_type=self.padding_type, max_pool=self.max_pooling, pool_sizes=self.pool_size, dropout=self.dropout, dropout_prob=self.dropout_prob, trainable=trainable, task_index=task_cnt)\n self.conv_select_params = conv_select_params\n\n ## build network for evaluation\n for conf in self._possible_configs:\n net_tmp, _, _, _, _, _, _ = new_ELLA_flexible_cnn_deconv_tensordot_fc_net(net_input, self.cnn_kernel_size, self.cnn_channels_size, self.cnn_stride_size, list(self.fc_size)+[output_size], conf, self.dfcnn_KB_size, self.dfcnn_TS_size, self.dfcnn_stride_size, cnn_activation_fn=self.hidden_act, cnn_para_activation_fn=None, cnn_KB_params=params_KB, cnn_TS_params=dfcnn_TS_params, cnn_params=conv_params, fc_activation_fn=self.hidden_act, fc_params=fc_params, KB_reg_type=self.KB_l2_reg, TS_reg_type=self.TS_l2_reg, 
padding_type=self.padding_type, max_pool=self.max_pooling, pool_sizes=self.pool_size, dropout=self.dropout, dropout_prob=self.dropout_prob, task_index=task_cnt, skip_connections=list(self.skip_connect), trainable=trainable)\n eval_net.append(net_tmp[-1])\n else:\n ## DF-CNN with the learned configuration\n task_net, _, dfcnn_TS_params, _, conv_params, _, fc_params = new_ELLA_flexible_cnn_deconv_tensordot_fc_net(net_input, self.cnn_kernel_size, self.cnn_channels_size, self.cnn_stride_size, list(self.fc_size)+[output_size], self.conv_sharing[task_cnt], self.dfcnn_KB_size, self.dfcnn_TS_size, self.dfcnn_stride_size, cnn_activation_fn=self.hidden_act, cnn_para_activation_fn=None, cnn_KB_params=params_KB, cnn_TS_params=params_TS, cnn_params=params_conv, fc_activation_fn=self.hidden_act, fc_params=params_fc, KB_reg_type=self.KB_l2_reg, TS_reg_type=self.TS_l2_reg, padding_type=self.padding_type, max_pool=self.max_pooling, pool_sizes=self.pool_size, dropout=self.dropout, dropout_prob=self.dropout_prob, task_index=task_cnt, skip_connections=list(self.skip_connect), trainable=trainable)\n return task_net, eval_net, dfcnn_TS_params, conv_params, fc_params\n\n def _build_whole_model(self):\n for task_cnt, (num_classes, x_b) in enumerate(zip(self.output_sizes, self.x_batch)):\n if (task_cnt==self.current_task) and (self.task_is_new):\n param_to_reuse = {'KB': self.dfcnn_KB_params, 'TS': None, 'TS_Conv': None, 'FC': None}\n else:\n param_to_reuse = {'KB': self.dfcnn_KB_params, 'TS': self.np_params[task_cnt]['TS'], 'TS_Conv': self.np_params[task_cnt]['TS_Conv'], 'FC': self.np_params[task_cnt]['FC']}\n task_net, eval_net, dfcnn_TS_params, conv_TS_params, fc_params = self._build_task_model(x_b, num_classes, task_cnt, params=param_to_reuse, trainable=(task_cnt==self.current_task))\n\n self.task_models.append(task_net)\n self.dfcnn_TS_params.append(dfcnn_TS_params)\n self.conv_params.append(conv_TS_params)\n self.fc_params.append(fc_params)\n self.params.append(self._collect_trainable_variables())\n self.num_trainable_var += count_trainable_var2(self.params[-1]) if task_cnt < 1 else count_trainable_var2(self.params[-1]) - self.dfcnn_KB_params_size\n\n if len(eval_net) > 0:\n self.darts_eval_models = eval_net\n\n self.dfcnn_KB_trainable_param = get_list_of_valid_tensors(self.dfcnn_KB_params)\n self.dfcnn_TS_trainable_param = get_list_of_valid_tensors(self.dfcnn_TS_params[self.current_task])\n self.conv_trainable_param = get_list_of_valid_tensors(self.conv_params[self.current_task])\n self.fc_trainable_param = get_list_of_valid_tensors(self.fc_params[self.current_task])\n self.trainable_params = list(self.dfcnn_KB_trainable_param) + list(self.dfcnn_TS_trainable_param) + list(self.conv_trainable_param) + list(self.fc_trainable_param)\n\n def add_new_task(self, output_dim, curr_task_index, single_input_placeholder=False):\n self.conv_select_params, self.darts_eval_models = None, None\n self._shared_param_init()\n super().add_new_task(output_dim, curr_task_index, single_input_placeholder=single_input_placeholder)\n\n def _shared_param_init(self):\n self.dfcnn_TS_params = []\n self.KB_l2_reg = tf.contrib.layers.l2_regularizer(scale=self.dfcnn_KB_reg_scale)\n self.TS_l2_reg = tf.contrib.layers.l2_regularizer(scale=self.dfcnn_TS_reg_scale)\n KB_init_val = self.np_params[0]['KB'] if hasattr(self, 'np_params') else [None for _ in range(self.num_conv_layers)]\n self.dfcnn_KB_params = [new_ELLA_KB_param([1, self.dfcnn_KB_size[2*layer_cnt], self.dfcnn_KB_size[2*layer_cnt], self.dfcnn_KB_size[2*layer_cnt+1]], layer_cnt, 
0, self.KB_l2_reg, KB_init_val[layer_cnt], True) for layer_cnt in range(self.num_conv_layers)]\n self.dfcnn_KB_params_size = count_trainable_var2(self.dfcnn_KB_params)\n\n def get_darts_selection_val(self, sess):\n return get_value_of_valid_tensors(sess, self.conv_select_params)\n\n def get_params_val(self, sess, use_npparams=True):\n selection_params_val = self.get_darts_selection_val(sess)\n if use_npparams:\n KB_val = self.np_params[0]['KB']\n TS_val = [np_p['TS'] for np_p in self.np_params]\n TS_conv_val = [np_p['TS_Conv'] for np_p in self.np_params]\n fc_val = [np_p['FC'] for np_p in self.np_params]\n else:\n KB_val = get_value_of_valid_tensors(sess, self.dfcnn_KB_params)\n TS_val = [get_value_of_valid_tensors(sess, dfcnn_TS_param) for dfcnn_TS_param in self.dfcnn_TS_params]\n TS_conv_val = [get_value_of_valid_tensors(sess, cnn_TS_param) for cnn_TS_param in self.conv_params]\n fc_val = [get_value_of_valid_tensors(sess, fc_param) for fc_param in self.fc_params]\n\n parameters_val = {}\n parameters_val['DARTS_selection_param'] = savemat_wrapper(selection_params_val)\n parameters_val['KB'] = savemat_wrapper(KB_val)\n parameters_val['TS'] = savemat_wrapper_nested_list(TS_val)\n parameters_val['TS_conv'] = savemat_wrapper_nested_list(TS_conv_val)\n parameters_val['fc_weights'] = savemat_wrapper_nested_list(fc_val)\n return parameters_val\n\n def best_config(self, sess):\n ## return the index of appropriate sharing configuration (self._possible_configs) according to the value of DARTS selection parameters\n selection_val = self.get_darts_selection_val(sess)\n # argmax 0 -> task-specific / argmax 1 -> shared\n selected_config_index = 0\n for layer_cnt, (layer_select) in enumerate(selection_val):\n selected_config_index = selected_config_index + np.argmax(layer_select) * (2**layer_cnt)\n return selected_config_index\n\n def darts_learned_selection(self, sess):\n ## return the list of decision (T:shared/F:task-specific) of sharing in each layer according to the value of DARTS selection parameters\n ## for elements of self.conv_sharing (e.g. 
'bottom2' : [TTFFF..])\n selection_val = self.get_darts_selection_val(sess)\n sharing_flags = []\n for layer_select in selection_val:\n sharing_flags.append(np.argmax(layer_select))\n return sharing_flags\n\n def define_eval(self):\n with tf.name_scope('Model_Eval'):\n mask = tf.reshape(tf.cast(tf.range(self.batch_size)<self.num_data_in_batch, dtype=tf.float32), [self.batch_size, 1])\n self.eval = [tf.nn.softmax(task_model[-1])*mask for task_model in self.task_models]\n self.pred = [tf.argmax(task_model[-1]*mask, 1) for task_model in self.task_models]\n if self.task_is_new:\n self.eval_for_new_task = [tf.nn.softmax(task_model)*mask for task_model in self.darts_eval_models]\n self.pred_for_new_task = [tf.argmax(task_model*mask, 1) for task_model in self.darts_eval_models]\n\n def _loss_func(self, y1, y2):\n return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.cast(y1, tf.int32), logits=y2))\n\n def define_loss(self):\n with tf.name_scope('Model_Loss'):\n self.loss = [self._loss_func(y_batch, task_model[-1]) for y_batch, task_model in zip(self.y_batch, self.task_models)]\n\n def define_accuracy(self):\n with tf.name_scope('Model_Accuracy'):\n mask = tf.cast(tf.range(self.batch_size)<self.num_data_in_batch, dtype=tf.float32)\n self.accuracy = [tf.reduce_sum(tf.cast(tf.equal(tf.argmax(task_model[-1], 1), tf.cast(y_batch, tf.int64)), tf.float32)*mask) for y_batch, task_model in zip(self.y_batch, self.task_models)]\n if self.task_is_new:\n self.accuracy_for_new_task = [tf.reduce_sum(tf.cast(tf.equal(tf.argmax(task_model, 1), tf.cast(self.y_batch[self.current_task], tf.int64)), tf.float32)*mask) for task_model in self.darts_eval_models]\n\n def define_opt(self):\n with tf.name_scope('Optimization'):\n reg_var = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n KB_reg_term2 = tf.contrib.layers.apply_regularization(self.KB_l2_reg, reg_var)\n TS_reg_term2 = tf.contrib.layers.apply_regularization(self.TS_l2_reg, reg_var)\n\n KB_grads = tf.gradients(self.loss[self.current_task] + KB_reg_term2, self.dfcnn_KB_trainable_param)\n KB_grads_vars = [(grad, param) for grad, param in zip(KB_grads, self.dfcnn_KB_trainable_param)]\n\n TS_grads = tf.gradients(self.loss[self.current_task] + TS_reg_term2, self.dfcnn_TS_trainable_param)\n TS_grads_vars = [(grad, param) for grad, param in zip(TS_grads, self.dfcnn_TS_trainable_param)]\n\n conv_grads = tf.gradients(self.loss[self.current_task], self.conv_trainable_param)\n conv_grads_vars = [(grad, param) for grad, param in zip(conv_grads, self.conv_trainable_param)]\n\n fc_grads = tf.gradients(self.loss[self.current_task], self.fc_trainable_param)\n fc_grads_vars = [(grad, param) for grad, param in zip(fc_grads, self.fc_trainable_param)]\n\n self.grads = list(KB_grads) + list(TS_grads) + list(conv_grads) + list(fc_grads)\n trainer = tf.train.RMSPropOptimizer(learning_rate=self.learn_rate/(1.0+self.epoch*self.learn_rate_decay))\n self.update = trainer.apply_gradients(KB_grads_vars + TS_grads_vars + conv_grads_vars + fc_grads_vars)\n if self.task_is_new:\n if self.approx_order == 1:\n self.selection_grads = tf.gradients(self.loss[self.current_task], self.conv_select_params)\n elif self.approx_order == 2:\n raise NotImplementedError(\"Not Implemented because of 2nd derivative Issue!\")\n\n trainer2 = tf.train.RMSPropOptimizer(learning_rate=self.learn_rate/(1.0+self.epoch*self.learn_rate_decay))\n self.selection_update = trainer2.apply_gradients(list(zip(self.selection_grads, self.conv_select_params)))\n\n def convert_tfVar_to_npVar(self, 
sess):\n if not (self.num_tasks == 1 and self.task_is_new):\n orig_KB = list(self.np_params[0]['KB']) ## copy of shared conv before training current task\n else:\n orig_KB = [None for _ in range(2*self.num_conv_layers)]\n\n def list_param_converter(list_of_params):\n converted_params = []\n for p in list_of_params:\n if type(p) == np.ndarray:\n converted_params.append(p)\n elif _tf_tensor(p):\n converted_params.append(sess.run(p))\n else:\n converted_params.append(p) ## append 'None' param\n return converted_params\n\n def double_list_param_converter(list_of_params):\n converted_params = []\n for task_params in list_of_params:\n converted_params.append(list_param_converter(task_params))\n return converted_params\n\n def post_process(layers_to_share, original_KB, updated_KB, updated_TS, updated_conv):\n for layer_cnt, (sharing_flag) in enumerate(layers_to_share):\n if sharing_flag:\n ### Sharing this layer -> use new KB, TS, and make conv param None\n updated_conv[self.current_task][2*layer_cnt], updated_conv[self.current_task][2*layer_cnt+1] = None, None\n else:\n ### Not sharing this layer -> roll back KB, make TS None, and keep conv param (no action needed)\n updated_KB[layer_cnt] = original_KB[layer_cnt]\n updated_TS[self.current_task][4*layer_cnt], updated_TS[self.current_task][4*layer_cnt+1] = None, None\n updated_TS[self.current_task][4*layer_cnt+2], updated_TS[self.current_task][4*layer_cnt+3] = None, None\n return updated_KB, updated_TS, updated_conv\n\n self.np_params = []\n if len(self.conv_sharing) < self.num_tasks:\n self.conv_sharing.append(self.darts_learned_selection(sess))\n np_KB = list_param_converter(self.dfcnn_KB_params)\n np_TS = double_list_param_converter(self.dfcnn_TS_params)\n np_conv = double_list_param_converter(self.conv_params)\n np_fc = double_list_param_converter(self.fc_params)\n\n np_KB, np_TS, np_conv = post_process(self.conv_sharing[self.current_task], orig_KB, np_KB, np_TS, np_conv)\n for t, c, f in zip(np_TS, np_conv, np_fc):\n self.np_params.append({'KB': np_KB, 'TS': t, 'TS_Conv': c, 'FC': f} if len(self.np_params)< 1 else {'TS': t, 'TS_Conv': c, 'FC': f})\n\n def _collect_trainable_variables(self):\n return_list = []\n for p in self.dfcnn_KB_params:\n if p is not None:\n return_list.append(p)\n for p in self.dfcnn_TS_params[-1]:\n if p is not None:\n return_list.append(p)\n for p in self.conv_params[-1]:\n if p is not None:\n return_list.append(p)\n for p in self.fc_params[-1]:\n if p is not None:\n return_list.append(p)\n return return_list\n\n def train_one_epoch(self, sess, data_x, data_y, epoch_cnt, task_index, learning_indices=None, augment_data=False, dropout_prob=1.0):\n task_model_index = self.find_task_model(task_index)\n num_train = data_x.shape[0]\n if learning_indices is None:\n learning_indices = list(range(num_train))\n shuffle(learning_indices)\n\n for batch_cnt in range(num_train//self.batch_size):\n batch_train_x = data_x[learning_indices[batch_cnt*self.batch_size:(batch_cnt+1)*self.batch_size]]\n batch_train_y = data_y[learning_indices[batch_cnt*self.batch_size:(batch_cnt+1)*self.batch_size]]\n\n if self.task_is_new:\n ## Update architecture (selection param)\n sess.run(self.selection_update, feed_dict={self.model_input[task_model_index]: batch_train_x, self.true_output[task_model_index]: batch_train_y, self.epoch: epoch_cnt, self.dropout_prob: dropout_prob})\n\n ## Update NN weights\n sess.run(self.update, feed_dict={self.model_input[task_model_index]: batch_train_x, self.true_output[task_model_index]: batch_train_y, self.epoch: 
epoch_cnt, self.dropout_prob: dropout_prob})\n\n def eval_one_task(self, sess, data_x, task_index, dropout_prob=1.0):\n task_model_index = self.find_task_model(task_index)\n num_data, num_classes = data_x.shape[0], self.output_sizes[task_model_index]\n eval_output = np.zeros([num_data, num_classes], dtype=np.float32)\n\n num_batch = num_data//self.batch_size\n num_remains = num_data - self.batch_size*num_batch\n\n if self.task_is_new and (self.current_task == task_model_index):\n best_config = self.best_config(sess)\n eval_func = self.eval_for_new_task[best_config]\n else:\n eval_func = self.eval[task_model_index]\n\n for batch_cnt in range(num_batch):\n eval_output[batch_cnt*self.batch_size:(batch_cnt+1)*self.batch_size] = sess.run(eval_func, feed_dict={self.model_input: data_x[batch_cnt*self.batch_size:(batch_cnt+1)*self.batch_size], self.dropout_prob: dropout_prob, self.num_data_in_batch: self.batch_size})\n if num_remains > 0:\n temp_pred = sess.run(eval_func, feed_dict={self.model_input: data_x_add_dummy(data_x[-num_remains:], self.batch_size), self.dropout_prob: dropout_prob, self.num_data_in_batch: num_remains})\n eval_output[-num_remains:] = temp_pred[0:num_remains]\n return eval_output\n\n def infer_one_task(self, sess, data_x, task_index, dropout_prob=1.0):\n task_model_index = self.find_task_model(task_index)\n num_data = data_x.shape[0]\n inferred_labels = np.zeros(num_data, dtype=np.int32)\n\n num_batch = num_data//self.batch_size\n num_remains = num_data - self.batch_size*num_batch\n\n if self.task_is_new and (self.current_task == task_model_index):\n best_config = self.best_config(sess)\n pred_func = self.pred_for_new_task[best_config]\n else:\n pred_func = self.pred[task_model_index]\n\n for batch_cnt in range(num_batch):\n temp_pred = sess.run(pred_func, feed_dict={self.model_input[task_model_index]: data_x[batch_cnt*self.batch_size:(batch_cnt+1)*self.batch_size], self.dropout_prob: dropout_prob, self.num_data_in_batch: self.batch_size})\n inferred_labels[batch_cnt*self.batch_size:(batch_cnt+1)*self.batch_size] = np.squeeze(temp_pred)\n if num_remains > 0:\n temp_pred = sess.run(pred_func, feed_dict={self.model_input[task_model_index]: data_x_add_dummy(data_x[-num_remains:], self.batch_size), self.dropout_prob: dropout_prob, self.num_data_in_batch: num_remains})\n inferred_labels[-num_remains:] = np.squeeze(temp_pred[0:num_remains])\n return inferred_labels\n\n def compute_accuracy_one_task(self, sess, data_x, data_y, task_index, dropout_prob=1.0):\n task_model_index = self.find_task_model(task_index)\n num_data, accuracy = data_x.shape[0], 0.0\n\n num_batch = num_data//self.batch_size\n num_remains = num_data - self.batch_size*num_batch\n\n if self.task_is_new and (self.current_task == task_model_index):\n best_config = self.best_config(sess)\n acc_func = self.accuracy_for_new_task[best_config]\n else:\n acc_func = self.accuracy[task_model_index]\n\n for batch_cnt in range(num_batch):\n accuracy += sess.run(acc_func, feed_dict={self.model_input[task_model_index]: data_x[batch_cnt*self.batch_size:(batch_cnt+1)*self.batch_size], self.true_output[task_model_index]: data_y[batch_cnt*self.batch_size:(batch_cnt+1)*self.batch_size], self.dropout_prob: dropout_prob, self.num_data_in_batch: self.batch_size})\n if num_remains > 0:\n tmp_x, tmp_y = data_x_and_y_add_dummy(data_x[-num_remains:], data_y[-num_remains:], self.batch_size)\n accuracy += sess.run(acc_func, feed_dict={self.model_input[task_model_index]: tmp_x, self.true_output[task_model_index]: tmp_y, self.dropout_prob: 
dropout_prob, self.num_data_in_batch: num_remains})\n return float(accuracy)/float(num_data)"
]
| [
[
"tensorflow.range",
"numpy.zeros",
"numpy.squeeze",
"tensorflow.argmax",
"tensorflow.train.RMSPropOptimizer",
"tensorflow.gradients",
"tensorflow.norm",
"numpy.argmax",
"tensorflow.name_scope",
"tensorflow.nn.softmax",
"tensorflow.contrib.layers.apply_regularization",
"tensorflow.__version__.split",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.get_collection",
"tensorflow.cast"
]
]
|
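A quick illustration of the sharing-configuration indexing used by best_config and darts_learned_selection in the row above: each conv layer contributes one bit (argmax 0 = task-specific, argmax 1 = shared), packed little-endian into an index over self._possible_configs. A minimal standalone sketch, with made-up selection scores:

    import numpy as np

    def config_index(selection_val):
        # mirrors best_config: one bit per layer, weighted by 2**layer
        return sum(int(np.argmax(sel)) * (2 ** layer)
                   for layer, sel in enumerate(selection_val))

    selection = [np.array([0.2, 0.8]),  # layer 0: shared         -> bit 0 = 1
                 np.array([0.9, 0.1]),  # layer 1: task-specific  -> bit 1 = 0
                 np.array([0.3, 0.7])]  # layer 2: shared         -> bit 2 = 1
    assert config_index(selection) == 5  # binary 101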
vroger11/audio_loader | [
"84af1dde0eac92369d03b2993f3ffa46b13a1f89"
]
| [
"audio_loader/ground_truth/c2si.py"
]
| [
"\"\"\"Tools helping with the C2SI corpus.\"\"\"\nimport re\n\nfrom glob import glob\nfrom os.path import join\n\nimport numpy as np\nimport pandas as pd\nimport soundfile as sf\n\nfrom audio_loader.ground_truth.challenge import Challenge\n\n\nEXPLICIT_TASKS = [\"maintained A\", \"reading\", \"describe an image\", \"spontaneus\", \"DAP\", \"Modality, Focus, Syntax or SVT\"]\nTASKS = [\"V\", \"L\", \"D\", \"P\", \"N\", \"S\"]\nGROUPS = {\n \"all\": [1, 2],\n \"patients\": [1],\n \"controls\": [2]\n}\n\nclass C2SI(Challenge):\n \"\"\"Ground getter for C2SI dataset.\"\"\"\n\n def __init__(self, c2si_root_folderpath, datapath=\"16k\",\n gtpath=\"BASE-Version-2020-01-05.xlsx\", sets=None,\n regression_scales=None,\n severity_score=False,\n intelligibility_score=False,\n targeted_tasks=['L'],\n group=\"all\"):\n \"\"\"Compatible with the C2SI corpus.\n\n Parameters:\n -----------\n c2si_root_folderpath: str\n Folder containing the C2SI data.\n\n datapath: str, optional\n Relative path of the data from the c2si_root_folderpath.\n\n gtpath: str, optional\n Relative path of the Exel file.\n\n sets: str, optional\n csv file which contains four columns:\n \"relative path\", \"train\", \"dev\", \"test\"\n if = None all data in data_folderpath are considered as a testing data\n\n regression_scales: list of intervals, optional\n Each scale give an output.\n\n severity_score: boolean, optional\n Output will be filled by the severity score.\n\n intelligibility_score: boolean, optional\n Output will be filled by the intelligibility score.\n\n targeted_tasks: list, optional\n List of tasks to use, possible values are \"V\", \"L\", \"D\", \"P\", \"N\", \"S\"\n \"L\" is for reading task.\n\n group: str, optional\n Corresponding group. Can be either \"all\", \"patients\" or \"controls\".\n \"\"\"\n super().__init__(c2si_root_folderpath, datapath, gtpath)\n self.regression_scales = regression_scales\n self.severity_score = severity_score\n self.intelligibility_score = intelligibility_score\n if sets is not None:\n self.df_data = pd.read_csv(sets, delimiter=\",\")\n else:\n data_absolute_path = join(c2si_root_folderpath, self.datapath)\n dic_data = {\"relative path\": [], \"train\": [], \"dev\": [], \"test\": []}\n for fn in glob(join(data_absolute_path, \"**/*.wav\"), recursive=True):\n dic_data[\"relative path\"].append(self.get_id(fn))\n\n dic_data[\"train\"] = np.full(len(dic_data[\"relative path\"]), False)\n dic_data[\"dev\"] = np.full(len(dic_data[\"relative path\"]), False)\n dic_data[\"test\"] = np.full(len(dic_data[\"relative path\"]), True)\n self.df_data = pd.DataFrame(data=dic_data)\n\n # add infos to the dataframe\n df_base = pd.read_excel(join(c2si_root_folderpath, gtpath), sheet_name=\"base\")\n # remove lines with no id\n df_base = df_base[df_base[\"ID-RUGBI\"].notnull()]\n\n nb_rows = self.df_data.shape[0]\n ids_rugbi = np.empty(nb_rows, dtype=int)\n sessions = np.empty(nb_rows, dtype=int)\n intel_scores = np.empty(nb_rows, dtype=float)\n sev_scores = np.empty(nb_rows, dtype=float)\n loc_records = np.empty(nb_rows, dtype=str)\n tasks = np.empty(nb_rows, dtype=str)\n groups = np.empty(nb_rows, dtype=float)\n sex = np.empty(nb_rows, dtype=float)\n ages = np.empty(nb_rows, dtype=float)\n for index, row in self.df_data.iterrows():\n filepath = row[\"relative path\"]\n loc_records[index], task, _, id_rugbi, session = get_infos(filepath)\n tasks[index] = task\n ids_rugbi[index] = id_rugbi\n sessions[index] = session\n\n gt_row_selected = df_base.loc[(df_base['ID-RUGBI'] == id_rugbi) & 
(df_base[\"enr2x\"] == session)]\n groups[index] = gt_row_selected[\"groupe\"].values[0]\n sex[index] = gt_row_selected[\"sexe\"].values[0]\n ages[index] = gt_row_selected[\"age\"].values[0]\n if task == \"L\":\n intel_scores[index] = gt_row_selected[\"intellec\"].values[0]\n sev_scores[index] = gt_row_selected[\"sevlec\"].values[0]\n else:\n intel_scores[index] = gt_row_selected[\"intel\"].values[0]\n sev_scores[index] = gt_row_selected[\"sev\"].values[0]\n\n self.df_data = self.df_data.assign(id_rugbi=ids_rugbi)\n self.df_data = self.df_data.assign(session=sessions)\n self.df_data = self.df_data.assign(loc_record=loc_records)\n self.df_data = self.df_data.assign(task=tasks)\n self.df_data = self.df_data.assign(intel=intel_scores)\n self.df_data = self.df_data.assign(sev=sev_scores)\n self.df_data = self.df_data.assign(group=groups)\n self.df_data = self.df_data.assign(sex=sex)\n self.df_data = self.df_data.assign(age=ages)\n\n # select taks and groups\n self.select_data(targeted_tasks, group)\n\n def select_data(self, targeted_tasks, group):\n \"\"\"Select data according to tasks and the group.\"\"\"\n selected_groups = self.df_data.group.isin(GROUPS[group])\n selected_tasks = self.df_data.task.isin(targeted_tasks)\n self.df_selected = self.df_data[selected_groups & selected_tasks]\n\n @property\n def training_set(self):\n \"\"\"Return array of filepaths from training set.\"\"\"\n train_list = self.df_selected[self.df_selected[\"train\"]][\"relative path\"].values\n return [filepath + \".wav\" for filepath in train_list]\n\n @property\n def devel_set(self):\n \"\"\"Return array of filepaths from development set.\"\"\"\n dev_list = self.df_selected[self.df_selected[\"dev\"]][\"relative path\"].values\n return [filepath + \".wav\" for filepath in dev_list]\n\n @property\n def testing_set(self):\n \"\"\"Return array of filepaths from testing set.\"\"\"\n test_list = self.df_selected[self.df_selected[\"test\"]][\"relative path\"].values\n return [filepath + \".wav\" for filepath in test_list]\n\n @property\n def gt_size(self):\n \"\"\"Return the size of the ground_truth.\"\"\"\n i = 0\n i += 1 if self.severity_score else 0\n i += 1 if self.intelligibility_score else 0\n\n if self.regression_scales is not None:\n return (len(self.regression_scales), i)\n\n return i\n\n def get_samples_time_in(self, filepath):\n \"\"\"Return a list of tuples corresponding to the start and end times of each sample.\n\n Parameters:\n -----------\n filepath: str\n Filepath of the audio file we want to get the ground truth times.\n \"\"\"\n info_file = sf.info(filepath)\n return [(0, info_file.samplerate*info_file.duration)]\n\n def _fill_output(self, id_audio, sample_begin, sample_end, output):\n \"\"\"Tool to fill an output array.\n\n Parameters\n ----------\n id_audio: str\n Id of the audio file, it is the relative path.\n\n sample_begin: integer > 0\n\n sample_end: integer > 0\n\n output: np.array\n Array to fill with ground truth (supposed zeros).\n \"\"\"\n selected_row = self.df_data[self.df_data[\"relative path\"] == id_audio]\n sev = selected_row[\"sev\"].values\n intel = selected_row[\"intel\"].values\n\n if self.regression_scales is not None:\n j = 0\n if self.severity_score:\n i = 0\n for interval in self.regression_scales:\n if interval[0] <= sev <= interval[1]:\n output[i, j] = 1\n break\n\n i += 1\n\n j = 1\n\n if self.intelligibility_score:\n i = 0\n for interval in self.regression_scales:\n if interval[0] <= intel <= interval[1]:\n output[i, j] = 1\n break\n\n i += 1\n\n else:\n i = 0\n if 
self.severity_score:\n output[0] = sev\n i = 1\n\n if self.intelligibility_score:\n output[i] = intel\n\n\n def get_output_description(self):\n \"\"\"Return a list that describes the output.\"\"\"\n output = []\n\n if self.regression_scales is not None:\n if self.severity_score:\n for interval in self.regression_scales:\n output.append([interval, \"severity\"])\n\n if self.intelligibility_score:\n for interval in self.regression_scales:\n output.append([interval, \"intelligibility\"])\n else:\n if self.severity_score:\n output.append(\"severity\")\n\n if self.intelligibility_score:\n output.append(\"intelligibility\")\n\n\n return output\n\n def get_gt_for(self, filepath):\n \"\"\"Get tuples corresponding to the start, end times of each sample and\n the ground truth expected.\n\n Parameters:\n -----------\n filepath: str\n Filepath of the audio file we want to get the ground truth.\n \"\"\"\n raise NotImplementedError(\"Not yet implemented\")\n\n def get_majority_gt_at_sample(self, filepath, sample_begin, sample_end):\n \"\"\"Return an integer that represents the majority class for a specific sample.\"\"\"\n raise Exception(\"Not possible with the C2SI dataset\")\n\n\ndef get_infos(filepath):\n \"\"\"Return the infos related to the id.\"\"\"\n match = re.match(\"(.*)/(.*)-(.*)-(.*)-(.).*\", filepath)\n loc_record = match[2]\n session_number = match[4]\n task = match[5]\n id_base = match[1]\n id_rugbi = match[3]\n\n return loc_record, task, id_base, int(id_rugbi), int(session_number)\n\n\ndef create_splits(c2si_root_folderpath, output_filename, datapath=\"16k\", gtpath=\"base.xlsx\"):\n \"\"\"Create splits for C2SI corpus.\n\n Parameters\n ----------\n \"\"\"\n # TODO\n
]
| [
[
"pandas.DataFrame",
"pandas.read_csv",
"numpy.empty"
]
]
|
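When regression_scales is given, C2SI._fill_output above one-hot encodes a score by the first interval that contains it (one column per score type). A minimal sketch of that encoding, with hypothetical intervals:

    import numpy as np

    def one_hot_interval(score, scales):
        # one column of the (len(scales), n_scores) output array
        out = np.zeros(len(scales))
        for i, (low, high) in enumerate(scales):
            if low <= score <= high:
                out[i] = 1
                break
        return out

    scales = [(0, 3), (3, 6), (6, 10)]  # hypothetical severity intervals
    print(one_hot_interval(4.5, scales))  # [0. 1. 0.]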
CompbioLabUnist/dream_challenge-anti-pd1_response | [
"5b45bf1c7b94878024d35db5478c39286bc0da6c"
]
| [
"jwlee230/Program/Python/step01.py"
]
| [
"\"\"\"\nstep01.py: Read data from TIDEpy\n\"\"\"\nimport argparse\nimport pandas\nimport step00\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"input\", help=\"Input PKL file\", type=str)\n parser.add_argument(\"output\", help=\"Output tar.gz file\", type=str)\n\n args = parser.parse_args()\n\n data = pandas.read_pickle(args.input)\n step00.make_pickle(args.output, data)\n"
]
| [
[
"pandas.read_pickle"
]
]
|
haribharadwaj/mne-python | [
"4faf87f4ac2deb133a40554df02f0596046611a7"
]
| [
"mne/viz/backends/_notebook.py"
]
| [
"\"\"\"Notebook implementation of _Renderer and GUI.\"\"\"\n\n# Authors: Guillaume Favelier <[email protected]>\n#\n# License: Simplified BSD\n\nfrom contextlib import contextmanager, nullcontext\n\nfrom IPython.display import display\nfrom ipywidgets import (Button, Dropdown, FloatSlider, BoundedFloatText, HBox,\n IntSlider, IntText, Text, VBox, IntProgress, Play,\n Checkbox, RadioButtons, jsdlink)\n\nfrom ._abstract import (_AbstractDock, _AbstractToolBar, _AbstractMenuBar,\n _AbstractStatusBar, _AbstractLayout, _AbstractWidget,\n _AbstractWindow, _AbstractMplCanvas, _AbstractPlayback,\n _AbstractBrainMplCanvas, _AbstractMplInterface,\n _AbstractWidgetList)\nfrom ._pyvista import _PyVistaRenderer, _close_all, _set_3d_view, _set_3d_title # noqa: F401,E501, analysis:ignore\n\n\nclass _IpyLayout(_AbstractLayout):\n def _layout_initialize(self, max_width):\n self._layout_max_width = max_width\n\n def _layout_add_widget(self, layout, widget, stretch=0):\n widget.layout.margin = \"2px 0px 2px 0px\"\n if not isinstance(widget, Play):\n widget.layout.min_width = \"0px\"\n children = list(layout.children)\n children.append(widget)\n layout.children = tuple(children)\n # Fix columns\n if self._layout_max_width is not None and isinstance(widget, HBox):\n children = widget.children\n width = int(self._layout_max_width / len(children))\n for child in children:\n child.layout.width = f\"{width}px\"\n\n\nclass _IpyDock(_AbstractDock, _IpyLayout):\n def _dock_initialize(self, window=None, name=\"Controls\",\n area=\"left\"):\n self._dock_width = 300\n # XXX: this can be improved\n if hasattr(self, \"_dock\") and hasattr(self, \"_dock_layout\"):\n self._dock2 = self._dock\n self._dock_layout2 = self._dock_layout\n self._dock = self._dock_layout = VBox()\n self._dock.layout.width = f\"{self._dock_width}px\"\n self._layout_initialize(self._dock_width)\n\n def _dock_finalize(self):\n pass\n\n def _dock_show(self):\n self._dock_layout.layout.visibility = \"visible\"\n\n def _dock_hide(self):\n self._dock_layout.layout.visibility = \"hidden\"\n\n def _dock_add_stretch(self, layout=None):\n pass\n\n def _dock_add_layout(self, vertical=True):\n return VBox() if vertical else HBox()\n\n def _dock_add_label(self, value, align=False, layout=None):\n layout = self._dock_layout if layout is None else layout\n widget = Text(value=value, disabled=True)\n self._layout_add_widget(layout, widget)\n return _IpyWidget(widget)\n\n def _dock_add_button(self, name, callback, layout=None):\n layout = self._dock_layout if layout is None else layout\n widget = Button(description=name)\n widget.on_click(lambda x: callback())\n self._layout_add_widget(layout, widget)\n return _IpyWidget(widget)\n\n def _dock_named_layout(self, name, layout=None, compact=True):\n layout = self._dock_layout if layout is None else layout\n if name is not None:\n hlayout = self._dock_add_layout(not compact)\n self._dock_add_label(\n value=name, align=not compact, layout=hlayout)\n self._layout_add_widget(layout, hlayout)\n layout = hlayout\n return layout\n\n def _dock_add_slider(self, name, value, rng, callback,\n compact=True, double=False, layout=None):\n layout = self._dock_named_layout(name, layout, compact)\n klass = FloatSlider if double else IntSlider\n widget = klass(\n value=value,\n min=rng[0],\n max=rng[1],\n readout=False,\n )\n widget.observe(_generate_callback(callback), names='value')\n self._layout_add_widget(layout, widget)\n return _IpyWidget(widget)\n\n def _dock_add_check_box(self, name, value, callback, layout=None):\n layout = 
self._dock_layout if layout is None else layout\n widget = Checkbox(\n value=value,\n description=name,\n disabled=False\n )\n widget.observe(_generate_callback(callback), names='value')\n self._layout_add_widget(layout, widget)\n return _IpyWidget(widget)\n\n def _dock_add_spin_box(self, name, value, rng, callback,\n compact=True, double=True, step=None,\n layout=None):\n layout = self._dock_named_layout(name, layout, compact)\n klass = BoundedFloatText if double else IntText\n widget = klass(\n value=value,\n min=rng[0],\n max=rng[1],\n )\n if step is not None:\n widget.step = step\n widget.observe(_generate_callback(callback), names='value')\n self._layout_add_widget(layout, widget)\n return _IpyWidget(widget)\n\n def _dock_add_combo_box(self, name, value, rng,\n callback, compact=True, layout=None):\n layout = self._dock_named_layout(name, layout, compact)\n widget = Dropdown(\n value=value,\n options=rng,\n )\n widget.observe(_generate_callback(callback), names='value')\n self._layout_add_widget(layout, widget)\n return _IpyWidget(widget)\n\n def _dock_add_radio_buttons(self, value, rng, callback, vertical=True,\n layout=None):\n # XXX: vertical=False is not supported yet\n layout = self._dock_layout if layout is None else layout\n widget = RadioButtons(\n options=rng,\n value=value,\n disabled=False,\n )\n widget.observe(_generate_callback(callback), names='value')\n self._layout_add_widget(layout, widget)\n return _IpyWidgetList(widget)\n\n def _dock_add_group_box(self, name, layout=None):\n layout = self._dock_layout if layout is None else layout\n hlayout = VBox()\n self._layout_add_widget(layout, hlayout)\n return hlayout\n\n def _dock_add_text(self, name, value, placeholder, layout=None):\n layout = self._dock_layout if layout is None else layout\n widget = Text(value=value, placeholder=placeholder)\n self._layout_add_widget(layout, widget)\n return _IpyWidget(widget)\n\n def _dock_add_file_button(self, name, desc, func, value=None, save=False,\n directory=False, input_text_widget=True,\n placeholder=\"Type a file name\", layout=None):\n layout = self._dock_layout if layout is None else layout\n\n def callback():\n fname = self.actions[f\"{name}_field\"].value\n func(None if len(fname) == 0 else fname)\n hlayout = self._dock_add_layout(vertical=False)\n text_widget = self._dock_add_text(\n name=f\"{name}_field\",\n value=value,\n placeholder=placeholder,\n layout=hlayout,\n )\n button_widget = self._dock_add_button(\n name=desc,\n callback=callback,\n layout=hlayout,\n )\n self._layout_add_widget(layout, hlayout)\n return _IpyWidgetList([text_widget, button_widget])\n\n\ndef _generate_callback(callback, to_float=False):\n def func(data):\n value = data[\"new\"] if \"new\" in data else data[\"old\"]\n callback(float(value) if to_float else value)\n return func\n\n\nclass _IpyToolBar(_AbstractToolBar, _IpyLayout):\n def _tool_bar_load_icons(self):\n self.icons = dict()\n self.icons[\"help\"] = \"question\"\n self.icons[\"play\"] = None\n self.icons[\"pause\"] = None\n self.icons[\"reset\"] = \"history\"\n self.icons[\"scale\"] = \"magic\"\n self.icons[\"clear\"] = \"trash\"\n self.icons[\"movie\"] = \"video-camera\"\n self.icons[\"restore\"] = \"replay\"\n self.icons[\"screenshot\"] = \"camera\"\n self.icons[\"visibility_on\"] = \"eye\"\n self.icons[\"visibility_off\"] = \"eye\"\n\n def _tool_bar_initialize(self, name=\"default\", window=None):\n self.actions = dict()\n self._tool_bar = self._tool_bar_layout = HBox()\n self._layout_initialize(None)\n\n def 
_tool_bar_add_button(self, name, desc, func, icon_name=None,\n shortcut=None):\n icon_name = name if icon_name is None else icon_name\n icon = self.icons[icon_name]\n if icon is None:\n return\n widget = Button(tooltip=desc, icon=icon)\n widget.on_click(lambda x: func())\n self._layout_add_widget(self._tool_bar_layout, widget)\n self.actions[name] = widget\n\n def _tool_bar_update_button_icon(self, name, icon_name):\n self.actions[name].icon = self.icons[icon_name]\n\n def _tool_bar_add_text(self, name, value, placeholder):\n widget = Text(value=value, placeholder=placeholder)\n self._layout_add_widget(self._tool_bar_layout, widget)\n self.actions[name] = widget\n\n def _tool_bar_add_spacer(self):\n pass\n\n def _tool_bar_add_file_button(self, name, desc, func, shortcut=None):\n def callback():\n fname = self.actions[f\"{name}_field\"].value\n func(None if len(fname) == 0 else fname)\n self._tool_bar_add_text(\n name=f\"{name}_field\",\n value=None,\n placeholder=\"Type a file name\",\n )\n self._tool_bar_add_button(\n name=name,\n desc=desc,\n func=callback,\n )\n\n def _tool_bar_add_play_button(self, name, desc, func, shortcut=None):\n widget = Play(interval=500)\n self._layout_add_widget(self._tool_bar_layout, widget)\n self.actions[name] = widget\n return _IpyWidget(widget)\n\n def _tool_bar_set_theme(self, theme):\n pass\n\n\nclass _IpyMenuBar(_AbstractMenuBar):\n def _menu_initialize(self, window=None):\n pass\n\n def _menu_add_submenu(self, name, desc):\n pass\n\n def _menu_add_button(self, menu_name, name, desc, func):\n pass\n\n\nclass _IpyStatusBar(_AbstractStatusBar, _IpyLayout):\n def _status_bar_initialize(self, window=None):\n self._status_bar = self._status_bar_layout = HBox()\n self._layout_initialize(None)\n\n def _status_bar_show_message(self, value, timeout=5000):\n pass\n\n def _status_bar_add_label(self, value, stretch=0):\n widget = Text(value=value, disabled=True)\n self._layout_add_widget(self._status_bar_layout, widget)\n return _IpyWidget(widget)\n\n def _status_bar_add_progress_bar(self, stretch=0):\n widget = IntProgress()\n self._layout_add_widget(self._status_bar_layout, widget)\n return _IpyWidget(widget)\n\n def _status_bar_update(self):\n pass\n\n\nclass _IpyPlayback(_AbstractPlayback):\n def _playback_initialize(self, func, timeout, value, rng,\n time_widget, play_widget):\n play = play_widget._widget\n play.min = rng[0]\n play.max = rng[1]\n play.value = value\n slider = time_widget._widget\n jsdlink((play, 'value'), (slider, 'value'))\n jsdlink((slider, 'value'), (play, 'value'))\n\n\nclass _IpyMplInterface(_AbstractMplInterface):\n def _mpl_initialize(self):\n from matplotlib.backends.backend_nbagg import (FigureCanvasNbAgg,\n FigureManager)\n self.canvas = FigureCanvasNbAgg(self.fig)\n self.manager = FigureManager(self.canvas, 0)\n\n\nclass _IpyMplCanvas(_AbstractMplCanvas, _IpyMplInterface):\n def __init__(self, width, height, dpi):\n super().__init__(width, height, dpi)\n self._mpl_initialize()\n\n\nclass _IpyBrainMplCanvas(_AbstractBrainMplCanvas, _IpyMplInterface):\n def __init__(self, brain, width, height, dpi):\n super().__init__(brain, width, height, dpi)\n self._mpl_initialize()\n self._connect()\n\n\nclass _IpyWindow(_AbstractWindow):\n def _window_close_connect(self, func):\n pass\n\n def _window_get_dpi(self):\n return 96\n\n def _window_get_size(self):\n return self.figure.plotter.window_size\n\n def _window_get_simple_canvas(self, width, height, dpi):\n return _IpyMplCanvas(width, height, dpi)\n\n def _window_get_mplcanvas(self, brain, 
interactor_fraction, show_traces,\n separate_canvas):\n w, h = self._window_get_mplcanvas_size(interactor_fraction)\n self._interactor_fraction = interactor_fraction\n self._show_traces = show_traces\n self._separate_canvas = separate_canvas\n self._mplcanvas = _IpyBrainMplCanvas(\n brain, w, h, self._window_get_dpi())\n return self._mplcanvas\n\n def _window_adjust_mplcanvas_layout(self):\n pass\n\n def _window_get_cursor(self):\n pass\n\n def _window_set_cursor(self, cursor):\n pass\n\n def _window_new_cursor(self, name):\n pass\n\n @contextmanager\n def _window_ensure_minimum_sizes(self):\n yield\n\n def _window_set_theme(self, theme):\n pass\n\n\nclass _IpyWidgetList(_AbstractWidgetList):\n def __init__(self, src):\n self._src = src\n if isinstance(self._src, RadioButtons):\n self._widgets = _IpyWidget(self._src)\n else:\n self._widgets = list()\n for widget in self._src:\n if not isinstance(widget, _IpyWidget):\n widget = _IpyWidget(widget)\n self._widgets.append(widget)\n\n def set_enabled(self, state):\n if isinstance(self._src, RadioButtons):\n self._widgets.set_enabled(state)\n else:\n for widget in self._widgets:\n widget.set_enabled(state)\n\n def get_value(self, idx):\n if isinstance(self._src, RadioButtons):\n # for consistency, we do not use get_value()\n return self._widgets._widget.options[idx]\n else:\n return self._widgets[idx].get_value()\n\n def set_value(self, idx, value):\n if isinstance(self._src, RadioButtons):\n self._widgets.set_value(value)\n else:\n self._widgets[idx].set_value(value)\n\n\nclass _IpyWidget(_AbstractWidget):\n def set_value(self, value):\n if isinstance(self._widget, Button):\n self._widget.click()\n else:\n self._widget.value = value\n\n def get_value(self):\n return self._widget.value\n\n def set_range(self, rng):\n self._widget.min = rng[0]\n self._widget.max = rng[1]\n\n def show(self):\n self._widget.layout.visibility = \"visible\"\n\n def hide(self):\n self._widget.layout.visibility = \"hidden\"\n\n def set_enabled(self, state):\n self._widget.disabled = not state\n\n def update(self, repaint=True):\n pass\n\n\nclass _Renderer(_PyVistaRenderer, _IpyDock, _IpyToolBar, _IpyMenuBar,\n _IpyStatusBar, _IpyWindow, _IpyPlayback):\n _kind = 'notebook'\n\n def __init__(self, *args, **kwargs):\n self._dock = None\n self._tool_bar = None\n self._status_bar = None\n kwargs[\"notebook\"] = True\n super().__init__(*args, **kwargs)\n\n def _update(self):\n if self.figure.display is not None:\n self.figure.display.update_canvas()\n\n def _create_default_tool_bar(self):\n self._tool_bar_load_icons()\n self._tool_bar_initialize()\n self._tool_bar_add_file_button(\n name=\"screenshot\",\n desc=\"Take a screenshot\",\n func=self.screenshot,\n )\n\n def show(self):\n # default tool bar\n if self._tool_bar is None:\n self._create_default_tool_bar()\n display(self._tool_bar)\n # viewer\n viewer = self.plotter.show(\n jupyter_backend=\"ipyvtklink\", return_viewer=True)\n viewer.layout.width = None # unlock the fixed layout\n # main widget\n if self._dock is None:\n main_widget = viewer\n # XXX: this can be improved\n elif hasattr(self, \"_dock2\"):\n main_widget = HBox([self._dock2, viewer, self._dock])\n else:\n main_widget = HBox([self._dock, viewer])\n display(main_widget)\n self.figure.display = viewer\n # status bar\n if self._status_bar is not None:\n display(self._status_bar)\n return self.scene()\n\n\n_testing_context = nullcontext\n"
]
| [
[
"matplotlib.backends.backend_nbagg.FigureCanvasNbAgg",
"matplotlib.backends.backend_nbagg.FigureManager"
]
]
|
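The _generate_callback helper in the row above adapts a plain callback to the change dict that ipywidgets delivers through observe. A minimal standalone sketch of the same pattern (runnable in a Jupyter notebook):

    from ipywidgets import IntSlider

    def _generate_callback(callback, to_float=False):
        # unwrap the change dict that widget.observe() passes in
        def func(change):
            value = change["new"] if "new" in change else change["old"]
            callback(float(value) if to_float else value)
        return func

    slider = IntSlider(value=5, min=0, max=10)
    slider.observe(_generate_callback(print), names='value')  # prints each new value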
gkiar/C-PAC | [
"0926b451dd8622b25eb68c7bcc770f0156238b23"
]
| [
"CPAC/pipeline/cpac_cwas_pipeline.py"
]
| [
"import os\n\nimport nipype.interfaces.io as nio\nfrom CPAC.pipeline import nipype_pipeline_engine as pe\n\nfrom CPAC.utils.configuration import Configuration\n\n\ndef prep_cwas_workflow(c, subject_infos):\n print('Preparing CWAS workflow')\n p_id, s_ids, scan_ids, s_paths = (list(tup) for tup in zip(*subject_infos))\n print('Subjects', s_ids)\n\n wf = pe.Workflow(name='cwas_workflow')\n wf.base_dir = c.pipeline_setup['working_directory']['path']\n\n from CPAC.cwas import create_cwas\n import numpy as np\n regressor = np.loadtxt(c.cwasRegressorFile)\n\n cw = create_cwas()\n cw.inputs.inputspec.roi = c.cwasROIFile\n cw.inputs.inputspec.subjects = s_paths\n cw.inputs.inputspec.regressor = regressor\n cw.inputs.inputspec.cols = c.cwasRegressorCols\n cw.inputs.inputspec.f_samples = c.cwasFSamples\n cw.inputs.inputspec.strata = c.cwasRegressorStrata # will stay None?\n cw.inputs.inputspec.parallel_nodes = c.cwasParallelNodes\n\n ds = pe.Node(nio.DataSink(), name='cwas_sink')\n out_dir = os.path.dirname(s_paths[0]).replace(s_ids[0], 'cwas_results')\n ds.inputs.base_directory = out_dir\n ds.inputs.container = ''\n\n wf.connect(cw, 'outputspec.F_map',\n ds, 'F_map')\n wf.connect(cw, 'outputspec.p_map',\n ds, 'p_map')\n\n wf.run(plugin='MultiProc',\n plugin_args={'n_procs': c.numCoresPerSubject})\n\n\ndef run(config, subject_infos):\n import subprocess\n subprocess.getoutput('source ~/.bashrc')\n import os\n import pickle\n import yaml\n import yamlordereddictloader\n\n c = Configuration(yaml.safe_load(open(os.path.realpath(config), 'r')))\n\n prep_cwas_workflow(c, pickle.load(open(subject_infos, 'r') ))\n"
]
| [
[
"numpy.loadtxt"
]
]
|
abulte/pygpmf | [
"ac26d489f465df212e5e703881dc080d327d887b"
]
| [
"gpmf/parse.py"
]
| [
"from collections import namedtuple\nimport types\nimport struct\nimport logging\nimport numpy\n\n\nlogger = logging.getLogger(__name__)\n\nKLVItem = namedtuple(\"KLVItem\", [\"key\", \"length\", \"value\"])\nKLVLength = namedtuple(\"KLVLength\", [\"type\", \"size\", \"repeat\"])\n\n\ndef ceil4(x):\n \"\"\" Find the closest greater or equal multiple of 4\n\n Parameters\n ----------\n x: int\n The size\n\n Returns\n -------\n x_ceil: int\n The closest greater integer which is a multiple of 4.\n \"\"\"\n return (((x - 1) >> 2) + 1) << 2\n\n\nnum_types = {\n \"d\": (\"float64\", \"d\"),\n \"f\": (\"float32\", \"f\"),\n \"b\": (\"int8\", \"b\"),\n \"B\": (\"uint8\", \"B\"),\n \"s\": (\"int16\", \"h\"),\n \"S\": (\"uint16\", \"H\"),\n \"l\": (\"int32\", \"i\"),\n \"L\": (\"uint32\", \"I\"),\n \"j\": (\"int64\", \"q\"),\n \"J\": (\"uint64\", \"Q\")\n}\n\n\ndef parse_payload(x, fourcc, type_str, size, repeat):\n \"\"\" Parse the payload\n\n Parameters\n ----------\n x: byte\n The byte array corresponding to the payload\n fourcc: str\n The fourcc code\n type_str: str\n The type of the value\n size: int\n The size of the value\n repeat: int\n The number of times the value is repeated.\n\n Returns\n -------\n payload: object\n The parsed payload. the actual type depends on the type_str and the size and repeat values.\n \"\"\"\n if type_str == \"\\x00\":\n return iter_klv(x)\n else:\n x = x[:size * repeat]\n if type_str == \"c\":\n if fourcc == \"UNIT\":\n x = list(numpy.frombuffer(x, dtype=\"S%i\" % size))\n return [s.decode(\"latin1\") for s in x]\n else:\n return x.decode(\"latin1\")\n\n elif type_str in num_types:\n dtype, stype = num_types[type_str]\n dtype = numpy.dtype(\">\" + stype)\n a = numpy.frombuffer(x, dtype=dtype)\n type_size = dtype.itemsize\n dim1 = size // type_size\n\n if a.size == 1:\n a = a[0]\n elif dim1 > 1 and repeat > 1:\n a = a.reshape(repeat, dim1)\n return a\n elif type_str == \"U\":\n x = x.decode()\n year = \"20\" + x[:2]\n month = x[2:4]\n day = x[4:6]\n hours = x[6:8]\n mins = x[8:10]\n seconds = x[10:]\n return \"%s-%s-%s %s:%s:%s\" % (year, month, day, hours, mins, seconds)\n else:\n return x\n\n\ndef iter_klv(x):\n \"\"\" Iterate on KLV items.\n\n Parameters\n ----------\n x: byte\n The byte array corresponding to the stream.\n\n Returns\n -------\n klv_gen: generator\n A generator of (fourcc, (type_str, size, repeat), payload) tuples.\n \"\"\"\n start = 0\n\n while start < len(x):\n head = struct.unpack(\">cccccBH\", x[start: start + 8])\n fourcc = (b\"\".join(head[:4])).decode()\n type_str, size, repeat = head[4:]\n type_str = type_str.decode()\n start += 8\n payload_size = ceil4(size * repeat)\n payload = parse_payload(x[start: start + payload_size], fourcc, type_str, size, repeat)\n start += payload_size\n\n yield KLVItem(fourcc, KLVLength(type_str, size, repeat), payload)\n\n\ndef filter_klv(x, filter_fourcc):\n \"\"\"Filter only KLV items with chosen fourcc code.\n\n Parameters\n ----------\n x: byte\n The input stream\n filter_fourcc: list of str\n A list of FourCC codes\n\n Returns\n -------\n klv_gen: generator\n De-nested generator of (fourcc, (type_str, size, repeat), payload) with only chosen fourcc\n \"\"\"\n generators = [iter(iter_klv(x))]\n\n while len(generators) > 0:\n it = generators[-1]\n try:\n (fourcc, (type_str, size, repeat), payload) = next(it)\n if fourcc in filter_fourcc:\n yield KLVItem(fourcc, KLVLength(type_str, size, repeat), payload)\n if type_str == \"\\x00\":\n generators.append(iter(payload))\n except StopIteration:\n generators = 
generators[:-1]\n\n\ndef _expand_klv(x):\n if isinstance(x, types.GeneratorType):\n return [\n KLVItem(fourcc, type_size_repeat, _expand_klv(payload))\n for fourcc, type_size_repeat, payload\n in x\n ]\n else:\n return x\n\n\ndef expand_klv(x):\n \"\"\"Expand the klv items\n\n Convert generators of klv items produced by `iter_klv` to lists.\n\n Parameters\n ----------\n x\n\n Returns\n -------\n\n \"\"\"\n return _expand_klv(iter_klv(x))\n"
]
| [
[
"numpy.dtype",
"numpy.frombuffer"
]
]
|
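iter_klv in the row above expects an 8-byte header packed as ">cccccBH" (fourcc, type char, size, repeat) followed by a payload padded to a multiple of 4 bytes. A minimal sketch that builds one uint32 ("L") item by hand and parses it back; it assumes the module is importable as gpmf.parse, and the "DEMO" fourcc is made up:

    import struct
    from gpmf.parse import ceil4, iter_klv  # the module shown above

    payload = struct.pack(">II", 1, 2)  # two big-endian uint32 values
    stream = b"DEMOL" + struct.pack(">BH", 4, 2) + payload  # fourcc+type, size, repeat
    assert ceil4(len(payload)) == len(payload)  # 8 bytes, already 4-aligned

    for fourcc, (type_str, size, repeat), value in iter_klv(stream):
        print(fourcc, type_str, size, repeat, value)  # DEMO L 4 2 [1 2]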
isequeir/jdsypymeasure | [
"0bd7e02a3578cdb733abac91b32361c4b32b6686"
]
| [
"pymeasure/instruments/rigol/rigolds1202ze.py"
]
| [
"#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2020 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\nimport logging\n\nlog = logging.getLogger(__name__)\nlog.addHandler(logging.NullHandler())\n\nimport numpy as np\nimport os\nfrom pymeasure.instruments import Instrument\nfrom pymeasure.instruments.validators import strict_discrete_set, strict_range\n\n\nclass RigolDS1202ZE_Channel():\n \"\"\" Modification of the pymeasure Keysight DSOX1102G Oscilloscope class.\n Made to implement within the RigolDS1202ZE scope class \"\"\"\n\n BOOLS = {True: 1, False: 0}\n\n bwlimit = Instrument.control(\n \"BWLimit?\", \"BWLimit %d\",\n \"\"\" A boolean parameter that toggles 20 MHz internal low-pass filter.\"\"\",\n validator=strict_discrete_set,\n values=BOOLS,\n map_values=True\n )\n\n coupling = Instrument.control(\n \"COUPling?\", \"COUPling %s\",\n \"\"\" A string parameter that determines the coupling (\"ac\" or \"dc\").\"\"\",\n validator=strict_discrete_set,\n values={\"ac\": \"AC\", \"dc\": \"DC\", \"gnd\": \"GND\"},\n map_values=True\n )\n\n # display = Instrument.control(\n # \"DISPlay?\", \"DISPlay %d\",\n # \"\"\" A boolean parameter that toggles the display.\"\"\",\n # validator=strict_discrete_set,\n # values=BOOLS,\n # map_values=True\n # )\n\n invert = Instrument.control(\n \"INVert?\", \"INVert %d\",\n \"\"\" A boolean parameter that toggles the inversion of the input signal.\"\"\",\n validator=strict_discrete_set,\n values=BOOLS,\n map_values=True\n )\n\n # label = Instrument.control(\n # \"LABel?\", 'LABel \"%s\"',\n # \"\"\" A string to label the channel. Labels with more than 10 characters are truncated to 10\n # characters. May contain commonly used ASCII characters. Lower case characters are converted\n # to upper case.\"\"\",\n # get_process=lambda v: str(v[1:-1])\n # )\n\n offset = Instrument.control(\n \"OFFSet?\", \"OFFSet %f\",\n \"\"\" A float parameter to set value that is represented at center of screen in \n Volts. The range of legal values varies depending on range and scale. If the specified value \n is outside of the legal range, the offset value is automatically set to the nearest legal value. \"\"\"\n )\n\n probe_attenuation = Instrument.control(\n \"PROBe?\", \"PROBe %f\",\n \"\"\" A float parameter that specifies the probe attenuation. 
The probe attenuation\n may be from 0.01 to 1000.\"\"\",\n validator=strict_discrete_set,\n values=[0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000]\n )\n\n range = Instrument.control(\n \"RANGe?\", \"RANGe %f\",\n \"\"\" A float parameter that specifies the full-scale vertical axis in Volts.\n When using 1:1 probe attenuation, legal values for the range are from 8 mV to 40 V.\"\"\"\n )\n\n scale = Instrument.control(\n \"SCALe?\", \"SCALe %f\",\n \"\"\" A float parameter that specifies the vertical scale, or units per division, in Volts.\"\"\"\n )\n\n def __init__(self, instrument, number):\n self.instrument = instrument\n self.number = number\n\n def values(self, command, **kwargs):\n \"\"\" Reads a set of values from the instrument through the adapter,\n passing on any keyword arguments.\n \"\"\"\n return self.instrument.values(\":channel%d:%s\" % (\n self.number, command), **kwargs)\n\n def ask(self, command):\n return self.instrument.ask(\":channel%d:%s\" % (self.number, command))\n\n def write(self, command):\n self.instrument.write(\":channel%d:%s\" % (self.number, command))\n\n def setup(self, bwlimit=None, coupling=None, display=None, invert=None, label=None, offset=None,\n probe_attenuation=None, vertical_range=None, scale=None):\n \"\"\" Setup channel. Unspecified settings are not modified. Modifying values such as\n probe attenuation will modify offset, range, etc. Refer to oscilloscope documentation and make\n multiple consecutive calls to setup() if needed.\n :param bwlimit: A boolean, which enables the 20 MHz internal low-pass filter.\n :param coupling: \"ac\" or \"dc\".\n :param display: A boolean, which enables channel display.\n :param invert: A boolean, which enables input signal inversion.\n :param label: Label string with max. 10 characters, may contain commonly used ASCII characters.\n :param offset: Numerical value represented at center of screen, must be inside the legal range.\n :param probe_attenuation: Probe attenuation values from 0.01 to 1000.\n :param vertical_range: Full-scale vertical axis of the selected channel. When using 1:1 probe\n attenuation, legal values for the range are from 8 mV to 40 V. If the\n probe attenuation is changed, the range value is multiplied by the\n probe attenuation factor.\n :param scale: Units per division. \"\"\"\n\n if vertical_range is not None and scale is not None:\n log.warning('Both \"vertical_range\" and \"scale\" are specified. Specified \"scale\" has priority.')\n\n if probe_attenuation is not None: self.probe_attenuation = probe_attenuation\n if bwlimit is not None: self.bwlimit = bwlimit\n if coupling is not None: self.coupling = coupling\n if display is not None: self.display = display\n if invert is not None: self.invert = invert\n if label is not None: self.label = label\n if offset is not None: self.offset = offset\n if vertical_range is not None: self.range = vertical_range\n if scale is not None: self.scale = scale\n\n @property\n def current_configuration(self):\n \"\"\" Read channel configuration as a dict containing the following keys:\n - \"CHAN\": channel number (int)\n - \"OFFS\": vertical offset (float)\n - \"RANG\": vertical range (float)\n - \"COUP\": \"dc\" or \"ac\" coupling (str)\n - \"IMP\": input impedance (str)\n - \"DISP\": currently displayed (bool)\n - \"BWL\": bandwidth limiting enabled (bool)\n - \"INV\": inverted (bool)\n - \"UNIT\": unit (str)\n - \"PROB\": probe attenuation (float)\n - \"PROB:SKEW\": skew factor (float)\n - \"STYP\": probe signal type (str)\n \"\"\"\n\n # Using the instrument's ask method because Channel.ask() adds the prefix \":channelX:\", and to query the\n # configuration details, we actually need to ask \":channelX?\", without a second \":\"\n ch_setup_raw = self.instrument.ask(\":channel%d?\" % self.number).strip(\"\\n\")\n\n # ch_setup_raw has the following format:\n # :CHAN1:RANG +40.0E+00;OFFS +0.00000E+00;COUP DC;IMP ONEM;DISP 1;BWL 0;\n # INV 0;LAB \"1\";UNIT VOLT;PROB +10E+00;PROB:SKEW +0.00E+00;STYP SING\n\n # Cut out the \":CHANx:\" at beginning and split string\n ch_setup_splitted = ch_setup_raw[7:].split(\";\")\n\n # Create dict of setup parameters\n ch_setup_dict = dict(map(lambda v: v.split(\" \"), ch_setup_splitted))\n\n # Add \"CHAN\" key\n ch_setup_dict[\"CHAN\"] = ch_setup_raw[5]\n\n # Convert values to specific type\n to_str = [\"COUP\", \"IMP\", \"UNIT\", \"STYP\"]\n to_bool = [\"DISP\", \"BWL\", \"INV\"]\n to_float = [\"OFFS\", \"PROB\", \"PROB:SKEW\", \"RANG\"]\n to_int = [\"CHAN\"]\n for key in ch_setup_dict:\n if key in to_str:\n ch_setup_dict[key] = str(ch_setup_dict[key])\n elif key in to_bool:\n ch_setup_dict[key] = (ch_setup_dict[key] == \"1\")\n elif key in to_float:\n ch_setup_dict[key] = float(ch_setup_dict[key])\n elif key in to_int:\n ch_setup_dict[key] = int(ch_setup_dict[key])\n return ch_setup_dict\n\n\nclass RigolDS1202ZE(Instrument):\n \"\"\" Modification of the pymeasure Keysight DSOX1102G Oscilloscope class for the Rigol DS1202Z-E.\n Refer to the Rigol DS1202Z-E Oscilloscope Programmer's Guide for further details about\n using the lower-level methods to interact directly with the scope.\n .. code-block:: python\n\n scope = RigolDS1202ZE(resource)\n scope.autoscale()\n ch1_times, ch1_voltages = scope.waveform_data(source=\"channel1\", points=2000)\n # ...\n scope.shutdown()\n Known issues:\n\n - The digitize command will be completed before the operation is. 
May lead to\n VI_ERROR_TMO (timeout) occurring when sending commands immediately after digitize.\n Current fix: if deemed necessary, add delay between digitize and follow-up command\n to scope.\n \"\"\"\n\n BOOLS = {True: 1, False: 0}\n\n def __init__(self, adapter, **kwargs):\n super(RigolDS1202ZE, self).__init__(\n adapter, \"Rigol DS1202Z-E Oscilloscope\", **kwargs\n )\n # Account for setup time for timebase_mode, waveform_points_mode\n self.adapter.connection.timeout = 6000\n self.ch1 = RigolDS1202ZE_Channel(self, 1)\n self.ch2 = RigolDS1202ZE_Channel(self, 2)\n\n #################\n # Channel setup #\n #################\n\n def autoscale(self):\n \"\"\" Autoscale displayed channels. \"\"\"\n self.write(\":autoscale\")\n\n ##################\n # Timebase Setup #\n ##################\n\n # @property\n # def timebase(self):\n # \"\"\" Read timebase setup as a dict containing the following keys:\n # - \"REF\": position on screen of timebase reference (str)\n # - \"MAIN:RANG\": full-scale timebase range (float)\n # - \"POS\": interval between trigger and reference point (float)\n # - \"MODE\": mode (str)\"\"\"\n # return self._timebase()\n\n timebase_mode = Instrument.control(\n \":TIMebase:MODE?\", \":TIMebase:MODE %s\",\n \"\"\" A string parameter that sets the current time base. Can be \"main\", \n \"window\", \"xy\", or \"roll\" corresponding to YT, XY, ROLL.\"\"\",\n validator=strict_discrete_set,\n values={\"main\": \"MAIN\", \"window\": \"WIND\", \"xy\": \"XY\", \"roll\": \"ROLL\"},\n map_values=True\n )\n\n timebase_offset = Instrument.control(\n \":TIMebase:MAIN:OFFSet?\", \":TIMebase:MAIN:OFFSet %f\",\n \"\"\" A float parameter that sets the time interval in seconds between the trigger \n event and the reference position (at center of screen by default).\"\"\"\n )\n\n # timebase_range = Instrument.control(\n # \":TIMebase:RANGe?\", \":TIMebase:RANGe %f\",\n # \"\"\" A float parameter that sets the full-scale horizontal time in seconds for the\n # main window.\"\"\"\n # )\n\n timebase_scale = Instrument.control(\n \":TIMebase:MAIN:SCALe?\", \":TIMebase:MAIN:SCALe %f\",\n \"\"\" A float parameter that sets the horizontal scale (units per division) in seconds \n for the main window.\"\"\"\n )\n\n ###############\n # Acquisition #\n ###############\n\n acquisition_averages = Instrument.control(\n \":ACQuire:AVERages?\", \":ACQuire:AVERages %i\",\n \"\"\" An integer parameter that sets the number of averages. Can be 2^n where n is an integer from 1 to 10.\"\"\",\n validator=strict_discrete_set,\n values=[2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]\n )\n\n acquisition_type = Instrument.control(\n \":ACQuire:TYPE?\", \":ACQuire:TYPE %s\",\n \"\"\" A string parameter that sets the type of data acquisition. Can be \"normal\", \"average\",\n \"hresolution\", or \"peak\".\"\"\",\n validator=strict_discrete_set,\n values={\"normal\": \"NORM\", \"average\": \"AVER\", \"hresolution\": \"HRES\", \"peak\": \"PEAK\"},\n map_values=True\n )\n\n # TODO: check whether one or two channels are enabled; when two channels are enabled,\n # the legal memory-depth values are half those available with a single channel\n acquisition_memory_depth = Instrument.control(\n \":ACQuire:MDEPth?\", \":ACQuire:MDEPth %s\",\n \"\"\" An integer parameter that sets the number of waveform points to be transferred with\n the waveform_data method. Can be any of the following values if 1 channel is enabled:\n \"AUTO\", 12000, 120000, 1200000, 12000000, 24000000,\n or, if two channels are enabled, the values: \"AUTO\", 6000, 60000, 600000, 6000000, 12000000.\n Note that the oscilloscope may provide fewer than the specified number of points. \n \n Memory Depth = Sample Rate x Waveform Length\n Wherein, the Waveform Length is the product of the horizontal timebase (set by\n the :TIMebase[:MAIN]:SCALe command) times the number of grids in the horizontal\n direction on the screen (12 for DS1000Z-E).\n When AUTO is selected, the oscilloscope will select the memory depth automatically\n according to the current sample rate\"\"\",\n validator=strict_discrete_set,\n values=[\"AUTO\", 12000, 120000, 1200000, 12000000, 24000000]\n )\n\n # read-only query, so Instrument.measurement is used instead of Instrument.control\n acquisition_sample_rate = Instrument.measurement(\n \":ACQuire:SRate?\",\n \"\"\"Query the current sample rate. The default unit is Sa/s. Sample rate is the sample \n frequency of the oscilloscope, namely the waveform points sampled per second.\n The following equation describes the relationship among memory depth, sample rate, and waveform length:\n \n Memory Depth = Sample Rate x Waveform Length\n \n Wherein, the Memory Depth can be set using the :ACQuire:MDEPth command, and\n the Waveform Length is the product of the horizontal timebase (set by\n the :TIMebase[:MAIN]:SCALe command) times the number of the horizontal scales\n (12 for DS1000Z-E).\"\"\"\n )\n\n def run(self):\n \"\"\" Starts repetitive acquisitions. This is the same as pressing the Run key on the front panel.\"\"\"\n self.write(\":run\")\n\n def stop(self):\n \"\"\" Stops the acquisition. This is the same as pressing the Stop key on the front panel.\"\"\"\n self.write(\":stop\")\n\n def single(self):\n \"\"\" Causes the instrument to acquire a single trigger of data.\n This is the same as pressing the Single key on the front panel. \"\"\"\n self.write(\":single\")\n\n def force_trigger(self):\n \"\"\"Generate a trigger signal forcefully. This command is only applicable to the normal and\n single trigger modes (see the :TRIGger:SWEep command) and is equivalent to pressing\n the FORCE key in the trigger control area on the front panel.\"\"\"\n self.write(\":TFORCE\")\n\n # _digitize = Instrument.setting(\n # \":DIGitize %s\",\n # \"\"\" Acquire waveforms according to the settings of the :ACQuire commands and specified source,\n # as a string parameter that can take the following values: \"channel1\", \"channel2\", \"function\",\n # \"math\", \"fft\", \"abus\", or \"ext\". \"\"\",\n # validator=strict_discrete_set,\n # values={\"channel1\": \"CHAN1\", \"channel2\": \"CHAN2\", \"function\": \"FUNC\", \"math\": \"MATH\",\n # \"fft\": \"FFT\", \"abus\": \"ABUS\", \"ext\": \"EXT\"},\n # map_values=True\n # )\n\n # def digitize(self, source: str):\n # \"\"\" Acquire waveforms according to the settings of the :ACQuire commands. Ensure a delay\n # between the digitize operation and further commands, as timeout may be reached before\n # digitize has completed.\n # :param source: \"channel1\", \"channel2\", \"function\", \"math\", \"fft\", \"abus\", or \"ext\".\"\"\"\n # self._digitize = source\n\n waveform_points_mode = Instrument.control(\n \":waveform:mode?\", \":waveform:mode %s\",\n \"\"\" A string parameter that sets the data record to be transferred with the waveform_data\n method. 
Can be \"normal\", \"maximum\", or \"raw\".\"\"\",\n validator=strict_discrete_set,\n values={\"normal\": \"NORM\", \"maximum\": \"MAX\", \"raw\": \"RAW\"},\n map_values=True\n )\n waveform_source = Instrument.control(\n \":waveform:source?\", \":waveform:source %s\",\n \"\"\" A string parameter that selects the analog channel, function, or reference waveform \n to be used as the source for the waveform methods. Can be \"channel1\", \"channel2\", \"math\"\"\",\n validator=strict_discrete_set,\n values={\"channel1\": \"CHAN1\", \"channel2\": \"CHAN2\", \"math\": \"MATH\"},\n map_values=True\n )\n waveform_format = Instrument.control(\n \":waveform:format?\", \":waveform:format %s\",\n \"\"\" A string parameter that controls how the data is formatted when sent from the \n oscilloscope. Can be \"ascii\", \"word\" or \"byte\". Words are transmitted in big endian by default.\"\"\",\n validator=strict_discrete_set,\n values={\"ascii\": \"ASC\", \"word\": \"WORD\", \"byte\": \"BYTE\"},\n map_values=True\n )\n\n @property\n def waveform_preamble(self):\n \"\"\" Get preamble information for the selected waveform source as a dict with the following keys:\n - \"format\": byte, word, or ascii (str)\n - \"type\": 0 (NORMal), 1 (MAXimum) or 2 (RAW) (str)\n - \"points\": an integer between 1 and 24000000(int)\n - \"count\": the number of averages in the average sample mode and 1 in other modes (int)\n - \"xincrement\": the time difference between two neighboring points in the X direction (float)\n - \"xorigin\": first data point in memory (float)\n - \"xreference\": data point associated with xorigin (int)\n - \"yincrement\": voltage difference between data points (float)\n - \"yorigin\": the vertical offset relative to the \"Vertical Reference Position\" in the Y\n direction. ie voltage at center of screen (float)\n - \"yreference\": data point associated with yorigin (int)\"\"\"\n return self._waveform_preamble()\n\n #@property\n def waveform_data(self, source, points=1000, filename=None):\n #TODO: need to figure out how to read data in byte format, currently it is only in ascii format which allows\n # for less elements to be gathered from the scopes buffer.. see the Tip below for further explanation\n\n \"\"\" Get the binary block of sampled data points transmitted using the IEEE 488.2 arbitrary\n block data format.\n\n Tip:(taken directly from Rigol DS1202ZE programming manual)\n When reading the waveform data in the internal memory, the maximum number\n of waveform points can be read each time the :WAV:DATA? 
command is sent is\n related to the return format of the waveform data currently selected, as shown\n in the table below.\n\n Return Format of the Waveform Data | Maximum Number of Waveform Points can be Read Each Time\n (note can not necessarily read the entire buffer on one read depending on the waveform format)\n BYTE | BYTE 250000\n WORD | WORD 125000\n ASCii | ASCii 15625\n\n Before reading the waveform data in the internal memory, you need to judge\n whether the waveform data can all be read at one time according to the memory\n depth of the oscilloscope and the maximum number of waveform points that can\n be read each time (refer to the table above).\n\n 1) When the memory depth of the oscilloscope is lower than or equal to the\n maximum number of waveform points that can be read each time, the\n waveform data in the internal memory can all be read at one time by\n specifying the start point and stop point.\n\n 2) When the memory depth of the oscilloscope is greater than the maximum\n number of waveform points that can be read each time, the waveform data\n in the internal memory need to be read in several batches by specifying the\n start point and stop point. Each time, only the waveform data in one area of\n the internal memory is read (the waveform data of two neighbouring areas\n are continuous); then, you need to combine the waveform data that are\n read separately in sequence.\"\"\"\n\n # Other waveform formats raise UnicodeDecodeError\n\n self.run()\n self.write(\":STOP\")\n self.waveform_source = source\n self.waveform_format = \"ascii\"\n self.waveform_points_mode = \"normal\"\n self.acquisition_memory_depth = points\n\n preamble = self.waveform_preamble\n\n\n max_num_pts = 15625\n num_blocks = preamble['points'] // max_num_pts\n last_block_pts = preamble['points'] % max_num_pts\n\n datas = []\n for i in range(num_blocks + 1):\n if i < num_blocks:\n self.write(':wav:star %i' % (1 + i * 250000))\n self.write(':wav:stop %i' % (250000 * (i + 1)))\n else:\n if last_block_pts:\n self.write(':wav:star %i' % (1 + num_blocks * 250000))\n self.write(':wav:stop %i' % (num_blocks * 250000 + last_block_pts))\n else:\n break\n #first value is a string in ascii format\n data = self.values(':wav:data?')[1:]\n datas.append(data)\n\n datas = np.concatenate(datas)\n #reads in ascii format dont need to account for offsets on scope???? that is why below line\n #voltage_array = (datas - preamble['yorigin'] - preamble['yreference']) * preamble['yincrement']\n voltage_array = datas\n\n time_array = np.arange(0, preamble['points'] * preamble['xincrement'], preamble['xincrement'])\n # info['xorigin'] + info['xreference']\n\n if filename:\n try:\n os.remove(filename)\n except OSError:\n pass\n np.savetxt(filename, np.c_[time_array, voltage_array], '%.12e', ',')\n\n #when using the ascii mode the first value give when you query data? is an a valid waveform point but is a\n #string for some reason.. therefore to output arrays of equal size i just take off the fist point\n # of both voltage and time array\n return time_array[1:], voltage_array\n\n ################\n # System Setup #\n ################\n\n @property\n def system_setup(self):\n \"\"\" A string parameter that sets up the oscilloscope. 
Must be in IEEE 488.2 format.\n It is recommended to only set a string previously obtained from this command.\"\"\"\n return self.ask(\":system:setup?\")\n\n @system_setup.setter\n def system_setup(self, setup_string):\n self.write(\":system:setup \" + setup_string)\n\n def ch(self, channel_number):\n if channel_number == 1:\n return self.ch1\n elif channel_number == 2:\n return self.ch2\n else:\n raise ValueError(\"Invalid channel number. Must be 1 or 2.\")\n\n def check_errors(self):\n \"\"\" Read all errors from the instrument.\"\"\"\n while True:\n err = self.values(\":SYST:ERR?\")\n if int(err[0]) != 0:\n errmsg = \"Rigol DS1202ZE: %s: %s\" % (err[0], err[1])\n log.error(errmsg + \"\\n\")\n else:\n break\n\n def clear_status(self):\n \"\"\" Clear device status. \"\"\"\n self.write(\":CLEAR\")\n\n def factory_reset(self):\n \"\"\" Factory default setup, no user settings remain unchanged. \"\"\"\n self.write(\"*RST\")\n\n def default_setup(self):\n \"\"\" Default setup, some user settings (like preferences) remain unchanged. \"\"\"\n self.write(\":SYSTem:PRESet\")\n\n def timebase_setup(self, mode=None, offset=None, horizontal_range=None, scale=None):\n \"\"\" Set up timebase. Unspecified parameters are not modified. Modifying a single parameter might\n impact other parameters. Refer to oscilloscope documentation and make multiple consecutive calls\n to channel_setup if needed.\n :param mode: Timebase mode, can be \"main\", \"window\", \"xy\", or \"roll\".\n :param offset: Offset in seconds between trigger and center of screen.\n :param horizontal_range: Full-scale range in seconds.\n :param scale: Units-per-division in seconds.\"\"\"\n\n if mode is not None: self.timebase_mode = mode\n if offset is not None: self.timebase_offset = offset\n if horizontal_range is not None: self.timebase_range = horizontal_range\n if scale is not None: self.timebase_scale = scale\n\n def download_image(self, format_=\"png\", color_palette=\"color\"):\n \"\"\" Get image of oscilloscope screen in bytearray of specified file format.\n :param format_: \"bmp\", \"bmp8bit\", or \"png\"\n :param color_palette: \"color\" or \"grayscale\"\n \"\"\"\n query = f\":DISPlay:DATA? {format_}, {color_palette}\"\n # Using binary_values query because default interface does not support binary transfer\n img = self.binary_values(query, header_bytes=10, dtype=np.uint8)\n return bytearray(img)\n\n # def get_data(self, source, points=62500):\n # \"\"\" Get data from specified source of oscilloscope. Returned objects are a np.ndarray of data\n # values (no temporal axis) and a dict of the waveform preamble, which can be used to build the\n # corresponding time values for all data points.\n # Multimeter will be stopped for proper acquisition.\n # :param source: measurement source, can be \"channel1\", \"channel2\", \"function\", \"fft\", \"wmemory1\",\n # \"wmemory2\", or \"ext\".\n # :param points: integer number of points to acquire. Note that oscilloscope may return less points than\n # specified, this is not an issue of this library. 
Can be 100, 250, 500, 1000,\n # 2000, 5000, 10000, 20000, 50000, or 62500.\n # :return data_ndarray, waveform_preamble_dict: see waveform_preamble property for dict format.\n # \"\"\"\n # # TODO: Consider downloading from multiple sources at the same time.\n # self.waveform_source = source\n # self.waveform_points_mode = \"normal\"\n # self.waveform_points = points\n #\n # data_bytes = self.values(':wav:data?')\n # return data_bytes\n\n # def _timebase(self):\n # \"\"\"\n # Reads setup data from timebase and converts it to a more convenient dict of values.\n # \"\"\"\n # tb_setup_raw = self.ask(\":timebase?\").strip(\"\\n\")\n #\n # # tb_setup_raw hat the following format:\n # # :TIM:MODE MAIN;REF CENT;MAIN:RANG +1.00E-03;POS +0.0E+00\n #\n # # Cut out the \":TIM:\" at beginning and split string\n # tb_setup_splitted = tb_setup_raw[5:].split(\";\")\n #\n # # Create dict of setup parameters\n # tb_setup = dict(map(lambda v: v.split(\" \"), tb_setup_splitted))\n #\n # # Convert values to specific type\n # to_str = [\"MODE\", \"REF\"]\n # to_float = [\"MAIN:RANG\", \"POS\"]\n # for key in tb_setup:\n # if key in to_str:\n # tb_setup[key] = str(tb_setup[key])\n # elif key in to_float:\n # tb_setup[key] = float(tb_setup[key])\n #\n # return tb_setup\n\n def _waveform_preamble(self):\n \"\"\"\n Reads waveform preamble and converts it to a more convenient dict of values.\n \"\"\"\n vals = self.values(\":waveform:preamble?\")\n # Get values to dict\n vals_dict = dict(zip([\"format\", \"type\", \"points\", \"count\", \"xincrement\", \"xorigin\",\n \"xreference\", \"yincrement\", \"yorigin\", \"yreference\"], vals))\n # Map element values\n format_map = {0: \"BYTE\", 1: \"WORD\", 2: \"ASC\"}\n type_map = {0: \"NORMal\", 1: \"MAXimum\", 2: \"RAW\"}\n vals_dict[\"format\"] = format_map[int(vals_dict[\"format\"])]\n vals_dict[\"type\"] = type_map[int(vals_dict[\"type\"])]\n\n # Correct types\n to_int = [\"points\", \"count\", \"xreference\", \"yreference\"]\n to_float = [\"xincrement\", \"xorigin\", \"yincrement\", \"yorigin\"]\n for key in vals_dict:\n if key in to_int:\n vals_dict[key] = int(vals_dict[key])\n elif key in to_float:\n vals_dict[key] = float(vals_dict[key])\n\n return vals_dict\n\n\n#from jdsypymeasure.pymeasure.instruments.rigol.rigolds1202ze import RigolDS1202ZE ;\nfrom jdsypymeasure.pymeasure.adapters.visa import VISAAdapter ;\nscope_adapter = VISAAdapter(\"RigolDS1202ZE\") ;\nscope = RigolDS1202ZE(adapter=scope_adapter) ;\ntime, voltage = scope.waveform_data(\"channel2\", points=12000)\ntime2, voltage2 = scope.waveform_data(\"channel2\", points=12000)\nimport matplotlib.pyplot as plt\n\nplt.plot(time,voltage)\nplt.show()\n\n"
]
| [
[
"numpy.concatenate",
"numpy.savetxt",
"matplotlib.pyplot.plot",
"numpy.arange",
"matplotlib.pyplot.show"
]
]
|
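The batched read in `waveform_data` above splits a capture into per-query windows sized to the ASCII limit quoted from the manual. A minimal standalone sketch of just that chunking arithmetic; `read_windows` is a hypothetical helper, not part of the driver, and 15625 is the ASCii per-read limit from the docstring's table:

```python
# Yield the 1-based (start, stop) point windows the driver's loop walks through.
def read_windows(total_points, max_pts=15625):
    full, rem = divmod(total_points, max_pts)
    for i in range(full):
        yield (1 + i * max_pts, (i + 1) * max_pts)
    if rem:  # trailing partial window, if the depth is not a multiple of max_pts
        yield (1 + full * max_pts, full * max_pts + rem)

print(list(read_windows(24000)))  # [(1, 15625), (15626, 24000)]
```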
Pranjal31/cuml | [
"ea9079ba2b9f6cd5416a7b0a13f9af4a384a8eb9"
]
| [
"python/cuml/test/test_random_forest.py"
]
| [
"# Copyright (c) 2019, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport numpy as np\nimport pytest\n\nfrom cuml.ensemble import RandomForestClassifier as curfc\nfrom cuml.ensemble import RandomForestRegressor as curfr\nfrom cuml.metrics import r2_score\nfrom cuml.test.utils import get_handle, unit_param, \\\n quality_param, stress_param\n\nfrom sklearn.ensemble import RandomForestClassifier as skrfc\nfrom sklearn.ensemble import RandomForestRegressor as skrfr\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.datasets import fetch_california_housing, \\\n make_classification, make_regression\nfrom sklearn.model_selection import train_test_split\n\n\[email protected]('nrows', [unit_param(500), quality_param(5000),\n stress_param(500000)])\[email protected]('column_info', [unit_param([20, 10]),\n quality_param([200, 100]),\n stress_param([500, 350])])\[email protected]('rows_sample', [unit_param(1.0), quality_param(0.90),\n stress_param(0.95)])\[email protected]('datatype', [np.float32])\[email protected]('split_algo', [0, 1])\[email protected]('max_features', [1.0, 'auto', 'log2', 'sqrt'])\[email protected]('min_impurity_decrease', [0.0, 1e-10])\ndef test_rf_classification(datatype, split_algo, rows_sample,\n nrows, column_info, max_features,\n min_impurity_decrease):\n use_handle = True\n ncols, n_info = column_info\n\n if datatype == np.float64:\n pytest.xfail(\"Datatype np.float64 will run only on the CPU\"\n \" please convert the data to dtype np.float32\")\n\n X, y = make_classification(n_samples=nrows, n_features=ncols,\n n_clusters_per_class=1, n_informative=n_info,\n random_state=123, n_classes=2)\n X = X.astype(datatype)\n y = y.astype(np.int32)\n X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8,\n random_state=0)\n # Create a handle for the cuml model\n handle, stream = get_handle(use_handle, n_streams=8)\n\n # Initialize, fit and predict using cuML's\n # random forest classification model\n cuml_model = curfc(max_features=max_features, rows_sample=rows_sample,\n n_bins=8, split_algo=split_algo, split_criterion=0,\n min_rows_per_node=2,\n n_estimators=40, handle=handle, max_leaves=-1,\n max_depth=16,\n min_impurity_decrease=min_impurity_decrease)\n cuml_model.fit(X_train, y_train)\n fil_preds = cuml_model.predict(X_test,\n predict_model=\"GPU\",\n output_class=True,\n threshold=0.5,\n algo='BATCH_TREE_REORG')\n cu_predict = cuml_model.predict(X_test, predict_model=\"CPU\")\n cuml_acc = accuracy_score(y_test, cu_predict)\n fil_acc = accuracy_score(y_test, fil_preds)\n assert fil_acc >= (cuml_acc - 0.02)\n if nrows < 500000:\n sk_model = skrfc(n_estimators=40,\n max_depth=16,\n min_samples_split=2, max_features=max_features,\n random_state=10,\n min_impurity_decrease=min_impurity_decrease)\n sk_model.fit(X_train, y_train)\n sk_predict = sk_model.predict(X_test)\n sk_acc = accuracy_score(y_test, sk_predict)\n assert fil_acc >= (sk_acc - 0.07)\n\n\[email protected]('mode', [unit_param('unit'), quality_param('quality'),\n 
stress_param('stress')])\[email protected]('column_info', [unit_param([20, 10]),\n quality_param([200, 50]),\n stress_param([400, 100])])\[email protected]('rows_sample', [unit_param(1.0), quality_param(0.90),\n stress_param(0.95)])\[email protected]('datatype', [np.float32])\[email protected]('split_algo', [0, 1])\[email protected]('max_features', [1.0, 'auto', 'log2', 'sqrt'])\[email protected]('min_impurity_decrease', [0.0, 1e-10])\ndef test_rf_regression(datatype, split_algo, mode,\n column_info, max_features,\n rows_sample, min_impurity_decrease):\n\n ncols, n_info = column_info\n use_handle = True\n if datatype == np.float64:\n pytest.xfail(\"Datatype np.float64 will run only on the CPU\"\n \" please convert the data to dtype np.float32\")\n\n if mode == 'unit':\n X, y = make_regression(n_samples=500, n_features=ncols,\n n_informative=n_info,\n random_state=123)\n\n elif mode == 'quality':\n X, y = fetch_california_housing(return_X_y=True)\n\n else:\n X, y = make_regression(n_samples=100000, n_features=ncols,\n n_informative=n_info,\n random_state=123)\n X = X.astype(datatype)\n y = y.astype(datatype)\n X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8,\n random_state=0)\n\n # Create a handle for the cuml model\n handle, stream = get_handle(use_handle, n_streams=8)\n\n # Initialize and fit using cuML's random forest regression model\n cuml_model = curfr(max_features=max_features, rows_sample=rows_sample,\n n_bins=16, split_algo=split_algo, split_criterion=2,\n min_rows_per_node=2,\n n_estimators=50, handle=handle, max_leaves=-1,\n max_depth=16, accuracy_metric='mse',\n min_impurity_decrease=min_impurity_decrease)\n\n cuml_model.fit(X_train, y_train)\n # predict using FIL\n fil_preds = cuml_model.predict(X_test, predict_model=\"GPU\")\n cu_preds = cuml_model.predict(X_test, predict_model=\"CPU\")\n cu_r2 = r2_score(y_test, cu_preds, convert_dtype=datatype)\n fil_r2 = r2_score(y_test, fil_preds, convert_dtype=datatype)\n assert fil_r2 >= (cu_r2 - 0.02)\n # Initialize, fit and predict using\n # sklearn's random forest regression model\n if mode != 'stress':\n sk_model = skrfr(n_estimators=50, max_depth=16,\n min_samples_split=2, max_features=max_features,\n random_state=10,\n min_impurity_decrease=min_impurity_decrease)\n sk_model.fit(X_train, y_train)\n sk_predict = sk_model.predict(X_test)\n sk_r2 = r2_score(y_test, sk_predict, convert_dtype=datatype)\n assert fil_r2 >= (sk_r2 - 0.07)\n\n\[email protected]('datatype', [np.float32])\[email protected]('column_info', [unit_param([20, 10]),\n quality_param([200, 100]),\n stress_param([500, 350])])\[email protected]('nrows', [unit_param(500), quality_param(5000),\n stress_param(500000)])\ndef test_rf_classification_default(datatype, column_info, nrows):\n\n ncols, n_info = column_info\n X, y = make_classification(n_samples=nrows, n_features=ncols,\n n_clusters_per_class=1, n_informative=n_info,\n random_state=0, n_classes=2)\n X = X.astype(datatype)\n y = y.astype(np.int32)\n X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8,\n random_state=0)\n # Initialize, fit and predict using cuML's\n # random forest classification model\n cuml_model = curfc()\n cuml_model.fit(X_train, y_train)\n cu_predict = cuml_model.predict(X_test)\n cu_acc = accuracy_score(y_test, cu_predict)\n\n # sklearn random forest classification model\n # initialization, fit and predict\n sk_model = skrfc(max_depth=16, random_state=10)\n sk_model.fit(X_train, y_train)\n sk_predict = sk_model.predict(X_test)\n sk_acc = 
accuracy_score(y_test, sk_predict)\n\n # compare the accuracy of the two models\n # github issue 1306: had to increase margin to avoid random CI fails\n # assert cu_acc >= (sk_acc - 0.07)\n assert cu_acc >= (sk_acc - 0.2)\n\n\[email protected]('datatype', [np.float32])\[email protected]('column_info', [unit_param([20, 10]),\n quality_param([200, 100]),\n stress_param([500, 350])])\[email protected]('nrows', [unit_param(500), quality_param(5000),\n stress_param(500000)])\ndef test_rf_regression_default(datatype, column_info, nrows):\n\n ncols, n_info = column_info\n X, y = make_regression(n_samples=100, n_features=ncols,\n n_informative=n_info,\n random_state=123)\n X = X.astype(datatype)\n y = y.astype(datatype)\n X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8,\n random_state=0)\n\n # Initialize, fit and predict using cuML's\n # random forest classification model\n cuml_model = curfr()\n cuml_model.fit(X_train, y_train)\n\n # predict using FIL\n fil_preds = cuml_model.predict(X_test, predict_model=\"GPU\")\n cu_preds = cuml_model.predict(X_test, predict_model=\"CPU\")\n cu_r2 = r2_score(y_test, cu_preds, convert_dtype=datatype)\n fil_r2 = r2_score(y_test, fil_preds, convert_dtype=datatype)\n\n # Initialize, fit and predict using\n # sklearn's random forest regression model\n sk_model = skrfr(max_depth=16, random_state=10)\n sk_model.fit(X_train, y_train)\n sk_predict = sk_model.predict(X_test)\n sk_r2 = r2_score(y_test, sk_predict, convert_dtype=datatype)\n print(fil_r2, cu_r2, sk_r2)\n try:\n assert fil_r2 >= (cu_r2 - 0.02)\n assert fil_r2 >= (sk_r2 - 0.07)\n except AssertionError:\n pytest.xfail(\"failed due to AssertionError error, \"\n \"fix will be merged soon\")\n"
]
| [
[
"sklearn.ensemble.RandomForestClassifier",
"sklearn.datasets.fetch_california_housing",
"sklearn.metrics.accuracy_score",
"sklearn.ensemble.RandomForestRegressor",
"sklearn.datasets.make_regression",
"sklearn.model_selection.train_test_split",
"sklearn.datasets.make_classification"
]
]
|
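The `unit_param`/`quality_param`/`stress_param` helpers imported from `cuml.test.utils` in the test file above wrap parametrize values so whole test tiers can be selected by mark. A plausible sketch of how such helpers can be written (an assumption about their implementation, shown only to make the parametrize lists above readable):

```python
import pytest

# Each helper tags a parametrize value with a mark, so e.g. `pytest -m unit`
# runs only the small-input tier (marks would be registered in pytest.ini).
def unit_param(*args):
    return pytest.param(*args, marks=pytest.mark.unit)

def quality_param(*args):
    return pytest.param(*args, marks=pytest.mark.quality)

def stress_param(*args):
    return pytest.param(*args, marks=pytest.mark.stress)
```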
akashpalrecha/pytorch-NMF | [
"21f6589bf25e2ec3e90edf7d3f7eec538ce04fa0"
]
| [
"torchnmf/base.py"
]
| [
"from torch import nn\n\n\nclass Base(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.fix_neg = nn.Threshold(0., 1e-8)\n\n def fit(self, *args, **kwargs):\n raise NotImplementedError\n\n def fit_transform(self, *args, **kwargs):\n raise NotImplementedError\n\n def sort(self):\n raise NotImplementedError\n"
]
| [
[
"torch.nn.Threshold"
]
]
|
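The `fix_neg = nn.Threshold(0., 1e-8)` member in `Base` above is presumably used by the NMF subclasses to keep factor values strictly positive: anything at or below zero is replaced by a small epsilon. A quick demonstration of that operator on its own:

```python
import torch
from torch import nn

fix_neg = nn.Threshold(0., 1e-8)   # y = x if x > 0 else 1e-8
x = torch.tensor([-0.5, 0.0, 0.3])
print(fix_neg(x))                  # tensor([1.0000e-08, 1.0000e-08, 3.0000e-01])
```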
KyuboNoh/HY | [
"8ba9815137c2cff2f1931a1940e1b762e8df0b02"
]
| [
"tests/base/test_Solver.py"
]
| [
"import unittest\nfrom SimPEG import *\nfrom SimPEG.Mesh import TensorMesh\nfrom SimPEG.Utils import sdiag\nimport numpy as np\nimport scipy.sparse as sparse\n\nTOLD = 1e-10\nTOLI = 1e-3\nnumRHS = 5\n\ndef dotest(MYSOLVER, multi=False, A=None, **solverOpts):\n if A is None:\n h1 = np.ones(10)*100.\n h2 = np.ones(10)*100.\n h3 = np.ones(10)*100.\n\n h = [h1,h2,h3]\n\n M = TensorMesh(h)\n\n D = M.faceDiv\n G = -M.faceDiv.T\n Msig = M.getFaceInnerProduct()\n A = D*Msig*G\n A[-1,-1] *= 1/M.vol[-1] # remove the constant null space from the matrix\n else:\n M = Mesh.TensorMesh([A.shape[0]])\n\n Ainv = MYSOLVER(A, **solverOpts)\n if multi:\n e = np.ones(M.nC)\n else:\n e = np.ones((M.nC, numRHS))\n rhs = A * e\n x = Ainv * rhs\n Ainv.clean()\n return np.linalg.norm(e-x,np.inf)\n\nclass TestSolver(unittest.TestCase):\n\n def test_direct_spsolve_1(self): self.assertLess(dotest(Solver, False),TOLD)\n def test_direct_spsolve_M(self): self.assertLess(dotest(Solver, True),TOLD)\n\n def test_direct_splu_1(self): self.assertLess(dotest(SolverLU, False),TOLD)\n def test_direct_splu_M(self): self.assertLess(dotest(SolverLU, True),TOLD)\n\n def test_iterative_diag_1(self): self.assertLess(dotest(SolverDiag, False, A=Utils.sdiag(np.random.rand(10)+1.0)),TOLI)\n def test_iterative_diag_M(self): self.assertLess(dotest(SolverDiag, True, A=Utils.sdiag(np.random.rand(10)+1.0)),TOLI)\n\n def test_iterative_cg_1(self): self.assertLess(dotest(SolverCG, False),TOLI)\n def test_iterative_cg_M(self): self.assertLess(dotest(SolverCG, True),TOLI)\n\n\n\nif __name__ == '__main__':\n unittest.main()\n"
]
| [
[
"numpy.linalg.norm",
"numpy.random.rand",
"numpy.ones"
]
]
|
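The core check in `dotest` above is solver-agnostic: manufacture a known solution `e`, form `rhs = A @ e`, solve, and compare in the infinity norm. The same check written against scipy directly, without SimPEG's `Solver` wrappers (a standalone sketch for illustration):

```python
import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import spsolve

A = sp.diags(np.random.rand(10) + 1.0).tocsc()  # well-conditioned diagonal system
e = np.ones(10)                                 # manufactured solution
x = spsolve(A, A @ e)                           # solve A x = A e
assert np.linalg.norm(e - x, np.inf) < 1e-10    # direct solve: tight tolerance
```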
ziongh/cudf | [
"3195eb7fe3bf52ac68fc5e718e746df3bfcb683a",
"3195eb7fe3bf52ac68fc5e718e746df3bfcb683a"
]
| [
"python/cudf/cudf/core/column_accessor.py",
"python/cudf/cudf/utils/utils.py"
]
| [
"# Copyright (c) 2021, NVIDIA CORPORATION.\n\nfrom __future__ import annotations\n\nimport itertools\nfrom collections import OrderedDict\nfrom collections.abc import MutableMapping\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Mapping,\n Optional,\n Tuple,\n Union,\n)\n\nimport pandas as pd\n\nimport cudf\nfrom cudf.utils.utils import (\n OrderedColumnDict,\n cached_property,\n to_flat_dict,\n to_nested_dict,\n)\n\nif TYPE_CHECKING:\n from cudf.core.column import ColumnBase\n\n\nclass ColumnAccessor(MutableMapping):\n\n _data: \"OrderedDict[Any, ColumnBase]\"\n multiindex: bool\n _level_names: Tuple[Any, ...]\n\n def __init__(\n self,\n data: Union[MutableMapping, ColumnAccessor] = None,\n multiindex: bool = False,\n level_names=None,\n ):\n \"\"\"\n Parameters\n ----------\n data : mapping\n Mapping of keys to column values.\n multiindex : bool, optional\n Whether tuple keys represent a hierarchical\n index with multiple \"levels\" (default=False).\n level_names : tuple, optional\n Tuple containing names for each of the levels.\n For a non-hierarchical index, a tuple of size 1\n may be passe.\n \"\"\"\n if data is None:\n data = {}\n # TODO: we should validate the keys of `data`\n if isinstance(data, ColumnAccessor):\n multiindex = multiindex or data.multiindex\n level_names = level_names or data.level_names\n self._data = data._data\n self.multiindex = multiindex\n self._level_names = level_names\n\n self._data = OrderedColumnDict(data)\n self.multiindex = multiindex\n self._level_names = level_names\n\n def __iter__(self):\n return self._data.__iter__()\n\n def __getitem__(self, key: Any) -> ColumnBase:\n return self._data[key]\n\n def __setitem__(self, key: Any, value: Any):\n self.set_by_label(key, value)\n self._clear_cache()\n\n def __delitem__(self, key: Any):\n self._data.__delitem__(key)\n self._clear_cache()\n\n def __len__(self) -> int:\n return len(self._data)\n\n def __repr__(self) -> str:\n type_info = (\n f\"{self.__class__.__name__}(\"\n f\"multiindex={self.multiindex}, \"\n f\"level_names={self.level_names})\"\n )\n column_info = \"\\n\".join(\n [f\"{name}: {col.dtype}\" for name, col in self.items()]\n )\n return f\"{type_info}\\n{column_info}\"\n\n @property\n def level_names(self) -> Tuple[Any, ...]:\n if self._level_names is None or len(self._level_names) == 0:\n return tuple((None,) * max(1, self.nlevels))\n else:\n return self._level_names\n\n @property\n def nlevels(self) -> int:\n if len(self._data) == 0:\n return 0\n if not self.multiindex:\n return 1\n else:\n return len(next(iter(self.keys())))\n\n @property\n def name(self) -> Any:\n if len(self._data) == 0:\n return None\n return self.level_names[-1]\n\n @property\n def nrows(self) -> int:\n if len(self._data) == 0:\n return 0\n else:\n return len(next(iter(self.values())))\n\n @cached_property\n def names(self) -> Tuple[Any, ...]:\n return tuple(self.keys())\n\n @cached_property\n def columns(self) -> Tuple[ColumnBase, ...]:\n return tuple(self.values())\n\n @cached_property\n def _grouped_data(self) -> MutableMapping:\n \"\"\"\n If self.multiindex is True,\n return the underlying mapping as a nested mapping.\n \"\"\"\n if self.multiindex:\n return to_nested_dict(dict(zip(self.names, self.columns)))\n else:\n return self._data\n\n def _clear_cache(self):\n cached_properties = \"columns\", \"names\", \"_grouped_data\"\n for attr in cached_properties:\n try:\n self.__delattr__(attr)\n except AttributeError:\n pass\n\n def to_pandas_index(self) -> pd.Index:\n \"\"\"\"\n Convert the keys of the 
ColumnAccessor to a Pandas Index object.\n \"\"\"\n if self.multiindex and len(self.level_names) > 0:\n # Using `from_frame()` instead of `from_tuples`\n # prevents coercion of values to a different type\n # (e.g., ''->NaT)\n result = pd.MultiIndex.from_frame(\n pd.DataFrame(\n self.names, columns=self.level_names, dtype=\"object\"\n ),\n )\n else:\n result = pd.Index(self.names, name=self.name, tupleize_cols=False)\n return result\n\n def insert(self, name: Any, value: Any, loc: int = -1):\n \"\"\"\n Insert column into the ColumnAccessor at the specified location.\n\n Parameters\n ----------\n name : Name corresponding to the new column\n value : column-like\n loc : int, optional\n The location to insert the new value at.\n Must be (0 <= loc <= ncols). By default, the column is added\n to the end.\n\n Returns\n -------\n None, this function operates in-place.\n \"\"\"\n name = self._pad_key(name)\n\n ncols = len(self._data)\n if loc == -1:\n loc = ncols\n if not (0 <= loc <= ncols):\n raise ValueError(\n \"insert: loc out of bounds: must be 0 <= loc <= ncols\"\n )\n # TODO: we should move all insert logic here\n if name in self._data:\n raise ValueError(f\"Cannot insert '{name}', already exists\")\n if loc == len(self._data):\n self._data[name] = value\n else:\n new_keys = self.names[:loc] + (name,) + self.names[loc:]\n new_values = self.columns[:loc] + (value,) + self.columns[loc:]\n self._data = self._data.__class__(zip(new_keys, new_values))\n self._clear_cache()\n\n def copy(self, deep=False) -> ColumnAccessor:\n \"\"\"\n Make a copy of this ColumnAccessor.\n \"\"\"\n if deep:\n return self.__class__(\n {k: v.copy(deep=True) for k, v in self._data.items()},\n multiindex=self.multiindex,\n level_names=self.level_names,\n )\n return self.__class__(\n self._data.copy(),\n multiindex=self.multiindex,\n level_names=self.level_names,\n )\n\n def select_by_label(self, key: Any) -> ColumnAccessor:\n \"\"\"\n Return a subset of this column accessor,\n composed of the keys specified by `key`.\n\n Parameters\n ----------\n key : slice, list-like, tuple or scalar\n\n Returns\n -------\n ColumnAccessor\n \"\"\"\n if isinstance(key, slice):\n return self._select_by_label_slice(key)\n elif pd.api.types.is_list_like(key) and not isinstance(key, tuple):\n return self._select_by_label_list_like(key)\n else:\n if isinstance(key, tuple):\n if any(isinstance(k, slice) for k in key):\n return self._select_by_label_with_wildcard(key)\n return self._select_by_label_grouped(key)\n\n def select_by_index(self, index: Any) -> ColumnAccessor:\n \"\"\"\n Return a ColumnAccessor composed of the columns\n specified by index.\n\n Parameters\n ----------\n key : integer, integer slice, or list-like of integers\n\n Returns\n -------\n ColumnAccessor\n \"\"\"\n if isinstance(index, slice):\n start, stop, step = index.indices(len(self._data))\n keys = self.names[start:stop:step]\n elif pd.api.types.is_integer(index):\n keys = self.names[index : index + 1]\n else:\n keys = (self.names[i] for i in index)\n data = {k: self._data[k] for k in keys}\n return self.__class__(\n data, multiindex=self.multiindex, level_names=self.level_names,\n )\n\n def set_by_label(self, key: Any, value: Any):\n \"\"\"\n Add (or modify) column by name.\n\n Parameters\n ----------\n key : name of the column\n value : column-like\n \"\"\"\n key = self._pad_key(key)\n self._data[key] = value\n self._clear_cache()\n\n def _select_by_label_list_like(self, key: Any) -> ColumnAccessor:\n return self.__class__(\n to_flat_dict({k: self._grouped_data[k] 
for k in key}),\n multiindex=self.multiindex,\n level_names=self.level_names,\n )\n\n def _select_by_label_grouped(self, key: Any) -> ColumnAccessor:\n result = self._grouped_data[key]\n if isinstance(result, cudf.core.column.ColumnBase):\n return self.__class__({key: result})\n else:\n result = to_flat_dict(result)\n if not isinstance(key, tuple):\n key = (key,)\n return self.__class__(\n result,\n multiindex=self.nlevels - len(key) > 1,\n level_names=self.level_names[len(key) :],\n )\n\n def _select_by_label_slice(self, key: slice) -> ColumnAccessor:\n start, stop = key.start, key.stop\n if key.step is not None:\n raise TypeError(\"Label slicing with step is not supported\")\n\n if start is None:\n start = self.names[0]\n if stop is None:\n stop = self.names[-1]\n start = self._pad_key(start, slice(None))\n stop = self._pad_key(stop, slice(None))\n for idx, name in enumerate(self.names):\n if _compare_keys(name, start):\n start_idx = idx\n break\n for idx, name in enumerate(reversed(self.names)):\n if _compare_keys(name, stop):\n stop_idx = len(self.names) - idx\n break\n keys = self.names[start_idx:stop_idx]\n return self.__class__(\n {k: self._data[k] for k in keys},\n multiindex=self.multiindex,\n level_names=self.level_names,\n )\n\n def _select_by_label_with_wildcard(self, key: Any) -> ColumnAccessor:\n key = self._pad_key(key, slice(None))\n return self.__class__(\n {k: self._data[k] for k in self._data if _compare_keys(k, key)},\n multiindex=self.multiindex,\n level_names=self.level_names,\n )\n\n def _pad_key(self, key: Any, pad_value=\"\") -> Any:\n \"\"\"\n Pad the provided key to a length equal to the number\n of levels.\n \"\"\"\n if not self.multiindex:\n return key\n if not isinstance(key, tuple):\n key = (key,)\n return key + (pad_value,) * (self.nlevels - len(key))\n\n def rename_levels(\n self, mapper: Union[Mapping[Any, Any], Callable], level: Optional[int]\n ) -> ColumnAccessor:\n \"\"\"\n Rename the specified levels of the given ColumnAccessor\n\n Parameters\n ----------\n self : ColumnAccessor of a given dataframe\n\n mapper : dict-like or function transformations to apply to\n the column label values depending on selected ``level``.\n\n If dict-like, only replace the specified level of the\n ColumnAccessor's keys (that match the mapper's keys) with\n mapper's values\n\n If callable, the function is applied only to the specified level\n of the ColumnAccessor's keys.\n\n level : int\n In case of RangeIndex, only supported level is [0, None].\n In case of a MultiColumn, only the column labels in the specified\n level of the ColumnAccessor's keys will be transformed.\n\n Returns\n -------\n A new ColumnAccessor with values in the keys replaced according\n to the given mapper and level.\n\n \"\"\"\n if self.multiindex:\n\n def rename_column(x):\n x = list(x)\n if isinstance(mapper, Mapping):\n x[level] = mapper.get(x[level], x[level])\n else:\n x[level] = mapper(x[level])\n x = tuple(x)\n return x\n\n if level is None:\n raise NotImplementedError(\n \"Renaming columns with a MultiIndex and level=None is\"\n \"not supported\"\n )\n new_names = map(rename_column, self.keys())\n ca = ColumnAccessor(\n dict(zip(new_names, self.values())),\n level_names=self.level_names,\n multiindex=self.multiindex,\n )\n\n else:\n if level is None:\n level = 0\n if level != 0:\n raise IndexError(\n f\"Too many levels: Index has only 1 level, not {level+1}\"\n )\n if isinstance(mapper, Mapping):\n new_names = (\n mapper.get(col_name, col_name) for col_name in self.keys()\n )\n else:\n 
new_names = (mapper(col_name) for col_name in self.keys())\n ca = ColumnAccessor(\n dict(zip(new_names, self.values())),\n level_names=self.level_names,\n multiindex=self.multiindex,\n )\n\n return self.__class__(ca)\n\n\ndef _compare_keys(target: Any, key: Any) -> bool:\n \"\"\"\n Compare `key` to `target`.\n\n Return True if each value in `key` == corresponding value in `target`.\n If any value in `key` is slice(None), it is considered equal\n to the corresponding value in `target`.\n \"\"\"\n if not isinstance(target, tuple):\n return target == key\n for k1, k2 in itertools.zip_longest(target, key, fillvalue=None):\n if k2 == slice(None):\n continue\n if k1 != k2:\n return False\n return True\n",
"# Copyright (c) 2020-2021, NVIDIA CORPORATION.\n\nimport functools\nfrom collections import OrderedDict\nfrom collections.abc import Sequence\nfrom math import floor, isinf, isnan\n\nimport cupy as cp\nimport numpy as np\nimport pandas as pd\nfrom numba import njit\n\nimport rmm\n\nimport cudf\nfrom cudf.core import column\nfrom cudf.core.buffer import Buffer\nfrom cudf.utils.dtypes import to_cudf_compatible_scalar\n\nmask_dtype = np.dtype(np.int32)\nmask_bitsize = mask_dtype.itemsize * 8\n\n_EQUALITY_OPS = {\n \"eq\",\n \"ne\",\n \"lt\",\n \"gt\",\n \"le\",\n \"ge\",\n \"__eq__\",\n \"__ne__\",\n \"__lt__\",\n \"__gt__\",\n \"__le__\",\n \"__ge__\",\n}\n\n\n@njit\ndef mask_get(mask, pos):\n return (mask[pos // mask_bitsize] >> (pos % mask_bitsize)) & 1\n\n\n@njit\ndef check_equals_float(a, b):\n return (\n a == b\n or (isnan(a) and isnan(b))\n or ((isinf(a) and a < 0) and (isinf(b) and b < 0))\n or ((isinf(a) and a > 0) and (isinf(b) and b > 0))\n )\n\n\n@njit\ndef rint(x):\n \"\"\"Round to the nearest integer.\n\n Returns\n -------\n The nearest integer, as a float.\n \"\"\"\n y = floor(x)\n r = x - y\n\n if r > 0.5:\n y += 1.0\n if r == 0.5:\n r = y - 2.0 * floor(0.5 * y)\n if r == 1.0:\n y += 1.0\n return y\n\n\n@njit\ndef check_equals_int(a, b):\n return a == b\n\n\ndef scalar_broadcast_to(scalar, size, dtype=None):\n\n if isinstance(size, (tuple, list)):\n size = size[0]\n\n if scalar is None or (\n isinstance(scalar, (np.datetime64, np.timedelta64))\n and np.isnat(scalar)\n ):\n if dtype is None:\n dtype = \"object\"\n return column.column_empty(size, dtype=dtype, masked=True)\n\n if isinstance(scalar, pd.Categorical):\n if dtype is None:\n return _categorical_scalar_broadcast_to(scalar, size)\n else:\n return scalar_broadcast_to(scalar.categories[0], size).astype(\n dtype\n )\n\n scalar = to_cudf_compatible_scalar(scalar, dtype=dtype)\n dtype = scalar.dtype\n\n if np.dtype(dtype).kind in (\"O\", \"U\"):\n gather_map = column.full(size, 0, dtype=\"int32\")\n scalar_str_col = column.as_column([scalar], dtype=\"str\")\n return scalar_str_col[gather_map]\n else:\n out_col = column.column_empty(size, dtype=dtype)\n if out_col.size != 0:\n out_col.data_array_view[:] = scalar\n return out_col\n\n\ndef normalize_index(index, size, doraise=True):\n \"\"\"Normalize negative index\n \"\"\"\n if index < 0:\n index = size + index\n if doraise and not (0 <= index < size):\n raise IndexError(\"out-of-bound\")\n return min(index, size)\n\n\nlist_types_tuple = (list, np.array)\n\n\ndef get_result_name(left, right):\n \"\"\"\n This function will give appropriate name for the operations\n involving two Series, Index's or combination of both.\n\n Parameters\n ----------\n left : {Series, Index}\n right : object\n\n Returns\n -------\n name : object {string or None}\n \"\"\"\n\n if isinstance(right, (cudf.Series, cudf.Index, pd.Series, pd.Index)):\n name = compare_and_get_name(left, right)\n else:\n name = left.name\n return name\n\n\ndef compare_and_get_name(a, b):\n \"\"\"\n If both a & b have name attribute, and they are\n same return the common name.\n Else, return either one of the name of a or b,\n whichever is present.\n\n Parameters\n ----------\n a : object\n b : object\n\n Returns\n -------\n name : str or None\n \"\"\"\n a_has = hasattr(a, \"name\")\n b_has = hasattr(b, \"name\")\n\n if a_has and b_has:\n if a.name == b.name:\n return a.name\n else:\n return None\n elif a_has:\n return a.name\n elif b_has:\n return b.name\n return None\n\n\ndef initfunc(f):\n \"\"\"\n Decorator for 
initialization functions that should\n be run exactly once.\n \"\"\"\n\n @functools.wraps(f)\n def wrapper(*args, **kwargs):\n if wrapper.initialized:\n return\n wrapper.initialized = True\n return f(*args, **kwargs)\n\n wrapper.initialized = False\n return wrapper\n\n\ndef get_null_series(size, dtype=np.bool_):\n \"\"\"\n Creates a null series of provided dtype and size\n\n Parameters\n ----------\n size: length of series\n dtype: dtype of series to create; defaults to bool.\n\n Returns\n -------\n a null cudf series of provided `size` and `dtype`\n \"\"\"\n\n empty_col = column.column_empty(size, dtype, True)\n return cudf.Series(empty_col)\n\n\n# taken from dask array\n# https://github.com/dask/dask/blob/master/dask/array/utils.py#L352-L363\ndef _is_nep18_active():\n class A:\n def __array_function__(self, *args, **kwargs):\n return True\n\n try:\n return np.concatenate([A()])\n except ValueError:\n return False\n\n\n@initfunc\ndef set_allocator(\n allocator=\"default\",\n pool=False,\n initial_pool_size=None,\n enable_logging=False,\n):\n \"\"\"\n Set the GPU memory allocator. This function should be run only once,\n before any cudf objects are created.\n\n allocator : {\"default\", \"managed\"}\n \"default\": use default allocator.\n \"managed\": use managed memory allocator.\n pool : bool\n Enable memory pool.\n initial_pool_size : int\n Memory pool size in bytes. If ``None`` (default), 1/2 of total\n GPU memory is used. If ``pool=False``, this argument is ignored.\n enable_logging : bool, optional\n Enable logging (default ``False``).\n Enabling this option will introduce performance overhead.\n \"\"\"\n use_managed_memory = True if allocator == \"managed\" else False\n\n rmm.reinitialize(\n pool_allocator=pool,\n managed_memory=use_managed_memory,\n initial_pool_size=initial_pool_size,\n logging=enable_logging,\n )\n\n\nIS_NEP18_ACTIVE = _is_nep18_active()\n\n\nclass cached_property:\n \"\"\"\n Like @property, but only evaluated upon first invocation.\n To force re-evaluation of a cached_property, simply delete\n it with `del`.\n \"\"\"\n\n def __init__(self, func):\n self.func = func\n\n def __get__(self, instance, cls):\n if instance is None:\n return self\n else:\n value = self.func(instance)\n setattr(instance, self.func.__name__, value)\n return value\n\n\nclass ColumnValuesMappingMixin:\n \"\"\"\n Coerce provided values for the mapping to Columns.\n \"\"\"\n\n def __setitem__(self, key, value):\n\n value = column.as_column(value)\n super().__setitem__(key, value)\n\n\nclass EqualLengthValuesMappingMixin:\n \"\"\"\n Require all values in the mapping to have the same length.\n \"\"\"\n\n def __setitem__(self, key, value):\n if len(self) > 0:\n first = next(iter(self.values()))\n if len(value) != len(first):\n raise ValueError(\"All values must be of equal length\")\n super().__setitem__(key, value)\n\n\nclass OrderedColumnDict(\n ColumnValuesMappingMixin, EqualLengthValuesMappingMixin, OrderedDict\n):\n pass\n\n\nclass NestedMappingMixin:\n \"\"\"\n Make missing values of a mapping empty instances\n of the same type as the mapping.\n \"\"\"\n\n def __getitem__(self, key):\n if isinstance(key, tuple):\n d = self\n for k in key[:-1]:\n d = d[k]\n return d.__getitem__(key[-1])\n else:\n return super().__getitem__(key)\n\n def __setitem__(self, key, value):\n if isinstance(key, tuple):\n d = self\n for k in key[:-1]:\n d = d.setdefault(k, self.__class__())\n d.__setitem__(key[-1], value)\n else:\n super().__setitem__(key, value)\n\n\nclass NestedOrderedDict(NestedMappingMixin, 
OrderedDict):\n pass\n\n\ndef to_flat_dict(d):\n \"\"\"\n Convert the given nested dictionary to a flat dictionary\n with tuple keys.\n \"\"\"\n\n def _inner(d, parents=None):\n if parents is None:\n parents = []\n for k, v in d.items():\n if not isinstance(v, d.__class__):\n if parents:\n k = tuple(parents + [k])\n yield (k, v)\n else:\n yield from _inner(d=v, parents=parents + [k])\n\n return {k: v for k, v in _inner(d)}\n\n\ndef to_nested_dict(d):\n \"\"\"\n Convert the given dictionary with tuple keys to a NestedOrderedDict.\n \"\"\"\n return NestedOrderedDict(d)\n\n\ndef time_col_replace_nulls(input_col):\n\n null = column.column_empty_like(input_col, masked=True, newsize=1)\n out_col = cudf._lib.replace.replace(\n input_col,\n column.as_column(\n Buffer(\n np.array(\n [input_col.default_na_value()], dtype=input_col.dtype\n ).view(\"|u1\")\n ),\n dtype=input_col.dtype,\n ),\n null,\n )\n return out_col\n\n\ndef raise_iteration_error(obj):\n raise TypeError(\n f\"{obj.__class__.__name__} object is not iterable. \"\n f\"Consider using `.to_arrow()`, `.to_pandas()` or `.values_host` \"\n f\"if you wish to iterate over the values.\"\n )\n\n\ndef pa_mask_buffer_to_mask(mask_buf, size):\n \"\"\"\n Convert PyArrow mask buffer to cuDF mask buffer\n \"\"\"\n mask_size = cudf._lib.null_mask.bitmask_allocation_size_bytes(size)\n if mask_buf.size < mask_size:\n dbuf = rmm.DeviceBuffer(size=mask_size)\n dbuf.copy_from_host(np.asarray(mask_buf).view(\"u1\"))\n return Buffer(dbuf)\n return Buffer(mask_buf)\n\n\ndef isnat(val):\n if not isinstance(val, (np.datetime64, np.timedelta64, str)):\n return False\n else:\n return val in {\"NaT\", \"NAT\"} or np.isnat(val)\n\n\ndef _fillna_natwise(col):\n # If the value we are filling is np.datetime64(\"NAT\")\n # we set the same mask as current column.\n # However where there are \"<NA>\" in the\n # columns, their corresponding locations\n nat = cudf._lib.scalar._create_proxy_nat_scalar(col.dtype)\n result = cudf._lib.replace.replace_nulls(col, nat)\n return column.build_column(\n data=result.base_data,\n dtype=result.dtype,\n mask=col.base_mask,\n size=result.size,\n offset=result.offset,\n children=result.base_children,\n )\n\n\ndef search_range(start, stop, x, step=1, side=\"left\"):\n \"\"\"Find the position to insert a value in a range, so that the resulting\n sequence remains sorted.\n\n When ``side`` is set to 'left', the insertion point ``i`` will hold the\n following invariant:\n `all(x < n for x in range_left) and all(x >= n for x in range_right)`\n where ``range_left`` and ``range_right`` refers to the range to the left\n and right of position ``i``, respectively.\n\n When ``side`` is set to 'right', ``i`` will hold the following invariant:\n `all(x <= n for x in range_left) and all(x > n for x in range_right)`\n\n Parameters\n --------\n start : int\n Start value of the series\n stop : int\n Stop value of the range\n x : int\n The value to insert\n step : int, default 1\n Step value of the series, assumed positive\n side : {'left', 'right'}, default 'left'\n See description for usage.\n\n Returns\n --------\n int\n Insertion position of n.\n\n Examples\n --------\n For series: 1 4 7\n >>> search_range(start=1, stop=10, x=4, step=3, side=\"left\")\n 1\n >>> search_range(start=1, stop=10, x=4, step=3, side=\"right\")\n 2\n \"\"\"\n z = 1 if side == \"left\" else 0\n i = (x - start - z) // step + 1\n\n length = (stop - start) // step\n return max(min(length, i), 0)\n\n\n_UFUNC_ALIASES = {\n \"power\": \"pow\",\n \"equal\": \"eq\",\n \"not_equal\": 
\"ne\",\n \"less\": \"lt\",\n \"less_equal\": \"le\",\n \"greater\": \"gt\",\n \"greater_equal\": \"ge\",\n \"absolute\": \"abs\",\n}\n# For op(., cudf.Series) -> cudf.Series.__r{op}__\n_REVERSED_NAMES = {\n \"lt\": \"__gt__\",\n \"le\": \"__ge__\",\n \"gt\": \"__lt__\",\n \"ge\": \"__le__\",\n \"eq\": \"__eq__\",\n \"ne\": \"__ne__\",\n}\n\n\n# todo: can probably be used to remove cudf/core/ops.py\ndef _get_cudf_series_ufunc(fname, args, kwargs, cudf_ser_submodule):\n if isinstance(args[0], cudf.Series):\n cudf_ser_func = getattr(cudf_ser_submodule, fname)\n return cudf_ser_func(*args, **kwargs)\n elif len(args) == 2 and isinstance(args[1], cudf.Series):\n rev_name = _REVERSED_NAMES.get(fname, f\"__r{fname}__\")\n cudf_ser_func = getattr(cudf_ser_submodule, rev_name)\n return cudf_ser_func(args[1], args[0], **kwargs)\n return NotImplemented\n\n\n# Utils for using appropriate dispatch for array functions\ndef get_appropriate_dispatched_func(\n cudf_submodule, cudf_ser_submodule, cupy_submodule, func, args, kwargs\n):\n if kwargs.get(\"out\") is None:\n fname = func.__name__\n # Dispatch these functions to appropiate alias from the _UFUNC_ALIASES\n is_ufunc = fname in _UFUNC_ALIASES\n fname = _UFUNC_ALIASES.get(fname, fname)\n\n if hasattr(cudf_submodule, fname):\n cudf_func = getattr(cudf_submodule, fname)\n return cudf_func(*args, **kwargs)\n\n elif hasattr(cudf_ser_submodule, fname):\n if is_ufunc:\n return _get_cudf_series_ufunc(\n fname, args, kwargs, cudf_ser_submodule\n )\n else:\n cudf_ser_func = getattr(cudf_ser_submodule, fname)\n return cudf_ser_func(*args, **kwargs)\n\n elif hasattr(cupy_submodule, fname):\n cupy_func = getattr(cupy_submodule, fname)\n # Handle case if cupy impliments it as a numpy function\n # Unsure if needed\n if cupy_func is func:\n return NotImplemented\n\n cupy_compatible_args, index = _get_cupy_compatible_args_index(args)\n if cupy_compatible_args:\n cupy_output = cupy_func(*cupy_compatible_args, **kwargs)\n return _cast_to_appropriate_cudf_type(cupy_output, index)\n\n return NotImplemented\n\n\ndef _cast_to_appropriate_cudf_type(val, index=None):\n # Handle scalar\n if val.ndim == 0:\n return cudf.Scalar(val).value\n # 1D array\n elif (val.ndim == 1) or (val.ndim == 2 and val.shape[1] == 1):\n # if index is not None and is of a different length\n # than the index, cupy dispatching behaviour is undefined\n # so we dont impliment it\n if (index is None) or (len(index) == len(val)):\n return cudf.Series(val, index=index)\n\n return NotImplemented\n\n\ndef _get_cupy_compatible_args_index(args, ser_index=None):\n \"\"\"\n This function returns cupy compatible arguments and output index\n if conversion is not possible it returns None\n \"\"\"\n\n casted_ls = []\n for arg in args:\n if isinstance(arg, cp.ndarray):\n casted_ls.append(arg)\n elif isinstance(arg, cudf.Series):\n # check if indexes can be aligned\n if (ser_index is None) or (ser_index.equals(arg.index)):\n ser_index = arg.index\n casted_ls.append(arg.values)\n else:\n # this throws a value-error if indexes are not aligned\n # following pandas behavior for ufunc numpy dispatching\n raise ValueError(\n \"Can only compare identically-labeled Series objects\"\n )\n elif isinstance(arg, Sequence):\n # we dont handle list of inputs for functions as\n # these form inputs for functions like\n # np.concatenate, vstack have ambiguity around index alignment\n return None, ser_index\n else:\n casted_ls.append(arg)\n return casted_ls, ser_index\n\n\ndef get_relevant_submodule(func, module):\n # point to the 
correct submodule\n for submodule in func.__module__.split(\".\")[1:]:\n if hasattr(module, submodule):\n module = getattr(module, submodule)\n else:\n return None\n return module\n\n\ndef _categorical_scalar_broadcast_to(cat_scalar, size):\n if isinstance(cat_scalar, (cudf.Series, pd.Series)):\n cats = cat_scalar.cat.categories\n code = cat_scalar.cat.codes[0]\n ordered = cat_scalar.cat.ordered\n else:\n # handles pd.Categorical, cudf.categorical.CategoricalColumn\n cats = cat_scalar.categories\n code = cat_scalar.codes[0]\n ordered = cat_scalar.ordered\n\n cats = column.as_column(cats)\n codes = scalar_broadcast_to(code, size)\n\n return column.build_categorical_column(\n categories=cats,\n codes=codes,\n mask=codes.base_mask,\n size=codes.size,\n offset=codes.offset,\n ordered=ordered,\n )\n\n\ndef _create_pandas_series(\n data=None, index=None, dtype=None, name=None, copy=False, fastpath=False\n):\n \"\"\"\n Wrapper to create a Pandas Series. If the length of data is 0 and\n dtype is not passed, this wrapper defaults the dtype to `float64`.\n\n Parameters\n ----------\n data : array-like, Iterable, dict, or scalar value\n Contains data stored in Series. If data is a dict, argument\n order is maintained.\n index : array-like or Index (1d)\n Values must be hashable and have the same length as data.\n Non-unique index values are allowed. Will default to\n RangeIndex (0, 1, 2, …, n) if not provided.\n If data is dict-like and index is None, then the keys\n in the data are used as the index. If the index is not None,\n the resulting Series is reindexed with the index values.\n dtype : str, numpy.dtype, or ExtensionDtype, optional\n Data type for the output Series. If not specified, this\n will be inferred from data. See the user guide for more usages.\n name : str, optional\n The name to give to the Series.\n copy : bool, default False\n Copy input data.\n\n Returns\n -------\n pd.Series\n \"\"\"\n if (data is None or len(data) == 0) and dtype is None:\n dtype = \"float64\"\n return pd.Series(\n data=data,\n index=index,\n dtype=dtype,\n name=name,\n copy=copy,\n fastpath=fastpath,\n )\n"
]
| [
[
"pandas.api.types.is_integer",
"pandas.DataFrame",
"pandas.Index",
"pandas.api.types.is_list_like"
],
[
"numpy.isnat",
"numpy.asarray",
"numpy.dtype",
"pandas.Series"
]
]
|
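Among the utilities in `cudf/utils/utils.py` above, `search_range` computes a bisect-style insertion index for an arithmetic range in closed form. Reproducing the function verbatim and checking it against its own docstring example:

```python
def search_range(start, stop, x, step=1, side="left"):
    # z shifts the comparison so "left" lands before equal elements, "right" after
    z = 1 if side == "left" else 0
    i = (x - start - z) // step + 1
    length = (stop - start) // step
    return max(min(length, i), 0)

# For the sequence 1, 4, 7 (start=1, stop=10, step=3):
assert search_range(1, 10, 4, step=3, side="left") == 1   # before the existing 4
assert search_range(1, 10, 4, step=3, side="right") == 2  # after the existing 4
```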
alexgonzl/TreeMazeAnalyses | [
"a834dc6b59beffe6bce59cdd9749b761fab3fe08"
]
| [
"PreProcessing/robust_stats.py"
]
| [
"import numpy as np\n\ndef mad(x):\n \"\"\" Computes median absolute deviation for an array.\n Defined as: median(abs(x-median(x)))\n\n Parameters\n ----------\n x: input numpy array (1D)\n\n Returns\n -------\n median absolute deviation (ignoring nan values)\n\n \"\"\"\n medx = np.nanmedian(x)\n return np.nanmedian(np.abs(x-medx))\n\ndef movmad(x,window):\n \"\"\" Computes the moving median absolute deviation for a 1D array. Returns\n an array with the same length of the input array.\n Defined as: median(abs(Ai-median(x)))\n where Ai is a segment of the array x of length window.\n Small window length provides a finer description of deviation\n Longer window coarser (faster to compute).\n\n By default, each segment is centered, going L/2 to L/2-1 around Ai.\n For example for window = 4 and x= [1,2,1,2,5,2,5,2]\n A1=[0,1,2,3], A2=[4,5,6,7], the return array will be\n [1,1,1,1,3,3,3,3]\n\n Parameters\n ----------\n x : input numpy array (1D)\n window : integer for the evaluation window,\n Returns\n -------\n median absolute deviation (ignoring nan values)\n\n \"\"\"\n if not type(x)==np.ndarray:\n x=np.array(x)\n\n if window%2:\n window=window-1\n win2 = np.int(window/2)\n N=len(x)\n medx =mad(x)\n y=np.full(N,medx)\n for ii in np.arange(win2,N-win2+1,window):\n try:\n idx=(np.arange(-win2,win2)+ii).astype(np.int)\n y[idx] = np.median(np.abs((x[idx]-medx)))\n except:\n pass\n return y\n\ndef movstd(x,window):\n \"\"\" Computes the moving standard deviation for a 1D array. Returns\n an array with the same length of the input array.\n\n Small window length provides a finer description of deviation\n Longer window coarser (faster to compute).\n\n By default, each segment is centered, going L/2 to L/2-1 around Ai.\n\n\n Parameters\n ----------\n x : input numpy array (1D)\n window : integer for the evaluation window,\n Returns\n -------\n 1d vector of standard deviations\n\n \"\"\"\n if not type(x)==np.ndarray:\n x=np.array(x)\n\n if window%2:\n window=window-1\n\n win2 = np.floor(window/2)\n N=len(x)\n y=np.full(N,medx)\n for ii in np.arange(win2,N-win2+1,window):\n try:\n idx=(np.arange(-win2,win2)+ii).astype(np.int)\n y[idx] = np.nanstd(x[idx])\n except:\n pass\n return y\n\ndef robust_zscore(signal):\n \"\"\" robust_zscore\n function that uses median and median absolute deviation to standard the\n input vector\n\n Parameters\n ----------\n x : input numpy array (1D)\n\n Returns\n -------\n z : standarized vector with zero median and std ~1 (without outliers)\n\n \"\"\"\n return (signal-np.nanmedian(signal))/(mad(signal)*1.4826)\n\ndef sig_stats(signal):\n \"\"\" sig_stats\n function that returns various signal statistics:\n std, mad, min, max\n\n Parameters\n ----------\n signal : input numpy array (1D)\n\n Returns\n -------\n out : a dictionary with the above signal statistics\n\n \"\"\"\n out = {}\n out['std'] = \"{0:0.3e}\".format(np.std(signal))\n out['mad'] = \"{0:0.3e}\".format(mad(signal))\n out['min'] = \"{0:0.3e}\".format(np.nanmin(signal))\n out['max'] = \"{0:0.3e}\".format(np.nanmax(signal))\n return out\n"
]
| [
[
"numpy.full",
"numpy.int",
"numpy.array",
"numpy.nanmin",
"numpy.std",
"numpy.nanstd",
"numpy.arange",
"numpy.abs",
"numpy.nanmax",
"numpy.nanmedian",
"numpy.floor"
]
]
|
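The 1.4826 factor in `robust_zscore` above rescales the MAD into a consistent estimator of the standard deviation for Gaussian data, which is what makes the robust z-score comparable to an ordinary one. A quick numerical check of that constant:

```python
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=100_000)                     # sigma = 1 by construction
mad = np.nanmedian(np.abs(x - np.nanmedian(x)))  # same definition as mad() above
print(mad * 1.4826)                              # ~1.0: scaled MAD estimates sigma
```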
nidiascampos/smartgreen | [
"d574d90918702ac3bd383ed77d673f871576c5b0"
]
| [
"portPython/functions/normalizar.py"
]
| [
"import pandas as pd\r\n\r\ndef normalizarDados(data):\r\n for index, row in data.iterrows():\r\n data.at[index, 'when'] = pd.Timestamp(row['when']).replace(minute=0, second=0, microsecond=0)\r\n\r\ndef table2timetable(data):\r\n data = data.set_index(['when'])\r\n data.index = pd.to_datetime(data.index)\r\n data = data.sort_index()\r\n return data\r\n\r\ndef retime(data, frequency='H', method='linear'):\r\n data = table2timetable(data) # Transforma em Timetable\r\n data = data.resample(frequency).bfill() # Resample (Hora)\r\n data = data.interpolate(method=method) # Interpolar (linear)\r\n\r\n return data\r\n\r\n"
]
| [
[
"pandas.to_datetime",
"pandas.Timestamp"
]
]
|
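`retime` above chains three pandas steps: time-index the table, resample to a fixed frequency with backfill, then linearly interpolate whatever remains. The same pipeline spelled out on a toy frame (the column names are made up for the example):

```python
import pandas as pd

data = pd.DataFrame({
    "when": ["2020-01-01 00:12:00", "2020-01-01 03:40:00"],
    "moisture": [10.0, 40.0],
})
tt = data.set_index("when")            # the table2timetable() steps
tt.index = pd.to_datetime(tt.index)
tt = tt.sort_index()
hourly = tt.resample("H").bfill().interpolate(method="linear")
print(hourly)                          # one row per hour between the readings
```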
iory/ROMP | [
"d50bab681b5a60d15526fbeec1ed98cb020864b2"
]
| [
"romp/lib/models/basic_modules.py"
]
| [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport logging\n\nimport torch\nimport torch.nn as nn\n\nimport sys, os\nroot_dir = os.path.join(os.path.dirname(__file__),'..')\nif root_dir not in sys.path:\n sys.path.insert(0, root_dir)\nfrom config import args\n\nBN_MOMENTUM = 0.1\nlogger = logging.getLogger(__name__)\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\nclass BasicBlock_IBN_a(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock_IBN_a, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = IBN_a(planes, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, BN=nn.BatchNorm2d):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = BN(planes, momentum=BN_MOMENTUM)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,\n bias=False)\n self.bn3 = nn.BatchNorm2d(planes * self.expansion,\n momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\ndef conv3x3_1D(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv1d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\nclass BasicBlock_1D(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1):\n super(BasicBlock_1D, 
self).__init__()\n self.conv1 = conv3x3_1D(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm1d(planes, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3_1D(planes, planes)\n self.bn2 = nn.BatchNorm1d(planes, momentum=BN_MOMENTUM)\n self.stride = stride\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n return out\n\ndef conv3x3_3D(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv3d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\nclass BasicBlock_3D(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1):\n super(BasicBlock_3D, self).__init__()\n self.conv1 = conv3x3_3D(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm3d(planes, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3_3D(planes, planes)\n self.bn2 = nn.BatchNorm3d(planes, momentum=BN_MOMENTUM)\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass HighResolutionModule(nn.Module):\n def __init__(self, num_branches, blocks, num_blocks, num_inchannels,\n num_channels, fuse_method, multi_scale_output=True):\n super(HighResolutionModule, self).__init__()\n\n self.num_inchannels = num_inchannels\n self.fuse_method = fuse_method\n self.num_branches = num_branches\n\n self.multi_scale_output = multi_scale_output\n\n self.branches = self._make_branches(\n num_branches, blocks, num_blocks, num_channels)\n self.fuse_layers = self._make_fuse_layers()\n self.relu = nn.ReLU(True)\n\n def _make_one_branch(self, branch_index, block, num_blocks, num_channels,\n stride=1):\n downsample = None\n if stride != 1 or \\\n self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.num_inchannels[branch_index],\n num_channels[branch_index] * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(num_channels[branch_index] * block.expansion,\n momentum=BN_MOMENTUM),\n )\n\n layers = []\n layers.append(block(self.num_inchannels[branch_index],\n num_channels[branch_index], stride, downsample))\n self.num_inchannels[branch_index] = \\\n num_channels[branch_index] * block.expansion\n for i in range(1, num_blocks[branch_index]):\n layers.append(block(self.num_inchannels[branch_index],\n num_channels[branch_index]))\n\n return nn.Sequential(*layers)\n\n def _make_branches(self, num_branches, block, num_blocks, num_channels):\n branches = []\n\n for i in range(num_branches):\n branches.append(\n self._make_one_branch(i, block, num_blocks, num_channels))\n\n return nn.ModuleList(branches)\n\n def _make_fuse_layers(self):\n if self.num_branches == 1:\n return None\n\n num_branches = self.num_branches\n num_inchannels = self.num_inchannels\n fuse_layers = []\n for i in range(num_branches if self.multi_scale_output else 1):\n fuse_layer = []\n for j in range(num_branches):\n if j > i:\n fuse_layer.append(nn.Sequential(\n nn.Conv2d(num_inchannels[j],\n num_inchannels[i],\n 1,\n 1,\n 0,\n bias=False),\n nn.BatchNorm2d(num_inchannels[i]),\n nn.Upsample(scale_factor=2**(j-i), mode='nearest')))\n elif j == i:\n fuse_layer.append(None)\n else:\n conv3x3s = []\n for k in range(i-j):\n if k == i - j - 1:\n 
num_outchannels_conv3x3 = num_inchannels[i]\n conv3x3s.append(nn.Sequential(\n nn.Conv2d(num_inchannels[j],\n num_outchannels_conv3x3,\n 3, 2, 1, bias=False),\n nn.BatchNorm2d(num_outchannels_conv3x3)))\n else:\n num_outchannels_conv3x3 = num_inchannels[j]\n conv3x3s.append(nn.Sequential(\n nn.Conv2d(num_inchannels[j],\n num_outchannels_conv3x3,\n 3, 2, 1, bias=False),\n nn.BatchNorm2d(num_outchannels_conv3x3),\n nn.ReLU(True)))\n fuse_layer.append(nn.Sequential(*conv3x3s))\n fuse_layers.append(nn.ModuleList(fuse_layer))\n\n return nn.ModuleList(fuse_layers)\n\n def get_num_inchannels(self):\n return self.num_inchannels\n\n def forward(self, x):\n if self.num_branches == 1:\n return [self.branches[0](x[0])]\n\n for i in range(self.num_branches):\n x[i] = self.branches[i](x[i])\n\n x_fuse = []\n\n for i in range(len(self.fuse_layers)):\n y = x[0] if i == 0 else self.fuse_layers[i][0](x[0])\n for j in range(1, self.num_branches):\n if i == j:\n y = y + x[j]\n else:\n y = y + self.fuse_layers[i][j](x[j])\n x_fuse.append(self.relu(y))\n\n return x_fuse\n\n\nblocks_dict = {\n 'BASIC': BasicBlock,\n 'BASIC_IBN_a': BasicBlock_IBN_a,\n 'BOTTLENECK': Bottleneck\n}\n\nclass IBN_a(nn.Module):\n def __init__(self, planes, momentum=BN_MOMENTUM):\n super(IBN_a, self).__init__()\n half1 = int(planes/2)\n self.half = half1\n half2 = planes - half1\n self.IN = nn.InstanceNorm2d(half1, affine=True)\n self.BN = nn.BatchNorm2d(half2, momentum=momentum)\n \n def forward(self, x):\n split = torch.split(x, self.half, 1)\n out1 = self.IN(split[0].contiguous())\n out2 = self.BN(split[1].contiguous())\n out = torch.cat((out1, out2), 1)\n return out\n"
]
| [
[
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.Conv1d",
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.split",
"torch.nn.ReLU",
"torch.nn.Upsample",
"torch.nn.Conv2d",
"torch.nn.BatchNorm1d",
"torch.nn.Conv3d",
"torch.nn.InstanceNorm2d",
"torch.nn.BatchNorm3d"
]
]
|
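The IBN_a module at the end of the record above splits the channel dimension between InstanceNorm2d and BatchNorm2d before re-concatenating. A minimal, self-contained sketch of that split-and-normalize pattern (an even channel count and the 0.1 momentum default are assumptions here; `torch.split` is given explicit section sizes rather than the record's single chunk size):

import torch
import torch.nn as nn

class IBNa(nn.Module):
    """Half the channels through InstanceNorm2d, the rest through BatchNorm2d."""
    def __init__(self, planes, momentum=0.1):
        super().__init__()
        self.half = planes // 2
        self.IN = nn.InstanceNorm2d(self.half, affine=True)
        self.BN = nn.BatchNorm2d(planes - self.half, momentum=momentum)

    def forward(self, x):
        # split along the channel axis, normalize each part, then re-concatenate
        a, b = torch.split(x, [self.half, x.size(1) - self.half], dim=1)
        return torch.cat((self.IN(a.contiguous()), self.BN(b.contiguous())), dim=1)

x = torch.randn(2, 64, 32, 32)
print(IBNa(64)(x).shape)  # torch.Size([2, 64, 32, 32])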
doem97/PSENet | [
"4d95395658662f2223805c36dcd573d9e190ce26"
]
| [
"models/post_processing/pse/setup.py"
]
| [
"from distutils.core import setup, Extension\nfrom Cython.Build import cythonize\nimport numpy\n\nsetup(ext_modules=cythonize(Extension(\n 'pse',\n sources=['pse.pyx'],\n language='c++',\n include_dirs=[numpy.get_include()],\n library_dirs=[],\n libraries=[],\n extra_compile_args=['-O3'],\n extra_link_args=[]\n)))\n"
]
| [
[
"numpy.get_include"
]
]
|
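The setup.py row above compiles the `pse` Cython extension against NumPy's C headers. A small sketch of where those headers come from and the usual in-place build step (the build command is the standard distutils invocation, an assumption rather than something stated in the record):

import numpy

# Extensions that touch the NumPy C API need this directory on the
# include path, exactly what setup.py passes via include_dirs.
print(numpy.get_include())

# Typical build, run in the directory holding setup.py and pse.pyx:
#   python setup.py build_ext --inplace
# after which the compiled module imports like any other:  import pse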
xyyphant0m/spartan2 | [
"6c40247052cdce80c2787c9ee0c5485e218c082d"
]
| [
"spartan/tensor/timeseries.py"
]
| [
"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n'''\n@File : timeseries.py\n@Desc : Definition of timeseries structure.\n'''\n\n# here put the import lib\nfrom . import DTensor\n\n\nclass Timeseries:\n def __init__(self, val_tensor: DTensor, time_tensor: DTensor = None, labels: list = None, freq: int = 1, startts: int = 0):\n \"\"\"A class designed for time series data.\n\n Parameters\n ----------\n val_tensor : DTensor\n value tensor\n\n time_tensor : DTensor\n time tensor, default is None\n\n labels : list\n list of column names, default is None\n\n freq : int\n frequency of this series, default is 1\n\n startts : int\n start timetick, default is 0\n\n Examples\n ----------\n Timeseries can be constructed in many styles. Among all parameters, only val_tensor is necessary.\n\n Normally, val_tensor, time_tensor, and labels are passed in. Length of labels and val_tensor will be determined to be equal.\n And meanwhile, freq, startts will be invalid and inferred from time tensor.\n\n >>> Timeseries(val_tensor, time_tensor, labels=['col_1', 'col_2'])\n\n If labels are missing, program will defaultly assign a list of labels, as ['dim_1', 'dim_2', ...]\n\n >>> Timeseries(val_tensor, time_tensor)\n\n If time tensor is missed, program will automatically create a time tensor with parameter freq and startts.\n\n >>> Timeseries(val_tensor, freq=2, startts=100)\n \"\"\"\n self.freq = freq\n self.val_tensor = val_tensor.T\n self.dimension, self.length = self.val_tensor.shape\n if labels is None:\n self.labels = ['dim_' + str(i) for i in range(self.dimension)]\n else:\n self.labels = list(labels)\n if time_tensor is None:\n self.startts = startts\n import numpy as np\n self.time_tensor = self.__init_time(self.val_tensor.shape[1], self.freq, self.startts)\n else:\n self.startts = time_tensor[0]\n self.freq = (self.length) / (time_tensor.max() - time_tensor.min())\n self.time_tensor = time_tensor\n\n def __len__(self):\n \"\"\"Return and update length of time tensor as length of time series object.\n\n Returns\n ----------\n self.length\n length of time series object\n \"\"\"\n self.length = self.time_tensor.__len__()[1]\n return self.length\n\n def __str__(self):\n \"\"\"Return discription of time series object.\n\n Returns\n ----------\n _str : str\n discription of time series object\n \"\"\"\n import pandas as pd\n _str = f\"\"\"\n Time Series Object\n Dimension Size: {self.dimension}\n Length: {self.length}\n Time Length: {round(self.time_tensor.max() - self.time_tensor.min(), 3)}\n Frequency: {round(self.freq, 3)}\n Start Timestamp: {round(self.startts, 3)}\n Labels: {', '.join([str(x) for x in self.labels])}\n \"\"\"\n columns = ['Time']\n columns.extend(self.labels)\n print(pd.DataFrame(DTensor([self.time_tensor]).concatenate(self.val_tensor, axis=0)._data.T,\n columns=columns))\n return _str\n\n def __copy__(self):\n \"\"\"Return copy of time series object.\n\n Returns\n ----------\n object\n copy of time series object\n \"\"\"\n import copy\n time_tensor = copy.copy(self.time_tensor)\n val_tensor = copy.copy(self.val_tensor).T\n labels = copy.copy(self.labels)\n return Timeseries(val_tensor, time_tensor, labels)\n\n def resample(self, resampled_freq: int, inplace: bool = False, show: bool = False):\n \"\"\"Resample series data with a new frequency, acomplished on the basis of scipy.signal.sample.\n\n Parameters\n ----------\n resampled_freq : int\n resampled frequency\n\n inplace : bool\n update origin object or return a new object, default is False\n\n show : bool\n if True, draw plot\n\n 
Returns\n ----------\n None or Timeseries object\n self or a new resampled object \n \"\"\"\n _ori_tensor = DTensor.from_numpy(self.val_tensor._data.copy())\n _ori_freq = self.freq\n _self = self.__handle_inplace(inplace)\n new_len = int(_self.length / _self.freq * resampled_freq)\n _self.val_tensor.resample(new_len, inplace=True)\n _self.__update_time(_self.val_tensor, resampled_freq, _self.startts)\n _self.__update_info(_self.labels, _self.time_tensor, _self.val_tensor)\n if show:\n from spartan.util.drawutil import plot_resampled_series\n plot_resampled_series(self, self.length, _self.length, _ori_freq, resampled_freq, _ori_tensor._data, _self.val_tensor._data, _self.startts)\n if not inplace:\n return _self\n\n def add_columns(self, attrs: list or str, values: [int, float, DTensor, list] = None, inplace: bool = False, show: bool = False):\n \"\"\"Add new equal-length columns to Time series object.\n\n Parameters\n ----------\n attrs : list or str\n list or string of column names\n\n values: [int, float, DTensor, list]\n if type of values is int or float, function will create a equal-length ndarray filled with values\n if type of values is DTensor or list, function will judge the length, then add columns\n default is None\n\n inplace : bool\n update origin object or return a new object, default is False\n\n show : bool\n if True, draw plot\n\n Returns\n ----------\n None or Timeseries object\n self or a new object with columns added\n \"\"\"\n _self = self.__handle_inplace(inplace)\n _names_type = type(attrs)\n _values_type = type(values)\n if _names_type == str:\n if _values_type in [int, float]:\n _self.__add_single_column(attrs, values, _type='number')\n elif _values_type == DTensor:\n assert len(values.shape) == 1\n _self.__add_single_column(attrs, values, _type='tensor')\n elif _values_type == list:\n assert len(values) == 1\n _self.__add_single_column(attrs, values[0], _type='number')\n else:\n raise TypeError(f\"Inappropriate values type of {type(values)}\")\n elif _names_type == list:\n if _values_type == DTensor:\n assert values.shape[0] == len(attrs)\n _self.__add_multi_columns(attrs, values, _type='tensor')\n elif _values_type == list:\n assert len(values) == len(attrs)\n _value_type = type(values[0])\n if _value_type in [int, float]:\n _self.__add_multi_columns(attrs, values, _type='number')\n _self.__handle_plot(show)\n if not inplace:\n return _self\n\n def __add_multi_columns(self, attrs: list, tensor: DTensor, _type: str):\n \"\"\"Private function for adding multiple columns, adding operation is finished by concatenate.\n\n Parameters\n ----------\n attrs : list\n list of column names\n\n tensor : DTensor\n tensor to be added\n\n _type : str\n if number, function will create an equal-length ndarray for DTensor\n if tensor, function will concatenate directly\n \"\"\"\n if _type == 'number':\n import numpy as np\n tensor = DTensor.from_numpy(np.tile(np.array([tensor]).T, (1, self.length)))\n elif _type == 'tensor':\n tensor = tensor\n self.val_tensor.concatenate(tensor, inplace=True)\n self.labels.extend(attrs)\n self.dimension += len(attrs)\n\n def __add_single_column(self, attr: str, value: DTensor, _type: str):\n \"\"\"Private function for adding single column, adding operation is finished by concatenate.\n\n Parameters\n ----------\n columns_names : str\n string of column name\n\n tensor : DTensor\n tensor to be added\n\n _type : str\n if number, function will create an equal-length ndarray for DTensor\n if tensor, function will concatenate directly\n \"\"\"\n if _type 
== 'number':\n import numpy as np\n _data = DTensor.from_numpy(np.array([[value] * self.length]))\n elif _type == 'tensor':\n _data = value\n self.val_tensor.concatenate(_data, inplace=True)\n self.labels.append(attr)\n self.dimension += 1\n\n def concat(self, series: list or \"Timeseries\", inplace: bool = False, show: bool = False):\n \"\"\"Concatenate self with another timeseries object with the same dimension.\n\n Parameters\n ----------\n series : list or Timeseries\n list of Timeseries object or Timeseries object\n\n inplace : bool\n update origin object or return a new object, default is False\n\n show : bool\n if True, draw plot\n\n Returns\n ----------\n None or Timeseries object\n self or a new object with columns concatenated\n \"\"\"\n _self = self.__handle_inplace(inplace)\n _type = type(series)\n if _type == list:\n _series = []\n for x in series:\n if type(x) == Timeseries:\n _series.append(x.__copy__())\n else:\n raise Exception(f'list contains non-Timeseries object')\n _self.__concat_several(_series)\n elif _type == Timeseries:\n _self.__concat_one(series.__copy__())\n _self.__update_time(_self.val_tensor, _self.freq, _self.startts)\n _self.__update_info(_self.labels, _self.time_tensor, _self.val_tensor)\n _self.__handle_plot(show)\n if not inplace:\n return _self\n\n def __concat_one(self, serie: \"Timeseries\"):\n \"\"\"Private function for concating single object.\n\n Parameters\n ----------\n serie : Timeseries\n serie to be concatenated\n \"\"\"\n if not self.dimension == serie.dimension:\n raise Exception(f'dimension sizes are not the same with self {self.dimension} and object {serie.dimension}')\n for i in range(len(self.labels)):\n if not self.labels[i] == serie.labels[i]:\n raise Exception(f'{i}th dimension is not corresponding with self {self.labels[i]} and object {serie.labels[i]}')\n self.val_tensor.concatenate(serie.val_tensor, axis=1, inplace=True)\n\n def __concat_several(self, concated_series: list):\n \"\"\"Private function for concating several objects.\n\n Parameters\n ----------\n concated_series : list\n list of timeseries object to be concatenated\n \"\"\"\n for serie in concated_series:\n self.__concat_one(serie)\n\n def combine(self, series: \"Timeseries\" or list, inplace: bool = False, show: bool = False):\n \"\"\"Combine self with columns of other timeseries objects which have the same length.\n\n Parameters\n ----------\n series : list or Timeseries\n list of Timeseries object or Timeseries object\n\n inplace : bool\n update origin object or return a new object, default is False\n\n show : bool\n if True, draw plot\n\n Returns\n ----------\n None or Timeseries object\n self or a new object with columns combined\n \"\"\"\n _self = self.__handle_inplace(inplace)\n _type = type(series)\n if _type == list:\n _series = []\n for x in series:\n if type(x) == Timeseries:\n _series.append(x.__copy__())\n else:\n raise Exception(f'list contains non-STTimeseries object')\n _self.__combine_several(_series)\n elif _type == Timeseries:\n _self.__combine_one(series.__copy__())\n _self.__handle_plot(show)\n if not inplace:\n return _self\n\n def __combine_one(self, serie: \"Timeseries\"):\n \"\"\"Private function for combining single object.\n\n Parameters\n ----------\n serie : Timeseries\n serie to be combined\n \"\"\"\n if not self.freq == serie.freq:\n raise Exception(f'Frequency not matched, with {self.freq} and {serie.freq}')\n for label in serie.labels:\n if label in self.labels:\n for i in range(1, 10000):\n if not (label + '_' + str(i)) in 
self.labels:\n self.labels.extend([label + '_' + str(i)])\n break\n else:\n self.labels.extend([label])\n self.dimension += serie.dimension\n self.val_tensor.concatenate(serie.val_tensor, axis=0, inplace=True)\n\n def __combine_several(self, combined_series: list):\n \"\"\"Private function for combining several objects.\n\n Parameters\n ----------\n combined_series : list\n list of timeseries object to be combined\n \"\"\"\n for serie in combined_series:\n self.__combine_one(serie)\n\n def extract(self, attrs: list or str = None, inplace: bool = False, show: bool = False):\n \"\"\"Extract specific columns from self.\n\n Parameters\n ----------\n attrs : list or str\n list or string of column names, default is None\n\n inplace : bool\n update origin object or return a new object, default is False\n\n show : bool\n if True, draw plot\n\n Returns\n ----------\n None or Timeseries object\n self or a new object with columns extracted\n \"\"\"\n _self = self.__handle_inplace(inplace)\n _labels, _tensor = _self.__handle_attrs(attrs)\n _self.__update_info(_labels, _self.time_tensor, _tensor)\n _self.__handle_plot(show)\n if not inplace:\n return _self\n\n def cut(self, start: int = None, end: int = None, attrs: list = None, form: str = 'point', inplace: bool = False, show: bool = False):\n \"\"\"Cut sub sequence from chosen attribute columns.\n\n Parameters\n ----------\n start : int\n start timetick or point, default is None, cut from the very front position\n\n end : int\n end timetick or point, default is None, cut to the very last position\n\n attrs : list or str\n list or string of column names, default is None, return the all columns\n\n form : str\n type of start and end\n if 'point', start and end stand for positions of points\n if 'time', start and end stand for timeticks of points\n default is 'point'\n\n inplace : bool\n update origin object or return a new object, default is False\n\n show : bool\n if True, draw plot\n\n Returns\n ----------\n None or Timeseries object\n self or a new object with tensor cut\n \"\"\"\n _self = self.__handle_inplace(inplace)\n _labels, _tensor = _self.__handle_attrs(attrs)\n if form == 'point':\n start = start\n end = end\n elif form == 'time':\n if not start is None:\n start = int((start-_self.startts) * _self.freq)\n if not end is None:\n end = int((end-_self.startts) * _self.freq)\n else:\n raise Exception('Value of parameter form is not defined!')\n if start is None:\n start = 0\n if end is None:\n end = _self.length\n if start < 0 or end > _self.length:\n raise Exception(f'start pos: {start} with 0 and end pos {end} with {_self.length}')\n _self.time_tensor.cut(start, end, inplace=True)\n _tensor.cut(start, end, inplace=True)\n _self.__update_info(_labels, _self.time_tensor, _tensor)\n _self.__handle_plot(show)\n if not inplace:\n return _self\n\n def normalize(self, attrs: list or str = None, strategy: str = 'minmax', inplace: bool = False, show: bool = False):\n \"\"\"Normalize data in value_tensor.\n\n Parameters\n ----------\n attrs : list or str\n list or string of column names, default is None\n\n strategy : str\n strategy for normalization\n if 'minmax', normalize to [-1, 1]\n default is 'minmax'\n\n inplace : bool\n update origin object or return a new object, default is False\n\n show : bool\n if True, draw plot\n\n Returns\n ----------\n None or Timeseries object\n self or a new object with tensor normalized\n \"\"\"\n _self = self.__handle_inplace(inplace)\n _labels, _tensor = _self.__handle_attrs(attrs)\n if strategy == 'minmax':\n 
_tensor = _self.__normalize_minmax(_tensor)\n else:\n raise TypeError(f'strategy: {strategy} is not supported.')\n _self.__update_info(_labels, _self.time_tensor, _tensor)\n _self.__handle_plot(show)\n if not inplace:\n return _self\n\n def __normalize_minmax(self, _tensor: DTensor):\n \"\"\"Private function for normalize value tensor by minmax function.\n\n Parameters\n ----------\n _tensor : DTensor\n value tensor to be normalized by minmax function\n\n Returns\n ----------\n _tensor : DTensor\n normalized tensor\n \"\"\"\n import numpy as np\n _min = np.tile(_tensor.min(axis=1).reshape((self.dimension, 1)), self.length)\n _max = np.tile(_tensor.max(axis=1).reshape((self.dimension, 1)), self.length)\n _middle = (_min + _max) / 2\n _tensor = (_tensor - _middle) / (_max - _min) * 2\n return _tensor\n\n def __handle_plot(self, show: bool):\n \"\"\"Private function for plotting.\n\n Parameters\n ----------\n show : bool\n if True, call plot function in drawutils\n \"\"\"\n from spartan.util.drawutil import plot_timeseries\n if show:\n plot_timeseries(self)\n\n def __handle_attrs(self, attrs: str or list):\n \"\"\"Private function for checking labels and tensor of column names in attrs.\n\n Parameters\n ----------\n attrs : list or str\n list or string of column names\n\n Raises\n ----------\n TypeError:\n Raise if attrs is not str or list\n\n Exception:\n Raise if attrs has column names which are not in self.labels\n\n Returns\n ----------\n _labels, _tensor : list, DTensor\n Selected labels and value tensor\n \"\"\"\n if type(attrs) == str:\n attrs = [attrs]\n elif type(attrs) == list:\n attrs = attrs\n elif attrs is not None:\n raise TypeError(f'Type of attrs: {type(attrs)}')\n if attrs is None:\n _labels = self.labels\n _tensor = self.val_tensor\n else:\n _labels = []\n _tensor = []\n for attr in attrs:\n if not attr in self.labels:\n raise Exception(f'Attr {attr} is not found')\n _labels.append(attr)\n index = self.labels.index(attr)\n _tensor.append(self.val_tensor._data[index])\n _tensor = DTensor(_tensor)\n return _labels, _tensor\n\n def __handle_inplace(self, inplace: bool = False):\n \"\"\"Private function for checking if a new object is needed\n\n Parameters\n ----------\n inplace : bool\n update origin object or return a new object, default is False\n\n Returns\n ----------\n None or Timeseries object\n self or a new object\n \"\"\"\n if inplace:\n _self = self\n else:\n import copy\n _self = copy.copy(self)\n return _self\n\n def __update_info(self, _labels: list, _time: DTensor, _tensor: DTensor):\n \"\"\"Update infomation of self from newly updated tensors.\n\n Parameters\n ----------\n _labels : list\n list of column names\n\n _time : DTensor\n time tensor\n\n _tensor : DTensor\n value tensor\n \"\"\"\n assert len(_labels) == len(_tensor)\n self.labels, self.time_tensor = _labels, _time\n self.val_tensor, self.dimension = _tensor, len(_tensor)\n self.startts = self.time_tensor[0]\n self.length = self.val_tensor.shape[1]\n\n def __update_time(self, val_tensor: DTensor, freq: int, startts: int):\n \"\"\"Update infomation of self from newly updated tensors.\n\n Parameters\n ----------\n val_tensor : DTensor\n value tensor\n\n freq : int\n frequency of series\n\n startts : int\n start time tick\n \"\"\"\n _len = val_tensor.shape[1]\n self.length = _len\n self.time_tensor = self.__init_time(_len, freq, startts)\n self.freq = freq\n \n \n def __init_time(self, len: int, freq: int, startts: int):\n \"\"\"Construct time tensor.\n\n Parameters\n ----------\n len : int\n length of time 
tensor\n\n freq : int\n frequency of series\n\n startts : int\n start time tick\n \n Returns\n ----------\n time_tensor : DTensor\n time tensor\n \"\"\"\n import numpy as np\n time_tensor = DTensor.from_numpy(np.linspace(startts, 1 / freq * len + startts - 1, len))\n return time_tensor\n"
]
| [
[
"numpy.array",
"numpy.linspace"
]
]
|
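The `__normalize_minmax` helper in the Timeseries record above rescales every row of the value tensor to [-1, 1] by shifting to the row midpoint and dividing by the half-range. A compact NumPy equivalent of that arithmetic, assuming a plain 2-D array stands in for DTensor:

import numpy as np

def normalize_minmax(t):
    # per-row min/max -> shift by the midpoint, scale by the half-range;
    # a constant row would divide by zero, matching the record's behavior
    _min = t.min(axis=1, keepdims=True)
    _max = t.max(axis=1, keepdims=True)
    middle = (_min + _max) / 2
    return (t - middle) / (_max - _min) * 2

x = np.array([[0.0, 5.0, 10.0], [2.0, 2.0, 4.0]])
print(normalize_minmax(x))  # [[-1.  0.  1.]  [-1. -1.  1.]]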
sarvex/rasa | [
"ac1197b3f80071bb213d4fa66d5d24b4fc01e30e"
]
| [
"rasa/nlu/featurizers/dense_featurizer/lm_featurizer.py"
]
| [
"import numpy as np\nimport logging\n\nfrom typing import Any, Optional, Text, List, Type, Dict, Tuple\n\nimport rasa.core.utils\nfrom rasa.nlu.config import RasaNLUModelConfig\nfrom rasa.nlu.components import Component, UnsupportedLanguageError\nfrom rasa.nlu.featurizers.featurizer import DenseFeaturizer\nfrom rasa.nlu.model import Metadata\nimport rasa.shared.utils.io\nfrom rasa.shared.nlu.training_data.features import Features\nfrom rasa.nlu.tokenizers.tokenizer import Tokenizer, Token\nfrom rasa.shared.nlu.training_data.training_data import TrainingData\nfrom rasa.shared.nlu.training_data.message import Message\nfrom rasa.nlu.constants import (\n DENSE_FEATURIZABLE_ATTRIBUTES,\n SEQUENCE_FEATURES,\n SENTENCE_FEATURES,\n FEATURIZER_CLASS_ALIAS,\n NO_LENGTH_RESTRICTION,\n NUMBER_OF_SUB_TOKENS,\n TOKENS_NAMES,\n LANGUAGE_MODEL_DOCS,\n)\nfrom rasa.shared.nlu.constants import (\n TEXT,\n FEATURE_TYPE_SENTENCE,\n FEATURE_TYPE_SEQUENCE,\n ACTION_TEXT,\n)\nfrom rasa.utils import train_utils\n\nMAX_SEQUENCE_LENGTHS = {\n \"bert\": 512,\n \"gpt\": 512,\n \"gpt2\": 512,\n \"xlnet\": NO_LENGTH_RESTRICTION,\n \"distilbert\": 512,\n \"roberta\": 512,\n}\n\nlogger = logging.getLogger(__name__)\n\n\nclass LanguageModelFeaturizer(DenseFeaturizer):\n \"\"\"Featurizer using transformer-based language models.\n\n The transformers(https://github.com/huggingface/transformers) library\n is used to load pre-trained language models like BERT, GPT-2, etc.\n The component also tokenizes and featurizes dense featurizable attributes of\n each message.\n \"\"\"\n\n defaults = {\n # name of the language model to load.\n \"model_name\": \"bert\",\n # Pre-Trained weights to be loaded(string)\n \"model_weights\": None,\n # an optional path to a specific directory to download\n # and cache the pre-trained model weights.\n \"cache_dir\": None,\n }\n\n @classmethod\n def required_components(cls) -> List[Type[Component]]:\n \"\"\"Packages needed to be installed.\"\"\"\n return [Tokenizer]\n\n def __init__(\n self,\n component_config: Optional[Dict[Text, Any]] = None,\n skip_model_load: bool = False,\n hf_transformers_loaded: bool = False,\n ) -> None:\n \"\"\"Initializes LanguageModelFeaturizer with the specified model.\n\n Args:\n component_config: Configuration for the component.\n skip_model_load: Skip loading the model for pytests.\n hf_transformers_loaded: Skip loading of model and metadata, use\n HFTransformers output instead.\n \"\"\"\n super(LanguageModelFeaturizer, self).__init__(component_config)\n if hf_transformers_loaded:\n return\n self._load_model_metadata()\n self._load_model_instance(skip_model_load)\n\n @classmethod\n def create(\n cls, component_config: Dict[Text, Any], config: RasaNLUModelConfig\n ) -> \"DenseFeaturizer\":\n language = config.language\n if not cls.can_handle_language(language):\n # check failed\n raise UnsupportedLanguageError(cls.name, language)\n # TODO: remove this when HFTransformersNLP is removed for good\n if isinstance(config, Metadata):\n hf_transformers_loaded = \"HFTransformersNLP\" in [\n c[\"name\"] for c in config.metadata[\"pipeline\"]\n ]\n else:\n hf_transformers_loaded = \"HFTransformersNLP\" in config.component_names\n return cls(component_config, hf_transformers_loaded=hf_transformers_loaded)\n\n @classmethod\n def load(\n cls,\n meta: Dict[Text, Any],\n model_dir: Text,\n model_metadata: Optional[\"Metadata\"] = None,\n cached_component: Optional[\"Component\"] = None,\n **kwargs: Any,\n ) -> \"Component\":\n \"\"\"Load this component from file.\n\n After a component 
has been trained, it will be persisted by\n calling `persist`. When the pipeline gets loaded again,\n this component needs to be able to restore itself.\n Components can rely on any context attributes that are\n created by :meth:`components.Component.create`\n calls to components previous to this one.\n\n This method differs from the parent method only in that it calls create\n rather than the constructor if the component is not found. This is to\n trigger the check for HFTransformersNLP and the method can be removed\n when HFTRansformersNLP is removed.\n\n Args:\n meta: Any configuration parameter related to the model.\n model_dir: The directory to load the component from.\n model_metadata: The model's :class:`rasa.nlu.model.Metadata`.\n cached_component: The cached component.\n\n Returns:\n the loaded component\n \"\"\"\n # TODO: remove this when HFTransformersNLP is removed for good\n if cached_component:\n return cached_component\n\n return cls.create(meta, model_metadata)\n\n def _load_model_metadata(self) -> None:\n \"\"\"Load the metadata for the specified model and sets these properties.\n\n This includes the model name, model weights, cache directory and the\n maximum sequence length the model can handle.\n \"\"\"\n from rasa.nlu.utils.hugging_face.registry import (\n model_class_dict,\n model_weights_defaults,\n )\n\n self.model_name = self.component_config[\"model_name\"]\n\n if self.model_name not in model_class_dict:\n raise KeyError(\n f\"'{self.model_name}' not a valid model name. Choose from \"\n f\"{str(list(model_class_dict.keys()))} or create\"\n f\"a new class inheriting from this class to support your model.\"\n )\n\n self.model_weights = self.component_config[\"model_weights\"]\n self.cache_dir = self.component_config[\"cache_dir\"]\n\n if not self.model_weights:\n logger.info(\n f\"Model weights not specified. Will choose default model \"\n f\"weights: {model_weights_defaults[self.model_name]}\"\n )\n self.model_weights = model_weights_defaults[self.model_name]\n\n self.max_model_sequence_length = MAX_SEQUENCE_LENGTHS[self.model_name]\n\n def _load_model_instance(self, skip_model_load: bool) -> None:\n \"\"\"Try loading the model instance.\n\n Args:\n skip_model_load: Skip loading the model instances to save time. This\n should be True only for pytests\n \"\"\"\n if skip_model_load:\n # This should be True only during pytests\n return\n\n from rasa.nlu.utils.hugging_face.registry import (\n model_class_dict,\n model_tokenizer_dict,\n )\n\n logger.debug(f\"Loading Tokenizer and Model for {self.model_name}\")\n\n self.tokenizer = model_tokenizer_dict[self.model_name].from_pretrained(\n self.model_weights, cache_dir=self.cache_dir\n )\n self.model = model_class_dict[self.model_name].from_pretrained(\n self.model_weights, cache_dir=self.cache_dir\n )\n\n # Use a universal pad token since all transformer architectures do not have a\n # consistent token. Instead of pad_token_id we use unk_token_id because\n # pad_token_id is not set for all architectures. 
We can't add a new token as\n # well since vocabulary resizing is not yet supported for TF classes.\n # Also, this does not hurt the model predictions since we use an attention mask\n # while feeding input.\n self.pad_token_id = self.tokenizer.unk_token_id\n\n @classmethod\n def cache_key(\n cls, component_meta: Dict[Text, Any], model_metadata: Metadata\n ) -> Optional[Text]:\n \"\"\"Cache the component for future use.\n\n Args:\n component_meta: configuration for the component.\n model_metadata: configuration for the whole pipeline.\n\n Returns: key of the cache for future retrievals.\n \"\"\"\n weights = component_meta.get(\"model_weights\") or {}\n\n return (\n f\"{cls.name}-{component_meta.get('model_name')}-\"\n f\"{rasa.shared.utils.io.deep_container_fingerprint(weights)}\"\n )\n\n @classmethod\n def required_packages(cls) -> List[Text]:\n \"\"\"Packages needed to be installed.\"\"\"\n return [\"transformers\"]\n\n def _lm_tokenize(self, text: Text) -> Tuple[List[int], List[Text]]:\n \"\"\"Pass the text through the tokenizer of the language model.\n\n Args:\n text: Text to be tokenized.\n\n Returns: List of token ids and token strings.\n \"\"\"\n split_token_ids = self.tokenizer.encode(text, add_special_tokens=False)\n\n split_token_strings = self.tokenizer.convert_ids_to_tokens(split_token_ids)\n\n return split_token_ids, split_token_strings\n\n def _add_lm_specific_special_tokens(\n self, token_ids: List[List[int]]\n ) -> List[List[int]]:\n \"\"\"Add language model specific special tokens which were used during\n their training.\n\n Args:\n token_ids: List of token ids for each example in the batch.\n\n Returns: Augmented list of token ids for each example in the batch.\n \"\"\"\n from rasa.nlu.utils.hugging_face.registry import (\n model_special_tokens_pre_processors,\n )\n\n augmented_tokens = [\n model_special_tokens_pre_processors[self.model_name](example_token_ids)\n for example_token_ids in token_ids\n ]\n return augmented_tokens\n\n def _lm_specific_token_cleanup(\n self, split_token_ids: List[int], token_strings: List[Text]\n ) -> Tuple[List[int], List[Text]]:\n \"\"\"Clean up special chars added by tokenizers of language models.\n\n Many language models add a special char in front/back of (some) words. 
We clean\n up those chars as they are not\n needed once the features are already computed.\n\n Args:\n split_token_ids: List of token ids received as output from the language\n model specific tokenizer.\n token_strings: List of token strings received as output from the language\n model specific tokenizer.\n\n Returns: Cleaned up token ids and token strings.\n \"\"\"\n from rasa.nlu.utils.hugging_face.registry import model_tokens_cleaners\n\n return model_tokens_cleaners[self.model_name](split_token_ids, token_strings)\n\n def _post_process_sequence_embeddings(\n self, sequence_embeddings: np.ndarray\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Compute sentence and sequence level representations for relevant tokens.\n\n Args:\n sequence_embeddings: Sequence level dense features received as output from\n language model.\n\n Returns: Sentence and sequence level representations.\n \"\"\"\n from rasa.nlu.utils.hugging_face.registry import (\n model_embeddings_post_processors,\n )\n\n sentence_embeddings = []\n post_processed_sequence_embeddings = []\n\n for example_embedding in sequence_embeddings:\n (\n example_sentence_embedding,\n example_post_processed_embedding,\n ) = model_embeddings_post_processors[self.model_name](example_embedding)\n\n sentence_embeddings.append(example_sentence_embedding)\n post_processed_sequence_embeddings.append(example_post_processed_embedding)\n\n return (\n np.array(sentence_embeddings),\n np.array(post_processed_sequence_embeddings),\n )\n\n def _tokenize_example(\n self, message: Message, attribute: Text\n ) -> Tuple[List[Token], List[int]]:\n \"\"\"Tokenize a single message example.\n\n Many language models add a special char in front of (some) words and split\n words into sub-words. To ensure the entity start and end values matches the\n token values, use the tokens produced by the Tokenizer component. 
If\n individual tokens are split up into multiple tokens, we add this information\n to the respected token.\n\n Args:\n message: Single message object to be processed.\n attribute: Property of message to be processed, one of ``TEXT`` or\n ``RESPONSE``.\n\n Returns: List of token strings and token ids for the corresponding\n attribute of the message.\n \"\"\"\n tokens_in = message.get(TOKENS_NAMES[attribute])\n tokens_out = []\n\n token_ids_out = []\n\n for token in tokens_in:\n # use lm specific tokenizer to further tokenize the text\n split_token_ids, split_token_strings = self._lm_tokenize(token.text)\n\n if not split_token_ids:\n # fix the situation that `token.text` only contains whitespace or other\n # special characters, which cause `split_token_ids` and\n # `split_token_strings` be empty, finally cause\n # `self._lm_specific_token_cleanup()` to raise an exception\n continue\n\n (split_token_ids, split_token_strings) = self._lm_specific_token_cleanup(\n split_token_ids, split_token_strings\n )\n\n token_ids_out += split_token_ids\n\n token.set(NUMBER_OF_SUB_TOKENS, len(split_token_strings))\n\n tokens_out.append(token)\n\n return tokens_out, token_ids_out\n\n def _get_token_ids_for_batch(\n self, batch_examples: List[Message], attribute: Text\n ) -> Tuple[List[List[Token]], List[List[int]]]:\n \"\"\"Compute token ids and token strings for each example in batch.\n\n A token id is the id of that token in the vocabulary of the language model.\n\n Args:\n batch_examples: Batch of message objects for which tokens need to be\n computed.\n attribute: Property of message to be processed, one of ``TEXT`` or\n ``RESPONSE``.\n\n Returns: List of token strings and token ids for each example in the batch.\n \"\"\"\n batch_token_ids = []\n batch_tokens = []\n for example in batch_examples:\n\n example_tokens, example_token_ids = self._tokenize_example(\n example, attribute\n )\n batch_tokens.append(example_tokens)\n batch_token_ids.append(example_token_ids)\n\n return batch_tokens, batch_token_ids\n\n @staticmethod\n def _compute_attention_mask(\n actual_sequence_lengths: List[int], max_input_sequence_length: int\n ) -> np.ndarray:\n \"\"\"Compute a mask for padding tokens.\n\n This mask will be used by the language model so that it does not attend to\n padding tokens.\n\n Args:\n actual_sequence_lengths: List of length of each example without any\n padding.\n max_input_sequence_length: Maximum length of a sequence that will be\n present in the input batch. This is\n after taking into consideration the maximum input sequence the model\n can handle. 
Hence it can never be\n greater than self.max_model_sequence_length in case the model\n applies length restriction.\n\n Returns: Computed attention mask, 0 for padding and 1 for non-padding\n tokens.\n \"\"\"\n attention_mask = []\n\n for actual_sequence_length in actual_sequence_lengths:\n # add 1s for present tokens, fill up the remaining space up to max\n # sequence length with 0s (non-existing tokens)\n padded_sequence = [1] * min(\n actual_sequence_length, max_input_sequence_length\n ) + [0] * (\n max_input_sequence_length\n - min(actual_sequence_length, max_input_sequence_length)\n )\n attention_mask.append(padded_sequence)\n\n attention_mask = np.array(attention_mask).astype(np.float32)\n return attention_mask\n\n def _extract_sequence_lengths(\n self, batch_token_ids: List[List[int]]\n ) -> Tuple[List[int], int]:\n \"\"\"Extracts the sequence length for each example and maximum sequence length.\n\n Args:\n batch_token_ids: List of token ids for each example in the batch.\n\n Returns:\n Tuple consisting of: the actual sequence lengths for each example,\n and the maximum input sequence length (taking into account the\n maximum sequence length that the model can handle.\n \"\"\"\n # Compute max length across examples\n max_input_sequence_length = 0\n actual_sequence_lengths = []\n\n for example_token_ids in batch_token_ids:\n sequence_length = len(example_token_ids)\n actual_sequence_lengths.append(sequence_length)\n max_input_sequence_length = max(\n max_input_sequence_length, len(example_token_ids)\n )\n\n # Take into account the maximum sequence length the model can handle\n max_input_sequence_length = (\n max_input_sequence_length\n if self.max_model_sequence_length == NO_LENGTH_RESTRICTION\n else min(max_input_sequence_length, self.max_model_sequence_length)\n )\n\n return actual_sequence_lengths, max_input_sequence_length\n\n def _add_padding_to_batch(\n self, batch_token_ids: List[List[int]], max_sequence_length_model: int\n ) -> List[List[int]]:\n \"\"\"Add padding so that all examples in the batch are of the same length.\n\n Args:\n batch_token_ids: Batch of examples where each example is a non-padded list\n of token ids.\n max_sequence_length_model: Maximum length of any input sequence in the batch\n to be fed to the model.\n\n Returns:\n Padded batch with all examples of the same length.\n \"\"\"\n padded_token_ids = []\n\n # Add padding according to max_sequence_length\n # Some models don't contain pad token, we use unknown token as padding token.\n # This doesn't affect the computation since we compute an attention mask\n # anyways.\n for example_token_ids in batch_token_ids:\n\n # Truncate any longer sequences so that they can be fed to the model\n if len(example_token_ids) > max_sequence_length_model:\n example_token_ids = example_token_ids[:max_sequence_length_model]\n\n padded_token_ids.append(\n example_token_ids\n + [self.pad_token_id]\n * (max_sequence_length_model - len(example_token_ids))\n )\n return padded_token_ids\n\n @staticmethod\n def _extract_nonpadded_embeddings(\n embeddings: np.ndarray, actual_sequence_lengths: List[int]\n ) -> np.ndarray:\n \"\"\"Extract embeddings for actual tokens.\n\n Use pre-computed non-padded lengths of each example to extract embeddings\n for non-padding tokens.\n\n Args:\n embeddings: sequence level representations for each example of the batch.\n actual_sequence_lengths: non-padded lengths of each example of the batch.\n\n Returns:\n Sequence level embeddings for only non-padding tokens of the batch.\n \"\"\"\n 
nonpadded_sequence_embeddings = []\n for index, embedding in enumerate(embeddings):\n unmasked_embedding = embedding[: actual_sequence_lengths[index]]\n nonpadded_sequence_embeddings.append(unmasked_embedding)\n\n return np.array(nonpadded_sequence_embeddings)\n\n def _compute_batch_sequence_features(\n self, batch_attention_mask: np.ndarray, padded_token_ids: List[List[int]]\n ) -> np.ndarray:\n \"\"\"Feed the padded batch to the language model.\n\n Args:\n batch_attention_mask: Mask of 0s and 1s which indicate whether the token\n is a padding token or not.\n padded_token_ids: Batch of token ids for each example. The batch is padded\n and hence can be fed at once.\n\n Returns:\n Sequence level representations from the language model.\n \"\"\"\n model_outputs = self.model(\n np.array(padded_token_ids), attention_mask=np.array(batch_attention_mask)\n )\n\n # sequence hidden states is always the first output from all models\n sequence_hidden_states = model_outputs[0]\n\n sequence_hidden_states = sequence_hidden_states.numpy()\n return sequence_hidden_states\n\n def _validate_sequence_lengths(\n self,\n actual_sequence_lengths: List[int],\n batch_examples: List[Message],\n attribute: Text,\n inference_mode: bool = False,\n ) -> None:\n \"\"\"Validate if sequence lengths of all inputs are less the max sequence\n length the model can handle.\n\n This method should throw an error during training, whereas log a debug\n message during inference if any of the input examples have a length\n greater than maximum sequence length allowed.\n\n Args:\n actual_sequence_lengths: original sequence length of all inputs\n batch_examples: all message instances in the batch\n attribute: attribute of message object to be processed\n inference_mode: Whether this is during training or during inferencing\n \"\"\"\n if self.max_model_sequence_length == NO_LENGTH_RESTRICTION:\n # There is no restriction on sequence length from the model\n return\n\n for sequence_length, example in zip(actual_sequence_lengths, batch_examples):\n if sequence_length > self.max_model_sequence_length:\n if not inference_mode:\n raise RuntimeError(\n f\"The sequence length of '{example.get(attribute)[:20]}...' \"\n f\"is too long({sequence_length} tokens) for the \"\n f\"model chosen {self.model_name} which has a maximum \"\n f\"sequence length of {self.max_model_sequence_length} tokens. \"\n f\"Either shorten the message or use a model which has no \"\n f\"restriction on input sequence length like XLNet.\"\n )\n logger.debug(\n f\"The sequence length of '{example.get(attribute)[:20]}...' \"\n f\"is too long({sequence_length} tokens) for the \"\n f\"model chosen {self.model_name} which has a maximum \"\n f\"sequence length of {self.max_model_sequence_length} tokens. 
\"\n f\"Downstream model predictions may be affected because of this.\"\n )\n\n def _add_extra_padding(\n self, sequence_embeddings: np.ndarray, actual_sequence_lengths: List[int]\n ) -> np.ndarray:\n \"\"\"Add extra zero padding to match the original sequence length.\n\n This is only done if the input was truncated during the batch\n preparation of input for the model.\n Args:\n sequence_embeddings: Embeddings returned from the model\n actual_sequence_lengths: original sequence length of all inputs\n\n Returns:\n Modified sequence embeddings with padding if necessary\n \"\"\"\n if self.max_model_sequence_length == NO_LENGTH_RESTRICTION:\n # No extra padding needed because there wouldn't have been any\n # truncation in the first place\n return sequence_embeddings\n\n reshaped_sequence_embeddings = []\n for index, embedding in enumerate(sequence_embeddings):\n embedding_size = embedding.shape[-1]\n if actual_sequence_lengths[index] > self.max_model_sequence_length:\n embedding = np.concatenate(\n [\n embedding,\n np.zeros(\n (\n actual_sequence_lengths[index]\n - self.max_model_sequence_length,\n embedding_size,\n ),\n dtype=np.float32,\n ),\n ]\n )\n reshaped_sequence_embeddings.append(embedding)\n\n return np.array(reshaped_sequence_embeddings)\n\n def _get_model_features_for_batch(\n self,\n batch_token_ids: List[List[int]],\n batch_tokens: List[List[Token]],\n batch_examples: List[Message],\n attribute: Text,\n inference_mode: bool = False,\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Compute dense features of each example in the batch.\n\n We first add the special tokens corresponding to each language model. Next, we\n add appropriate padding and compute a mask for that padding so that it doesn't\n affect the feature computation. The padded batch is next fed to the language\n model and token level embeddings are computed. 
Using the pre-computed mask,\n embeddings for non-padding tokens are extracted and subsequently sentence\n level embeddings are computed.\n\n Args:\n batch_token_ids: List of token ids of each example in the batch.\n batch_tokens: List of token objects for each example in the batch.\n batch_examples: List of examples in the batch.\n attribute: attribute of the Message object to be processed.\n inference_mode: Whether the call is during training or during inference.\n\n Returns:\n Sentence and token level dense representations.\n \"\"\"\n # Let's first add tokenizer specific special tokens to all examples\n batch_token_ids_augmented = self._add_lm_specific_special_tokens(\n batch_token_ids\n )\n\n # Compute sequence lengths for all examples\n (\n actual_sequence_lengths,\n max_input_sequence_length,\n ) = self._extract_sequence_lengths(batch_token_ids_augmented)\n\n # Validate that all sequences can be processed based on their sequence\n # lengths and the maximum sequence length the model can handle\n self._validate_sequence_lengths(\n actual_sequence_lengths, batch_examples, attribute, inference_mode\n )\n\n # Add padding so that whole batch can be fed to the model\n padded_token_ids = self._add_padding_to_batch(\n batch_token_ids_augmented, max_input_sequence_length\n )\n\n # Compute attention mask based on actual_sequence_length\n batch_attention_mask = self._compute_attention_mask(\n actual_sequence_lengths, max_input_sequence_length\n )\n\n # Get token level features from the model\n sequence_hidden_states = self._compute_batch_sequence_features(\n batch_attention_mask, padded_token_ids\n )\n\n # Extract features for only non-padding tokens\n sequence_nonpadded_embeddings = self._extract_nonpadded_embeddings(\n sequence_hidden_states, actual_sequence_lengths\n )\n\n # Extract sentence level and post-processed features\n (\n sentence_embeddings,\n sequence_embeddings,\n ) = self._post_process_sequence_embeddings(sequence_nonpadded_embeddings)\n\n # Pad zeros for examples which were truncated in inference mode.\n # This is intentionally done after sentence embeddings have been\n # extracted so that they are not affected\n sequence_embeddings = self._add_extra_padding(\n sequence_embeddings, actual_sequence_lengths\n )\n\n # shape of matrix for all sequence embeddings\n batch_dim = len(sequence_embeddings)\n seq_dim = max(e.shape[0] for e in sequence_embeddings)\n feature_dim = sequence_embeddings[0].shape[1]\n shape = (batch_dim, seq_dim, feature_dim)\n\n # align features with tokens so that we have just one vector per token\n # (don't include sub-tokens)\n sequence_embeddings = train_utils.align_token_features(\n batch_tokens, sequence_embeddings, shape\n )\n\n # sequence_embeddings is a padded numpy array\n # remove the padding, keep just the non-zero vectors\n sequence_final_embeddings = []\n for embeddings, tokens in zip(sequence_embeddings, batch_tokens):\n sequence_final_embeddings.append(embeddings[: len(tokens)])\n sequence_final_embeddings = np.array(sequence_final_embeddings)\n\n return sentence_embeddings, sequence_final_embeddings\n\n def _get_docs_for_batch(\n self,\n batch_examples: List[Message],\n attribute: Text,\n inference_mode: bool = False,\n ) -> List[Dict[Text, Any]]:\n \"\"\"Compute language model docs for all examples in the batch.\n\n Args:\n batch_examples: Batch of message objects for which language model docs\n need to be computed.\n attribute: Property of message to be processed, one of ``TEXT`` or\n ``RESPONSE``.\n inference_mode: Whether the call is during 
inference or during training.\n\n\n Returns:\n List of language model docs for each message in batch.\n \"\"\"\n hf_transformers_doc = batch_examples[0].get(LANGUAGE_MODEL_DOCS[attribute])\n if hf_transformers_doc:\n # This should only be the case if the deprecated\n # HFTransformersNLP component is used in the pipeline\n # TODO: remove this when HFTransformersNLP is removed for good\n logging.debug(\n f\"'{LANGUAGE_MODEL_DOCS[attribute]}' set: this \"\n f\"indicates you're using the deprecated component \"\n f\"HFTransformersNLP, please remove it from your \"\n f\"pipeline.\"\n )\n return [ex.get(LANGUAGE_MODEL_DOCS[attribute]) for ex in batch_examples]\n\n batch_tokens, batch_token_ids = self._get_token_ids_for_batch(\n batch_examples, attribute\n )\n\n (\n batch_sentence_features,\n batch_sequence_features,\n ) = self._get_model_features_for_batch(\n batch_token_ids, batch_tokens, batch_examples, attribute, inference_mode\n )\n\n # A doc consists of\n # {'sequence_features': ..., 'sentence_features': ...}\n batch_docs = []\n for index in range(len(batch_examples)):\n doc = {\n SEQUENCE_FEATURES: batch_sequence_features[index],\n SENTENCE_FEATURES: np.reshape(batch_sentence_features[index], (1, -1)),\n }\n batch_docs.append(doc)\n\n return batch_docs\n\n def train(\n self,\n training_data: TrainingData,\n config: Optional[RasaNLUModelConfig] = None,\n **kwargs: Any,\n ) -> None:\n \"\"\"Compute tokens and dense features for each message in training data.\n\n Args:\n training_data: NLU training data to be tokenized and featurized\n config: NLU pipeline config consisting of all components.\n \"\"\"\n batch_size = 64\n\n for attribute in DENSE_FEATURIZABLE_ATTRIBUTES:\n\n non_empty_examples = list(\n filter(lambda x: x.get(attribute), training_data.training_examples)\n )\n\n batch_start_index = 0\n\n while batch_start_index < len(non_empty_examples):\n\n batch_end_index = min(\n batch_start_index + batch_size, len(non_empty_examples)\n )\n # Collect batch examples\n batch_messages = non_empty_examples[batch_start_index:batch_end_index]\n\n # Construct a doc with relevant features\n # extracted(tokens, dense_features)\n batch_docs = self._get_docs_for_batch(batch_messages, attribute)\n\n for index, ex in enumerate(batch_messages):\n self._set_lm_features(batch_docs[index], ex, attribute)\n batch_start_index += batch_size\n\n def process(self, message: Message, **kwargs: Any) -> None:\n \"\"\"Process an incoming message by computing its tokens and dense features.\n\n Args:\n message: Incoming message object\n \"\"\"\n # process of all featurizers operates only on TEXT and ACTION_TEXT attributes,\n # because all other attributes are labels which are featurized during training\n # and their features are stored by the model itself.\n for attribute in {TEXT, ACTION_TEXT}:\n if message.get(attribute):\n self._set_lm_features(\n self._get_docs_for_batch(\n [message], attribute=attribute, inference_mode=True\n )[0],\n message,\n attribute,\n )\n\n def _set_lm_features(\n self, doc: Dict[Text, Any], message: Message, attribute: Text = TEXT\n ) -> None:\n \"\"\"Adds the precomputed word vectors to the messages features.\"\"\"\n sequence_features = doc[SEQUENCE_FEATURES]\n sentence_features = doc[SENTENCE_FEATURES]\n\n final_sequence_features = Features(\n sequence_features,\n FEATURE_TYPE_SEQUENCE,\n attribute,\n self.component_config[FEATURIZER_CLASS_ALIAS],\n )\n message.add_features(final_sequence_features)\n final_sentence_features = Features(\n sentence_features,\n FEATURE_TYPE_SENTENCE,\n 
attribute,\n self.component_config[FEATURIZER_CLASS_ALIAS],\n )\n message.add_features(final_sentence_features)\n"
]
| [
[
"numpy.array",
"numpy.reshape",
"numpy.zeros"
]
]
|
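`_compute_attention_mask` in the featurizer record above builds the 0/1 padding mask the transformer uses to ignore pad tokens. A self-contained sketch of the same construction (a hypothetical standalone function, keeping the record's truncation-aware logic):

import numpy as np

def compute_attention_mask(actual_lengths, max_len):
    # 1 marks a real token, 0 marks padding; sequences longer than
    # max_len are implicitly truncated, as in the record above
    rows = [[1] * min(n, max_len) + [0] * (max_len - min(n, max_len))
            for n in actual_lengths]
    return np.array(rows, dtype=np.float32)

print(compute_attention_mask([3, 5], 4))
# [[1. 1. 1. 0.]
#  [1. 1. 1. 1.]]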
lovelytt0/gMLP_phase | [
"39d39f092a79ee05eef3fe9268335840e2a56c71"
]
| [
"Scripts/continous_run.py"
]
| [
"import numpy as np\nimport matplotlib,h5py,os\nimport matplotlib.pyplot as plt\nimport scipy.stats as stats\nimport pandas as pd\nfrom obspy.signal.trigger import trigger_onset\nfrom tqdm import tqdm\nfrom datetime import datetime\nfrom contextlib import redirect_stdout\nimport random\n# from IPython.utils import io\nfrom glob import glob\nimport json\nfrom obspy import read\n\n# sklearn packages\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import confusion_matrix, precision_recall_curve, roc_curve\n\n\nmatplotlib.rc('font', **{'size' : 15})\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\nfrom obspy.core.utcdatetime import UTCDateTime\nfrom obspy.geodetics import base\nfrom obspy.taup import TauPyModel\nfrom obspy.core.event.catalog import read_events\n\nfrom gMLPhase.gMLP_torch import gMLPmodel\nfrom GNN_Tian.trainer import Graphmodel\nfrom obspy.geodetics.base import locations2degrees\n\nfrom torch import nn\n\nimport torch\n\ncat=read_events('/home/tian_feng/UCLA/Greece-2020-10-30/catalog.xml')\n\n\ndef _normalize( data, mode = 'max'): \n 'Normalize waveforms in each batch'\n\n data -= np.mean(data, axis=-1, keepdims=True)\n if mode == 'max':\n max_data = np.max(data, axis=-1, keepdims=True)\n assert(max_data.shape[-2] == data.shape[-2])\n max_data[max_data == 0] = 1\n data /= max_data \n\n elif mode == 'std': \n std_data = np.std(data, axis=-1, keepdims=True)\n assert(std_data.shape[-2] == data.shape[-2])\n std_data[std_data == 0] = 1\n data /= std_data\n return data\n\n\ndef detect_peaks(x, mph=None, mpd=1, threshold=0, edge='rising',\n kpsh=False, valley=False, show=False, ax=None):\n\n \"\"\"Detect peaks in data based on their amplitude and other features.\n\n Parameters\n ----------\n x : 1D array_like\n data.\n mph : {None, number}, optional (default = None)\n detect peaks that are greater than minimum peak height.\n mpd : positive integer, optional (default = 1)\n detect peaks that are at least separated by minimum peak distance (in\n number of data).\n threshold : positive number, optional (default = 0)\n detect peaks (valleys) that are greater (smaller) than `threshold`\n in relation to their immediate neighbors.\n edge : {None, 'rising', 'falling', 'both'}, optional (default = 'rising')\n for a flat peak, keep only the rising edge ('rising'), only the\n falling edge ('falling'), both edges ('both'), or don't detect a\n flat peak (None).\n kpsh : bool, optional (default = False)\n keep peaks with same height even if they are closer than `mpd`.\n valley : bool, optional (default = False)\n if True (1), detect valleys (local minima) instead of peaks.\n show : bool, optional (default = False)\n if True (1), plot data in matplotlib figure.\n ax : a matplotlib.axes.Axes instance, optional (default = None).\n\n Returns\n -------\n ind : 1D array_like\n indeces of the peaks in `x`.\n\n Notes\n -----\n The detection of valleys instead of peaks is performed internally by simply\n negating the data: `ind_valleys = detect_peaks(-x)`\n \n The function can handle NaN's \n\n See this IPython Notebook [1]_.\n\n References\n ----------\n .. 
[1] http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DetectPeaks.ipynb\n\n Examples\n --------\n >>> from detect_peaks import detect_peaks\n >>> x = np.random.randn(100)\n >>> x[60:81] = np.nan\n >>> # detect all peaks and plot data\n >>> ind = detect_peaks(x, show=True)\n >>> print(ind)\n\n >>> x = np.sin(2*np.pi*5*np.linspace(0, 1, 200)) + np.random.randn(200)/5\n >>> # set minimum peak height = 0 and minimum peak distance = 20\n >>> detect_peaks(x, mph=0, mpd=20, show=True)\n\n >>> x = [0, 1, 0, 2, 0, 3, 0, 2, 0, 1, 0]\n >>> # set minimum peak distance = 2\n >>> detect_peaks(x, mpd=2, show=True)\n\n >>> x = np.sin(2*np.pi*5*np.linspace(0, 1, 200)) + np.random.randn(200)/5\n >>> # detection of valleys instead of peaks\n >>> detect_peaks(x, mph=0, mpd=20, valley=True, show=True)\n\n >>> x = [0, 1, 1, 0, 1, 1, 0]\n >>> # detect both edges\n >>> detect_peaks(x, edge='both', show=True)\n\n >>> x = [-2, 1, -2, 2, 1, 1, 3, 0]\n >>> # set threshold = 2\n >>> detect_peaks(x, threshold = 2, show=True)\n \"\"\"\n\n x = np.atleast_1d(x).astype('float64')\n if x.size < 3:\n return np.array([], dtype=int)\n if valley:\n x = -x\n # find indices of all peaks\n dx = x[1:] - x[:-1]\n # handle NaN's\n indnan = np.where(np.isnan(x))[0]\n if indnan.size:\n x[indnan] = np.inf\n dx[np.where(np.isnan(dx))[0]] = np.inf\n ine, ire, ife = np.array([[], [], []], dtype=int)\n if not edge:\n ine = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) > 0))[0]\n else:\n if edge.lower() in ['rising', 'both']:\n ire = np.where((np.hstack((dx, 0)) <= 0) & (np.hstack((0, dx)) > 0))[0]\n if edge.lower() in ['falling', 'both']:\n ife = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) >= 0))[0]\n ind = np.unique(np.hstack((ine, ire, ife)))\n # handle NaN's\n if ind.size and indnan.size:\n # NaN's and values close to NaN's cannot be peaks\n ind = ind[np.in1d(ind, np.unique(np.hstack((indnan, indnan-1, indnan+1))), invert=True)]\n # first and last values of x cannot be peaks\n if ind.size and ind[0] == 0:\n ind = ind[1:]\n if ind.size and ind[-1] == x.size-1:\n ind = ind[:-1]\n # remove peaks < minimum peak height\n if ind.size and mph is not None:\n ind = ind[x[ind] >= mph]\n # remove peaks - neighbors < threshold\n if ind.size and threshold > 0:\n dx = np.min(np.vstack([x[ind]-x[ind-1], x[ind]-x[ind+1]]), axis=0)\n ind = np.delete(ind, np.where(dx < threshold)[0])\n # detect small peaks closer than minimum peak distance\n if ind.size and mpd > 1:\n ind = ind[np.argsort(x[ind])][::-1] # sort ind by peak height\n idel = np.zeros(ind.size, dtype=bool)\n for i in range(ind.size):\n if not idel[i]:\n # keep peaks with the same height if kpsh is True\n idel = idel | (ind >= ind[i] - mpd) & (ind <= ind[i] + mpd) \\\n & (x[ind[i]] > x[ind] if kpsh else True)\n idel[i] = 0 # Keep current peak\n # remove the small peaks and sort back the indices by their occurrence\n ind = np.sort(ind[~idel])\n\n if show:\n if indnan.size:\n x[indnan] = np.nan\n if valley:\n x = -x\n _plot(x, mph, mpd, threshold, edge, valley, ax, ind)\n\n return ind\n\n\ndef get_distribution(ps,ss,sigma,half_win,total_len,sample_rate):\n# x,y=get_distribution([10],[20],0.5,2,60,10)\n# plt.plot(x,y[:,:2])\n t= int((total_len-2*half_win)*sample_rate+1)\n st=half_win*sample_rate\n x= np.arange(st,t+st,1)/sample_rate\n y_n=np.ones(t)\n y_p=np.zeros(t)\n y_s=np.zeros(t)\n for p in ps:\n t_p=stats.norm.pdf(x, (p), sigma)\n y_p+=t_p/t_p.max()\n \n for s in ss:\n t_s=stats.norm.pdf(x, (s), sigma)\n y_s+=t_s/t_s.max()\n y_n=y_n-y_p-y_s\n 
y=np.vstack((y_p,y_s,y_n))\n y=np.swapaxes(y,0,1)\n \n return x,y\n\n \ndef cal_entropy(y1,y2):\n # y1 true, y2 predict\n n=len(y1)\n ans=0\n for i in range(3):\n prob=y2[:,i]\n x=np.where(prob>0.0000000001, prob, -10)\n np.log10(x,out=x,where=x>0)\n tmp=-y1[:,i]*x\n ans+=sum(tmp)\n return ans/n\n\n\ndef pre_recall(p,s,p_picks,s_picks):\n threshold=0.5\n p_len=len(p_picks)\n s_len=len(s_picks)\n p_match=0\n s_match=0\n p_err=-999\n s_err=-999\n if p_len>0:\n tp=np.abs(np.array(p_picks)-p).min()\n if tp<threshold:\n p_match=1\n p_err=tp\n if s_len>0:\n ts=np.abs(np.array(s_picks)-s).min()\n if ts<threshold:\n s_match=1\n s_err=ts\n return p_match,s_match,p_len,s_len,p_err,s_err\n\n# def pick_picks(ts,tt,min_proba):\n# prob_S = ts[:,1]\n# prob_P = ts[:,0]\n# prob_N = ts[:,2]\n\n# trigs = trigger_onset(prob_P, min_proba, 0.1)\n# p_picks = []\n# s_picks = []\n# for trig in trigs:\n# if trig[1] == trig[0]:\n# continue\n# pick = np.argmax(ts[trig[0]:trig[1], 0])+trig[0]\n# p_picks.append(round(tt[pick],2))\n\n# trigs = trigger_onset(prob_S, min_proba, 0.1)\n# for trig in trigs:\n# if trig[1] == trig[0]:\n# continue\n# pick = np.argmax(ts[trig[0]:trig[1], 1])+trig[0]\n# s_picks.append(round(tt[pick],2))\n# return p_picks,s_picks\n\ndef pick_picks(ts,tt,min_proba,sample_rate):\n prob_S = ts[:,1]\n prob_P = ts[:,0]\n prob_N = ts[:,2]\n itp = detect_peaks(prob_P, mph=min_proba, mpd=0.5*sample_rate, show=False)\n its = detect_peaks(prob_S, mph=min_proba, mpd=0.5*sample_rate, show=False)\n p_picks=tt[itp]\n s_picks=tt[its]\n p_prob=ts[itp]\n s_prob=ts[its]\n return p_picks,s_picks,p_prob,s_prob\n\ndef signaltonoise(a, axis=0, ddof=0):\n a = np.asanyarray(a)\n m = a.mean(axis)\n sd = a.std(axis=axis, ddof=ddof)\n return np.where(sd == 0, 0, m/sd)\n\n#####################\n# Hyperparameters\nmin_proba = 0.95 # Minimum softmax probability for phase detection\nfreq_min = 3.0\nfreq_max = 20.0\nfilter_data = True\ndecimate_data = False # If false, assumes data is already 100 Hz samprate\nn_shift = 10 # Number of samples to shift the sliding window at a time\nn_gpu = 1 # Number of GPUs to use (if any)\n#####################\n\n\n#-------------------------------------------------------------\n\ndef sliding_window(data, size, stepsize=1, padded=False, axis=-1, copy=True):\n \"\"\"\n Calculate a sliding window over a signal\n Parameters\n ----------\n data : numpy array\n The array to be slided over.\n size : int\n The sliding window size\n stepsize : int\n The sliding window stepsize. Defaults to 1.\n axis : int\n The axis to slide over. 
Defaults to the last axis.\n copy : bool\n Return strided array as copy to avoid sideffects when manipulating the\n output array.\n Returns\n -------\n data : numpy array\n A matrix where row in last dimension consists of one instance\n of the sliding window.\n Notes\n -----\n - Be wary of setting `copy` to `False` as undesired sideffects with the\n output values may occurr.\n Examples\n --------\n >>> a = numpy.array([1, 2, 3, 4, 5])\n >>> sliding_window(a, size=3)\n array([[1, 2, 3],\n [2, 3, 4],\n [3, 4, 5]])\n >>> sliding_window(a, size=3, stepsize=2)\n array([[1, 2, 3],\n [3, 4, 5]])\n See Also\n --------\n pieces : Calculate number of pieces available by sliding\n \"\"\"\n if axis >= data.ndim:\n raise ValueError(\n \"Axis value out of range\"\n )\n\n if stepsize < 1:\n raise ValueError(\n \"Stepsize may not be zero or negative\"\n )\n\n if size > data.shape[axis]:\n raise ValueError(\n \"Sliding window size may not exceed size of selected axis\"\n )\n\n shape = list(data.shape)\n shape[axis] = np.floor(data.shape[axis] / stepsize - size / stepsize + 1).astype(int)\n shape.append(size)\n\n strides = list(data.strides)\n strides[axis] *= stepsize\n strides.append(data.strides[axis])\n\n strided = np.lib.stride_tricks.as_strided(\n data, shape=shape, strides=strides\n )\n\n if copy:\n return strided.copy()\n else:\n return strided\n\n\nsample_rate=10\n\n\n \n# model_cnn=load_model('../MLData_2020_7_27/CNN_phase_picking2/DataVol2048000-20200803-202015'+'/CNNclassifier.h5')\n# model_rnn=load_model('../MLData_2020_9_4/RNN_Model/L5_roll_20200905-205008/RNNclassifier.h5')\n\nsig = nn.Sigmoid()\n\npre_model = gMLPmodel.load_from_checkpoint(checkpoint_path='/home/tian_feng/UCLA/gMLP_phase/gMLP_phase/test_trainer/test14/checkpoints/last.ckpt',hparams_file='/home/tian_feng/UCLA/gMLP_phase/gMLP_phase/test_trainer/test14/default/version_0/hparams.yaml')\n \npre_model.eval()\n\n \n# model = Graphmodel(pre_model).load_from_checkpoint(checkpoint_path=os.path.join('/home/tian_feng/UCLA/gMLP_phase/gMLP_phase/GNN/test2','checkpoints/epoch=0-step=45224.ckpt'),pre_model=pre_model)\n \n# model.eval() \n\n\nsta = np.load('/home/tian_feng/UCLA/Greece-2020-10-30/station.npy')\nsta_dict ={}\nfor i in sta:\n sta_dict[i[2]+'.'+i[3]] = (float(i[4]),float(i[5]))\n# print(sta_dict)\n\n\n# date='2020-11-11'\n\nstart=0\nsteps=start+3600*24\n\n\nfor i in glob('/home/tian_feng/UCLA/Greece-2020-10-30/Data2/*')[:1]:\n date=i.split('/')[-1]\n print(date)\n \n files = glob('/home/tian_feng/UCLA/Greece-2020-10-30/Data2/'+date+'/*.mseed')[:4]\n batch_size = len(files)\n loc = np.zeros((batch_size,2))\n \n dataset = np.zeros((60*24,batch_size,3,6000))\n \n \n for idx, path in tqdm(enumerate(files)):\n\n st=read(path)\n sta_name=os.path.basename(path[:-6])\n# print(sta_name)\n# print(sta_dict[sta_name])\n loc[idx,:] = sta_dict[sta_name]\n for slide_idx, windowed_st in enumerate(st.slide(window_length = 60.0, step = 60.0)):\n for i in range(3):\n windowed_st[i].detrend('demean')\n windowed_st[i].filter('bandpass', freqmin = 1.0, freqmax = 45, corners = 2, zerophase = True)\n windowed_st[i].taper(max_percentage = 0.001, type = 'cosine', max_length = 2)\n \n \n dataset[slide_idx, idx, 0, :6000] = np.transpose(windowed_st[1])[:6000] # N\n dataset[slide_idx, idx, 1, :6000] = np.transpose(windowed_st[0])[:6000] # E\n dataset[slide_idx, idx, 2, :6000] = np.transpose(windowed_st[2])[:6000] # Z \n \n# print(dataset)\n row_a=[]\n row_b=[] \n for i in range(batch_size):\n for j in range(batch_size):\n dis = 
locations2degrees(loc[i,0],loc[i,1],loc[j,0],loc[j,1])\n if dis < 1:\n row_a.append(i)\n row_b.append(j)\n edge_index = [row_a,row_b] \n edge_index = torch.tensor(edge_index)\n \n# n_shift = 6000 # Number of samples to shift the sliding window at a time\n# half_dur = 30.00\n# only_dt = 0.01\n# n_win = int(half_dur/only_dt)\n# n_feat = 2*n_win\n \n# slide_save = sliding_window(dataset, n_feat , stepsize=n_shift, axis = 2)\n# slide_save = np.transpose(slide_save, (2,0,1,3)) \n\n np.save('Input_data',dataset)\n slide_v = _normalize(dataset, 'std') \n np.save('Input_max',slide_v)\n \n slide_v = torch.from_numpy(slide_v).float()\n\n print(slide_v.shape)\n Output_single_model = np.zeros_like(slide_v)\n Output_multi_model = np.zeros_like(slide_v)\n print(Output_multi_model.shape)\n\n for i in tqdm(range(slide_v.shape[0])):\n X = slide_v[i] \n# tmp = sig(model.forward([X,0,edge_index])).detach().numpy()\n tmp2 = sig(pre_model.forward(X)).detach().numpy()\n \n# Output_multi_model[i] = tmp\n Output_single_model[i] = tmp2\n\n# np.save('Output_multi_model',Output_multi_model)\n np.save('Output_single_model',Output_single_model)\n \n"
]
| [
[
"scipy.stats.norm.pdf",
"numpy.lib.stride_tricks.as_strided",
"numpy.load",
"numpy.mean",
"numpy.where",
"numpy.sort",
"numpy.max",
"numpy.zeros_like",
"numpy.save",
"numpy.swapaxes",
"torch.tensor",
"numpy.arange",
"numpy.transpose",
"numpy.log10",
"numpy.vstack",
"numpy.array",
"numpy.zeros",
"matplotlib.rc",
"numpy.std",
"numpy.argsort",
"numpy.hstack",
"numpy.floor",
"numpy.isnan",
"torch.nn.Sigmoid",
"numpy.ones",
"torch.from_numpy",
"numpy.atleast_1d",
"numpy.asanyarray"
]
]
|
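The phase-picking script in the row above converts per-sample P/S/noise probabilities into pick times by peak detection: `pick_picks(ts, tt, min_proba, sample_rate)` finds local maxima above `min_proba` that are at least half a second apart, then maps the peak indices onto the time axis. Below is a minimal, hedged sketch of that flow. Assumptions: `scipy.signal.find_peaks` stands in for the bundled `detect_peaks` (same `mph`/`mpd` semantics via `height`/`distance`), and the probability traces are synthetic Gaussians built the way `get_distribution` builds its training labels, not real model output.

# Minimal sketch: synthetic probabilities -> pick times.
import numpy as np
from scipy import stats
from scipy.signal import find_peaks

sample_rate = 10                          # Hz, as in the script above
tt = np.arange(0, 60, 1 / sample_rate)    # time axis for one 60 s window

# Build a P/S/noise probability matrix the way get_distribution does:
# one unit-height Gaussian per pick, noise = 1 - P - S.
p_prob = stats.norm.pdf(tt, 10.0, 0.5); p_prob /= p_prob.max()
s_prob = stats.norm.pdf(tt, 20.0, 0.5); s_prob /= s_prob.max()
ts = np.stack([p_prob, s_prob, 1 - p_prob - s_prob], axis=1)

# pick_picks logic: peaks above min_proba, at least 0.5 s apart.
min_proba = 0.95
itp, _ = find_peaks(ts[:, 0], height=min_proba, distance=0.5 * sample_rate)
its, _ = find_peaks(ts[:, 1], height=min_proba, distance=0.5 * sample_rate)
print("P picks (s):", tt[itp])   # ~[10.]
print("S picks (s):", tt[its])   # ~[20.]

Matching a pick against a catalog arrival then reduces to `pre_recall`'s rule: the pick counts as a true positive when the nearest pick is within the 0.5 s threshold.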
atamazian/kaggle_plant-pathology | [
"f0bfb23ff56470336d602af3da340f29b28518cf"
]
| [
"kaggle_plantpatho/augment.py"
]
| [
"\"\"\"Module to perform efficient preprocess and data augmentation.\"\"\"\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom kornia import augmentation, geometry, image_to_tensor\n# Define the augmentations pipeline\nfrom torch import Tensor\nfrom torchvision import transforms as T\nfrom kaggle_plantpatho import DATASET_IMAGE_MEAN, DATASET_IMAGE_STD\n\n#: default training augmentation\nTORCHVISION_TRAIN_TRANSFORM = T.Compose([\n T.Resize(size=512),\n T.RandomRotation(degrees=30),\n T.RandomPerspective(distortion_scale=0.4),\n T.RandomResizedCrop(size=224),\n T.RandomHorizontalFlip(p=0.5),\n T.RandomVerticalFlip(p=0.5),\n # T.ColorJitter(brightness=0.05, contrast=0.05, saturation=0.05, hue=0.05),\n T.ToTensor(),\n # T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n T.Normalize(DATASET_IMAGE_MEAN, DATASET_IMAGE_STD), # custom\n])\n#: default validation augmentation\nTORCHVISION_VALID_TRANSFORM = T.Compose([\n T.Resize(size=256),\n T.CenterCrop(size=224),\n T.ToTensor(),\n # T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n T.Normalize(DATASET_IMAGE_MEAN, DATASET_IMAGE_STD), # custom\n])\n\n\nclass Resize(nn.Module):\n\n def __init__(self, size: int):\n super().__init__()\n self.size = size\n\n def forward(self, x):\n return geometry.resize(x[None], self.size)[0]\n\n\nclass LitPreprocess(nn.Module):\n \"\"\"Applies the processing to the image in the worker before collate.\"\"\"\n\n def __init__(self, img_size: int):\n super().__init__()\n self.preprocess = nn.Sequential(\n # K.augmentation.RandomResizedCrop((224, 224)),\n Resize((img_size, img_size)), # use this better to see whole image\n augmentation.Normalize(Tensor(DATASET_IMAGE_MEAN), Tensor(DATASET_IMAGE_STD)),\n )\n\n @torch.no_grad()\n def forward(self, x: Tensor) -> Tensor:\n x = image_to_tensor(np.array(x)).float() / 255.\n assert len(x.shape) == 3, x.shape\n out = self.preprocess(x)\n return out[0]\n\n\nclass LitAugmenter(nn.Module):\n \"\"\"Applies random augmentation to a batch of images.\"\"\"\n\n def __init__(self, viz: bool = False):\n super().__init__()\n self.viz = viz\n '''self.geometric = [\n K.augmentation.RandomAffine(60., p=0.75),\n ]'''\n self.augmentations = nn.Sequential(\n augmentation.RandomRotation(degrees=30.),\n augmentation.RandomPerspective(distortion_scale=0.4),\n augmentation.RandomResizedCrop((224, 224)),\n augmentation.RandomHorizontalFlip(p=0.5),\n augmentation.RandomVerticalFlip(p=0.5),\n # K.augmentation.GaussianBlur((3, 3), (0.1, 2.0), p=1.0),\n # K.augmentation.ColorJitter(0.01, 0.01, 0.01, 0.01, p=0.25),\n )\n self.denorm = augmentation.Denormalize(Tensor(DATASET_IMAGE_MEAN), Tensor(DATASET_IMAGE_STD))\n\n @torch.no_grad()\n def forward(self, x: Tensor) -> Tensor:\n assert len(x.shape) == 4, x.shape\n out = x\n # idx = torch.randperm(len(self.geometric))[0] # OneOf\n # out = self.geometric[idx](x)\n out = self.augmentations(out)\n if self.viz:\n out = self.denorm(out)\n return out\n\n\n#: Kornia default augmentations\nKORNIA_TRAIN_TRANSFORM = LitPreprocess(512)\nKORNIA_VALID_TRANSFORM = LitPreprocess(224)\n"
]
| [
[
"numpy.array",
"torch.no_grad",
"torch.Tensor"
]
]
|
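The augmentation module above follows the usual Kornia split: `LitPreprocess` runs per image inside the DataLoader workers (resize + normalize before collate), while `LitAugmenter` applies the random transforms to the whole collated batch at once, typically on the GPU. A minimal sketch of the batched half, under the assumption that random tensors stand in for real, already-preprocessed images:

# Minimal sketch: batch-level Kornia augmentation after collate.
import torch
import torch.nn as nn
from kornia import augmentation

batch_aug = nn.Sequential(
    augmentation.RandomRotation(degrees=30.0),
    augmentation.RandomHorizontalFlip(p=0.5),
)

images = torch.rand(8, 3, 224, 224)   # stand-in for a resized, normalized batch
with torch.no_grad():                  # mirrors the @torch.no_grad() forward above
    out = batch_aug(images)
print(out.shape)                       # torch.Size([8, 3, 224, 224])

Because the transforms are `nn.Module`s operating on 4D tensors, each image in the batch gets its own random parameters while the work is vectorized, which is the main reason to move augmentation out of the per-sample torchvision pipeline.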
dials/iota | [
"0a0569c4c6e666c1410fa1a83715c7198179f83c"
]
| [
"src/iota/components/iota_analysis.py"
]
| [
"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function\nfrom six.moves import range, zip\n\n\"\"\"\nAuthor : Lyubimov, A.Y.\nCreated : 04/07/2015\nLast Changed: 11/21/2019\nDescription : Analyzes integration results and outputs them in an accessible\n format. Includes (optional) unit cell analysis by hierarchical\n clustering (Zeldin, et al., Acta Cryst D, 2013). In case of\n multiple clusters outputs a file with list of integrated pickles\n that comprise each cluster. (The clustering module requires scipy\n and is thus currently suspended.) Populates a PHIL file for PRIME\n with information from integration results (e.g. unit cell,\n resolution, data path, etc.)\n\"\"\"\n\nimport os\nimport numpy as np\nfrom collections import Counter\nimport math\n\nfrom libtbx import easy_pickle as ep\nfrom cctbx import crystal, uctbx, statistics\nfrom cctbx.sgtbx.lattice_symmetry import metric_subgroups\n\nfrom iota import iota_version, now\nimport iota.components.iota_utils as util\n\nfrom prime.postrefine.mod_mx import mx_handler\nfrom prime.postrefine import mod_input\n\n\ndef isprop(v):\n \"\"\"Test if attribute is a property.\"\"\"\n return isinstance(v, property)\n\n\nclass AnalysisResult(object):\n pass\n\n\nclass Plotter(object):\n def __init__(self, params, info):\n\n self.info = info\n self.params = params\n self.final_objects = self.info.get_final_objects()\n\n self.hm_file = os.path.join(self.info.viz_base, \"heatmap.pdf\")\n self.hi_file = os.path.join(self.info.viz_base, \"res_histogram.pdf\")\n self.xy_file = os.path.join(self.info.viz_base, \"beamXY.pdf\")\n\n self.font = {\"fontfamily\": \"sans-serif\", \"fontsize\": 12}\n\n def plot_spotfinding_heatmap(self, write_files=False):\n\n import matplotlib.pyplot as plt\n\n hlist = [i.final[\"sph\"] for i in self.final_objects]\n alist = [i.final[\"spa\"] for i in self.final_objects]\n\n ch = max(hlist) - min(hlist) + 1\n ca = max(alist) - min(alist) + 1\n ints = [(i.final[\"sph\"], i.final[\"spa\"]) for i in self.final_objects]\n ic = Counter(ints)\n\n hm_data = np.zeros((ch, ca))\n for i in ic.items():\n hm_data[i[0][0] - min(hlist), i[0][1] - min(alist)] = i[1]\n\n rows = range(min(hlist), max(hlist) + 1)\n cols = range(min(alist), max(alist) + 1)\n row_labels = [str(i) for i in rows]\n col_labels = [str(j) for j in cols]\n\n fig, ax = plt.subplots()\n fig.canvas.draw()\n heatmap = plt.pcolor(hm_data, cmap=\"Reds\")\n\n ax.set_yticks(np.arange(len(rows)) + 0.5, minor=False)\n ax.set_xticks(np.arange(len(cols)) + 0.5, minor=False)\n ax.set_yticklabels(row_labels, minor=False)\n ax.set_xticklabels(col_labels, minor=False)\n ax.set_xlabel(\"Spot area\")\n ax.set_ylabel(\"Spot height\")\n\n plt.gca().set_xlim(0, len(cols))\n plt.gca().set_ylim(0, len(rows))\n\n # Annotate\n for y in range(hm_data.shape[0]):\n for x in range(hm_data.shape[1]):\n plt.text(\n x + 0.5,\n y + 0.5,\n \"%3d\" % hm_data[y, x],\n horizontalalignment=\"center\",\n verticalalignment=\"center\",\n )\n\n if write_files:\n fig.savefig(self.hm_file, format=\"pdf\", bbox_inches=0)\n else:\n plt.show()\n\n def calculate_beam_xy(self):\n \"\"\"calculates beam xy and other parameters.\"\"\"\n info = []\n\n # Import relevant info\n pixel_size = self.info.pixel_size\n for i in [j.final for j in self.final_objects]:\n try:\n info.append(\n [\n i,\n i[\"beamX\"],\n i[\"beamY\"],\n i[\"wavelength\"],\n i[\"distance\"],\n (i[\"a\"], i[\"b\"], i[\"c\"], i[\"alpha\"], i[\"beta\"], i[\"gamma\"]),\n ]\n )\n except IOError as e:\n print(\"IOTA ANALYSIS 
ERROR: BEAMXY failed! \", e)\n pass\n\n # Calculate beam center coordinates and distances\n beamX = [i[1] for i in info]\n beamY = [j[2] for j in info]\n beam_dist = [\n math.hypot(i[1] - np.median(beamX), i[2] - np.median(beamY)) for i in info\n ]\n beam_dist_std = np.std(beam_dist)\n img_list = [\n [i[0], i[1], i[2], i[3], i[4], i[5], j]\n for i, j in list(zip(info, beam_dist))\n ]\n\n # Separate out outliers\n outliers = [i for i in img_list if i[3] > 2 * beam_dist_std]\n clean = [i for i in img_list if i[3] <= 2 * beam_dist_std]\n cbeamX = [i[1] for i in clean]\n cbeamY = [j[2] for j in clean]\n obeamX = [i[1] for i in outliers]\n obeamY = [j[2] for j in outliers]\n\n # Calculate median wavelength, detector distance and unit cell params from\n # non-outliers only\n wavelengths = [i[3] for i in clean]\n distances = [i[4] for i in clean]\n cells = [i[5] for i in clean]\n\n wavelength = np.median(wavelengths)\n det_distance = np.median(distances)\n a = np.median([i[0] for i in cells])\n b = np.median([i[1] for i in cells])\n c = np.median([i[2] for i in cells])\n\n # Calculate predicted L +/- 1 misindexing distance for each cell edge\n aD = det_distance * math.tan(2 * math.asin(wavelength / (2 * a)))\n bD = det_distance * math.tan(2 * math.asin(wavelength / (2 * b)))\n cD = det_distance * math.tan(2 * math.asin(wavelength / (2 * c)))\n\n return (\n beamX,\n beamY,\n cbeamX,\n cbeamY,\n obeamX,\n obeamY,\n beam_dist,\n [i[4] for i in info],\n aD,\n bD,\n cD,\n pixel_size,\n )\n\n def plot_beam_xy(self, write_files=False, return_values=False, threeD=False):\n \"\"\"Plot beam center coordinates and a histogram of distances from the\n median of beam center coordinates to each set of coordinates.\n\n Superpose a predicted mis-indexing shift by L +/- 1 (calculated\n for each axis).\n \"\"\"\n\n import matplotlib.pyplot as plt\n\n # Get values\n (\n beamX,\n beamY,\n cbeamX,\n cbeamY,\n obeamX,\n obeamY,\n beam_dist,\n distances,\n aD,\n bD,\n cD,\n pixel_size,\n ) = self.calculate_beam_xy()\n\n # Plot figure\n if threeD:\n fig = plt.figure(figsize=(8, 8))\n ax1 = fig.add_subplot(111, projection=\"3d\")\n else:\n fig = plt.figure(figsize=(9, 13))\n gsp = gridspec.GridSpec(2, 1, height_ratios=[3, 1])\n ax1 = fig.add_subplot(gsp[0, :], aspect=\"equal\")\n\n # Calculate axis limits of beam center scatter plot\n ax1_delta = np.ceil(np.max(beam_dist))\n xmax = round(np.median(beamX) + ax1_delta)\n xmin = round(np.median(beamX) - ax1_delta)\n ymax = round(np.median(beamY) + ax1_delta)\n ymin = round(np.median(beamY) - ax1_delta)\n zmax = round(np.ceil(np.max(distances)))\n zmin = round(np.floor(np.min(distances)))\n\n ax1.set_xlim(xmin, xmax)\n ax1.set_ylim(ymin, ymax)\n if threeD:\n ax1.set_zlim(zmin, zmax)\n\n # Plot beam center scatter plot\n if threeD:\n ax1.scatter(beamX, beamY, distances, alpha=1, s=20, c=\"grey\", lw=1)\n ax1.plot(\n [np.median(beamX)],\n [np.median(beamY)],\n [np.median(distances)],\n markersize=8,\n marker=\"o\",\n c=\"yellow\",\n lw=2,\n )\n else:\n ax1.scatter(cbeamX, cbeamY, alpha=1, s=20, c=\"grey\", lw=1)\n ax1.scatter(obeamX, obeamY, alpha=1, s=20, c=\"red\", lw=1)\n ax1.plot(\n np.median(beamX),\n np.median(beamY),\n markersize=8,\n marker=\"o\",\n c=\"yellow\",\n lw=2,\n )\n\n # Plot projected mis-indexing limits for all three axes\n circle_a = plt.Circle(\n (np.median(beamX), np.median(beamY)),\n radius=aD,\n color=\"r\",\n fill=False,\n clip_on=True,\n )\n circle_b = plt.Circle(\n (np.median(beamX), np.median(beamY)),\n radius=bD,\n color=\"g\",\n fill=False,\n 
clip_on=True,\n )\n circle_c = plt.Circle(\n (np.median(beamX), np.median(beamY)),\n radius=cD,\n color=\"b\",\n fill=False,\n clip_on=True,\n )\n ax1.add_patch(circle_a)\n ax1.add_patch(circle_b)\n ax1.add_patch(circle_c)\n\n # Set labels\n ax1.set_xlabel(\"BeamX (mm)\", fontsize=15)\n ax1.set_ylabel(\"BeamY (mm)\", fontsize=15)\n if threeD:\n ax1.set_zlabel(\"Distance (mm)\", fontsize=15)\n ax1.set_title(\"Beam XYZ Coordinates\")\n else:\n ax1.set_title(\"Beam XY Coordinates\")\n\n if not threeD:\n # Plot histogram of distances to each beam center from median\n ax2 = fig.add_subplot(gsp[1, :])\n ax2_n, ax2_bins, ax2_patches = plt.hist(\n beam_dist, 20, facecolor=\"b\", alpha=0.75, histtype=\"stepfilled\"\n )\n ax2_height = (np.max(ax2_n) + 9) // 10 * 10\n ax2.axis([0, np.max(beam_dist), 0, ax2_height])\n ax2.set_xlabel(\"Distance from median (mm)\", fontsize=15)\n ax2.set_ylabel(\"No. of images\", fontsize=15)\n\n if write_files:\n fig.savefig(self.xy_file, format=\"pdf\", bbox_inches=0)\n else:\n plt.show()\n\n if return_values:\n return np.median(beamX), np.median(beamY), pixel_size\n\n def plot_res_histogram(self, write_files=False):\n\n import matplotlib.pyplot as plt\n\n # Get resolution values\n hres = [i.final[\"res\"] for i in self.final_objects]\n lres = [i.final[\"lres\"] for i in self.final_objects]\n\n # Plot figure\n fig = plt.figure(figsize=(9, 13))\n gsp = gridspec.GridSpec(2, 1)\n hr = fig.add_subplot(gsp[0, :])\n hr_n, hr_bins, hr_patches = plt.hist(\n hres, 20, facecolor=\"b\", alpha=0.75, histtype=\"stepfilled\"\n )\n hr_height = (np.max(hr_n) + 9) // 10 * 10\n hr.axis([np.min(hres), np.max(hres), 0, hr_height])\n reslim = \"High Resolution Limit ({})\".format(r\"$\\AA$\")\n hr.set_xlabel(reslim, fontsize=15)\n hr.set_ylabel(\"No. of frames\", fontsize=15)\n\n lr = fig.add_subplot(gsp[1, :])\n lr_n, lr_bins, lr_patches = plt.hist(\n lres, 20, facecolor=\"b\", alpha=0.75, histtype=\"stepfilled\"\n )\n lr_height = (np.max(lr_n) + 9) // 10 * 10\n lr.axis([np.min(lres), np.max(lres), 0, lr_height])\n reslim = \"Low Resolution Limit ({})\".format(r\"$\\AA$\")\n lr.set_xlabel(reslim, fontsize=15)\n lr.set_ylabel(\"No. 
of frames\", fontsize=15)\n\n if write_files:\n fig.savefig(self.hi_file, format=\"pdf\", bbox_inches=0)\n else:\n plt.show()\n\n\nclass Analyzer(object):\n \"\"\"Class to analyze integration results.\"\"\"\n\n def __init__(self, info=None, params=None, gui_mode=False):\n\n self.info = info\n self.params = params\n self.gui_mode = gui_mode\n\n # Attributes for LivePRIME override\n self.best_pg = None\n self.best_uc = None\n\n def get_results(self, finished_objects=None):\n if not finished_objects:\n finished_objects = self.info.get_finished_objects()\n if not finished_objects:\n return False\n final_objects = []\n\n self.info.unplotted_stats = {}\n for key in self.info.stats:\n self.info.unplotted_stats[key] = dict(lst=[])\n\n for obj in finished_objects:\n item = [obj.input_index, obj.img_path, obj.img_index]\n if len(self.info.unprocessed) > 0 and item in self.info.unprocessed:\n self.info.unprocessed.remove(item)\n if (\n len(self.info.categories[\"not_processed\"][0]) > 0\n and item in self.info.categories[\"not_processed\"][0]\n ):\n self.info.categories[\"not_processed\"][0].remove(item)\n\n if obj.fail:\n key = obj.fail.replace(\" \", \"_\")\n if key in self.info.categories:\n self.info.categories[key][0].append(item)\n else:\n self.info.categories[\"integrated\"][0].append(obj.final[\"final\"])\n self.info.final_objects.append(obj.obj_file)\n final_objects.append(obj)\n\n if not obj.fail or \"triage\" not in obj.fail:\n self.info.categories[\"have_diffraction\"][0].append(obj.img_path)\n\n # Calculate processing stats from final objects\n if final_objects:\n self.info.pixel_size = final_objects[0].final[\"pixel_size\"]\n\n # Get observations from file\n try:\n all_obs = ep.load(self.info.idx_file)\n except Exception:\n all_obs = None\n\n # Collect image processing stats\n for obj in final_objects:\n for key in self.info.stats:\n if key in obj.final:\n stat_tuple = (\n obj.input_index,\n obj.img_path,\n obj.img_index,\n obj.final[key],\n )\n self.info.stats[key][\"lst\"].append(stat_tuple)\n\n # add proc filepath info to 'pointers'\n pointer_dict = {\n \"img_file\": obj.img_path,\n \"obj_file\": obj.obj_file,\n \"img_index\": obj.img_index,\n \"experiments\": obj.eint_path,\n \"reflections\": obj.rint_path,\n }\n self.info.pointers[str(obj.input_index)] = pointer_dict\n\n if key not in self.info.unplotted_stats:\n self.info.unplotted_stats[key] = dict(lst=[])\n self.info.unplotted_stats[key][\"lst\"].append(stat_tuple)\n\n # Unit cells and space groups (i.e. 
cluster iterable)\n self.info.cluster_iterable.append(\n [\n float(obj.final[\"a\"]),\n float(obj.final[\"b\"]),\n float(obj.final[\"c\"]),\n float(obj.final[\"alpha\"]),\n float(obj.final[\"beta\"]),\n float(obj.final[\"gamma\"]),\n str(obj.final[\"sg\"]),\n ]\n )\n\n # Get observations from this image\n obs = None\n if \"observations\" in obj.final:\n obs = obj.final[\"observations\"].as_non_anomalous_array()\n else:\n pickle_path = obj.final[\"final\"]\n if os.path.isfile(pickle_path):\n try:\n pickle = ep.load(pickle_path)\n obs = pickle[\"observations\"][0].as_non_anomalous_array()\n except Exception as e:\n print(\n \"IMAGE_PICKLE_ERROR for {}: {}\".format(pickle_path, e)\n )\n\n with util.Capturing():\n if obs:\n # Append observations to combined miller array\n obs = obs.expand_to_p1()\n if all_obs:\n all_obs = all_obs.concatenate(\n obs, assert_is_similar_symmetry=False\n )\n else:\n all_obs = obs\n\n # Get B-factor from this image\n try:\n mxh = mx_handler()\n asu_contents = mxh.get_asu_contents(500)\n observations_as_f = obs.as_amplitude_array()\n observations_as_f.setup_binner(auto_binning=True)\n wp = statistics.wilson_plot(\n observations_as_f, asu_contents, e_statistics=True\n )\n b_factor = wp.wilson_b\n except RuntimeError as e:\n b_factor = 0\n print(\"B_FACTOR_ERROR: \", e)\n self.info.b_factors.append(b_factor)\n\n # Save collected observations to file\n if all_obs:\n ep.dump(self.info.idx_file, all_obs)\n\n # Calculate dataset stats\n for k in self.info.stats:\n stat_list = list(zip(*self.info.stats[k][\"lst\"]))[3]\n stats = dict(\n lst=self.info.stats[k][\"lst\"],\n median=np.median(stat_list).item(),\n mean=np.mean(stat_list).item(),\n std=np.std(stat_list).item(),\n max=np.max(stat_list).item(),\n min=np.min(stat_list).item(),\n cons=Counter(stat_list).most_common(1)[0][0],\n )\n self.info.stats[k].update(stats)\n return True\n else:\n return False\n\n def print_results(self, final_table=None):\n \"\"\"Prints diagnostics from the final integration run.\"\"\"\n\n assert self.info\n\n if not final_table:\n final_table = [\"\\n\\n{:-^80}\\n\".format(\"ANALYSIS OF RESULTS\")]\n\n if not self.info.categories[\"integrated\"]:\n final_table.append(\"NO IMAGES INTEGRATED!\")\n else:\n label_lens = [len(v[\"label\"]) for k, v in self.info.stats.items()]\n max_label = int(5 * round(float(np.max(label_lens)) / 5)) + 5\n for k, v in self.info.stats.items():\n if k in (\"lres\", \"res\", \"beamX\", \"beamY\"):\n continue\n line = (\n \"{: <{l}}: max = {:<6.2f} min = {:<6.2f} \"\n \"avg = {:<6.2f} ({:<6.2f})\"\n \"\".format(\n v[\"label\"], v[\"max\"], v[\"min\"], v[\"mean\"], v[\"std\"], l=max_label\n )\n )\n final_table.append(line)\n\n # TODO: Figure out what to do with summary charts\n # # If more than one integrated image, plot various summary graphs\n # if len(self.info.categories['integrated']) > 1:\n # plot = Plotter(self.params, self.info)\n # if self.params.analysis.summary_graphs:\n # if ( self.params.advanced.processing_backend == 'ha14' and\n # self.params.cctbx_ha14.grid_search.type is not None\n # ):\n # plot.plot_spotfinding_heatmap(write_files=True)\n # plot.plot_res_histogram(write_files=True)\n # med_beamX, med_beamY, pixel_size = plot.plot_beam_xy(write_files=True,\n # return_values=True)\n # else:\n # with warnings.catch_warnings():\n # # To catch any 'mean of empty slice' runtime warnings\n # warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n # beamXY_info = plot.calculate_beam_xy()\n # beamX, beamY = beamXY_info[:2]\n # med_beamX = 
np.median(beamX)\n # med_beamY = np.median(beamY)\n # pixel_size = beamXY_info[-1]\n\n final_table.append(\n \"{: <{l}}: X = {:<4.2f}, Y = {:<4.2f}\"\n \"\".format(\n \"Median Beam Center\",\n self.info.stats[\"beamX\"][\"mean\"],\n self.info.stats[\"beamY\"][\"mean\"],\n l=max_label,\n )\n )\n\n # Special entry for resolution last\n v = self.info.stats[\"res\"]\n final_table.append(\n \"{: <{l}}: low = {:<6.2f} high = {:<6.2f} \"\n \"avg = {:<6.2f} ({:<6.2f})\"\n \"\".format(\n v[\"label\"], v[\"max\"], v[\"min\"], v[\"mean\"], v[\"std\"], l=max_label\n )\n )\n\n for item in final_table:\n util.main_log(self.info.logfile, item, False)\n self.info.update(final_table=final_table)\n\n def unit_cell_analysis(self):\n \"\"\"Calls unit cell analysis module, which uses hierarchical clustering\n (Zeldin, et al, Acta D, 2015) to split integration results according to\n detected morphological groupings (if any).\n\n Most useful with preliminary integration without target unit\n cell specified.\n \"\"\"\n\n # Will not run clustering if only one integration result found or if turned off\n if not self.info.categories[\"integrated\"]:\n util.main_log(\n self.info.logfile, \"\\n\\n{:-^80}\\n\".format(\" UNIT CELL ANALYSIS \"), True\n )\n util.main_log(self.info.logfile, \"\\n UNIT CELL CANNOT BE DETERMINED!\", True)\n\n elif len(self.info.categories[\"integrated\"]) == 1:\n unit_cell = self.info.cluster_iterable[0][:5]\n point_group = self.info.cluster_iterable[0][6]\n util.main_log(\n self.info.logfile, \"\\n\\n{:-^80}\\n\".format(\" UNIT CELL ANALYSIS \"), True\n )\n uc_line = (\n \"{:<6} {:^4}: {:<6.2f}, {:<6.2f}, {:<6.2f}, {:<6.2f}, \"\n \"{:<6.2f}, {:<6.2f}\".format(\n \"(1)\",\n point_group,\n unit_cell[0],\n unit_cell[1],\n unit_cell[2],\n unit_cell[3],\n unit_cell[4],\n unit_cell[5],\n )\n )\n util.main_log(self.info.logfile, uc_line, True)\n\n self.info.best_pg = str(point_group)\n self.info.best_uc = unit_cell\n\n else:\n uc_table = []\n uc_summary = []\n\n if self.params.analysis.clustering.flag_on:\n # run hierarchical clustering analysis\n from xfel.clustering.cluster import Cluster\n\n counter = 0\n self.info.clusters = []\n\n threshold = self.params.analysis.clustering.threshold\n cluster_limit = self.params.analysis.clustering.limit\n final_pickles = self.info.categories[\"integrated\"][0]\n\n pickles = []\n if self.params.analysis.clustering.n_images > 0:\n import random\n\n for i in range(len(self.params.analysis.clustering.n_images)):\n random_number = random.randrange(0, len(final_pickles))\n if final_pickles[random_number] in pickles:\n while final_pickles[random_number] in pickles:\n random_number = random.randrange(0, len(final_pickles))\n pickles.append(final_pickles[random_number])\n else:\n pickles = final_pickles\n\n # Cluster from files (slow, but will keep for now)\n ucs = Cluster.from_files(pickle_list=pickles)\n\n # Do clustering\n clusters, _ = ucs.ab_cluster(\n threshold=threshold,\n log=False,\n write_file_lists=False,\n schnell=False,\n doplot=False,\n )\n uc_table.append(\"\\n\\n{:-^80}\\n\" \"\".format(\" UNIT CELL ANALYSIS \"))\n\n # extract clustering info and add to summary output list\n if cluster_limit is None:\n if len(pickles) / 10 >= 10:\n cluster_limit = 10\n else:\n cluster_limit = len(pickles) / 10\n\n for cluster in clusters:\n sorted_pg_comp = sorted(\n cluster.pg_composition.items(), key=lambda x: -1 * x[1]\n )\n pg_nums = [pg[1] for pg in sorted_pg_comp]\n cons_pg = sorted_pg_comp[np.argmax(pg_nums)]\n\n if len(cluster.members) > cluster_limit:\n 
counter += 1\n\n # Write to file\n cluster_filenames = [j.path for j in cluster.members]\n if self.params.analysis.clustering.write_files:\n output_file = os.path.join(\n self.info.int_base, \"uc_cluster_{}.lst\".format(counter)\n )\n for fn in cluster_filenames:\n with open(output_file, \"a\") as scf:\n scf.write(\"{}\\n\".format(fn))\n\n mark_output = os.path.basename(output_file)\n else:\n mark_output = \"*\"\n output_file = None\n\n else:\n mark_output = \"\"\n output_file = None\n\n # Populate clustering info for GUI display\n uc_init = uctbx.unit_cell(cluster.medians)\n symmetry = crystal.symmetry(\n unit_cell=uc_init, space_group_symbol=\"P1\"\n )\n groups = metric_subgroups(input_symmetry=symmetry, max_delta=3)\n top_group = groups.result_groups[0]\n best_sg = str(groups.lattice_group_info()).split(\"(\")[0]\n best_uc = top_group[\"best_subsym\"].unit_cell().parameters()\n # best_sg = str(top_group['best_subsym'].space_group_info())\n\n uc_no_stdev = (\n \"{:<6.2f} {:<6.2f} {:<6.2f} \"\n \"{:<6.2f} {:<6.2f} {:<6.2f} \"\n \"\".format(\n best_uc[0],\n best_uc[1],\n best_uc[2],\n best_uc[3],\n best_uc[4],\n best_uc[5],\n )\n )\n cluster_info = {\n \"number\": len(cluster.members),\n \"pg\": best_sg,\n \"uc\": uc_no_stdev,\n \"filename\": mark_output,\n }\n self.info.clusters.append(cluster_info)\n\n # format and record output\n # TODO: How to propagate stdevs after conversion from Niggli?\n # uc_line = \"{:<6} {:^4}: {:<6.2f} ({:>5.2f}), {:<6.2f} ({:>5.2f}), \"\\\n # \"{:<6.2f} ({:>5.2f}), {:<6.2f} ({:>5.2f}), \"\\\n # \"{:<6.2f} ({:>5.2f}), {:<6.2f} ({:>5.2f}) \"\\\n # \"{}\".format('({})'.format(len(cluster.members)), cons_pg[0],\n # cluster.medians[0], cluster.stdevs[0],\n # cluster.medians[1], cluster.stdevs[1],\n # cluster.medians[2], cluster.stdevs[2],\n # cluster.medians[3], cluster.stdevs[3],\n # cluster.medians[4], cluster.stdevs[4],\n # cluster.medians[5], cluster.stdevs[5],\n # mark_output)\n # uc_table.append(uc_line)\n uc_table.append(\n \"{:<6}: {} {}\".format(\n len(cluster.members), uc_no_stdev, mark_output\n )\n )\n lattices = \", \".join(\n [\"{} ({})\".format(i[0], i[1]) for i in sorted_pg_comp]\n )\n # uc_info = [len(cluster.members), cons_pg[0], cluster.medians,\n # output_file, uc_line, lattices]\n uc_info = [\n len(cluster.members),\n best_sg,\n best_uc,\n output_file,\n uc_no_stdev,\n lattices,\n ]\n uc_summary.append(uc_info)\n\n else:\n # generate average unit cell\n uc_table.append(\n \"\\n\\n{:-^80}\\n\" \"\".format(\" UNIT CELL AVERAGING (no clustering) \")\n )\n uc_a, uc_b, uc_c, uc_alpha, uc_beta, uc_gamma, uc_sg = list(\n zip(*self.info.cluster_iterable)\n )\n cons_pg = Counter(uc_sg).most_common(1)[0][0]\n all_pgs = Counter(uc_sg).most_common()\n unit_cell = (\n np.median(uc_a),\n np.median(uc_b),\n np.median(uc_c),\n np.median(uc_alpha),\n np.median(uc_beta),\n np.median(uc_gamma),\n )\n\n # Populate clustering info for GUI display\n uc_init = uctbx.unit_cell(unit_cell)\n symmetry = crystal.symmetry(unit_cell=uc_init, space_group_symbol=\"P1\")\n groups = metric_subgroups(input_symmetry=symmetry, max_delta=3)\n top_group = groups.result_groups[0]\n best_sg = str(groups.lattice_group_info()).split(\"(\")[0]\n best_uc = top_group[\"best_subsym\"].unit_cell().parameters()\n # best_sg = str(top_group['best_subsym'].space_group_info())\n\n uc_no_stdev = (\n \"{:<6.2f} {:<6.2f} {:<6.2f} \"\n \"{:<6.2f} {:<6.2f} {:<6.2f} \"\n \"\".format(\n best_uc[0],\n best_uc[1],\n best_uc[2],\n best_uc[3],\n best_uc[4],\n best_uc[5],\n )\n )\n cluster_info = {\n 
\"number\": len(self.info.cluster_iterable),\n \"pg\": best_sg,\n \"uc\": uc_no_stdev,\n \"filename\": None,\n }\n self.info.clusters.append(cluster_info)\n\n # uc_line = \"{:<6} {:^4}: {:<6.2f} ({:>5.2f}), {:<6.2f} ({:>5.2f}), \" \\\n # \"{:<6.2f} ({:>5.2f}), {:<6.2f} ({:>5.2f}), \" \\\n # \"{:<6.2f} ({:>5.2f}), {:<6.2f} ({:>5.2f}) \" \\\n # \"{}\".format('({})'.format(len(self.final_objects)), cons_pg,\n # np.median(uc_a), np.std(uc_a),\n # np.median(uc_b), np.std(uc_b),\n # np.median(uc_c), np.std(uc_c),\n # np.median(uc_alpha), np.std(uc_alpha),\n # np.median(uc_beta), np.std(uc_beta),\n # np.median(uc_gamma), np.std(uc_gamma), '')\n #\n # uc_table.append(uc_line)\n uc_table.append(uc_no_stdev)\n lattices = \", \".join([\"{} ({})\".format(i[0], i[1]) for i in all_pgs])\n # uc_info = [len(self.final_objects), cons_pg, unit_cell, None,\n # uc_line, lattices]\n uc_info = [\n len(self.info.cluster_iterable),\n best_sg,\n best_uc,\n None,\n uc_no_stdev,\n lattices,\n ]\n uc_summary.append(uc_info)\n\n uc_table.append(\"\\nMost common unit cell:\\n\")\n\n # select the most prevalent unit cell (most members in cluster)\n uc_freqs = [i[0] for i in uc_summary]\n uc_pick = uc_summary[np.argmax(uc_freqs)]\n uc_table.append(uc_pick[4])\n uc_table.append(\n \"\\nBravais Lattices in Biggest Cluster: {}\" \"\".format(uc_pick[5])\n )\n self.info.best_pg = str(uc_pick[1])\n self.info.best_uc = uc_pick[2]\n\n if uc_pick[3] is not None:\n self.prime_data_path = uc_pick[3]\n\n for item in uc_table:\n util.main_log(self.info.logfile, item, False)\n self.info.update(uc_table=uc_table)\n\n if self.gui_mode:\n return self.info.clusters\n\n def print_summary(self, write_files=True):\n \"\"\"Prints summary and appends to general log file.\n\n Also outputs some of it on stdout. 
Also writes out output list\n files.\n \"\"\"\n\n assert self.info\n\n if not self.info.categories[\"integrated\"]:\n util.main_log(\n self.info.logfile,\n \"NO IMAGES SUCCESSFULLY PROCESSSED!\",\n (not self.gui_mode),\n )\n return\n\n summary = []\n summary.append(\"\\n\\n{:-^80}\\n\".format(\"SUMMARY\"))\n categories = [\n \"total\",\n \"failed_triage\",\n \"have_diffraction\",\n \"failed_spotfinding\",\n \"failed_indexing\",\n \"failed_grid_search\",\n \"failed_integration\",\n \"failed_filter\",\n \"integrated\",\n ]\n for cat in categories:\n lst, fail, fn, _ = self.info.categories[cat]\n path = os.path.join(self.info.int_base, fn)\n if len(lst) > 0 or cat in (\"integrated\", \"diffraction\"):\n summary.append(\"{: <20}: {}\".format(\"{} \".format(fail), len(lst)))\n with open(path, \"w\") as cf:\n for item in lst:\n if isinstance(item, tuple) or isinstance(item, list):\n item = \", \".join([str(i) for i in item])\n cf.write(\"{}\\n\".format(item))\n if cat == \"integrated\" and write_files:\n if not hasattr(self, \"prime_data_path\"):\n self.prime_data_path = path\n\n summary.append(\"\\n\\nIOTA version {0}\".format(iota_version))\n summary.append(\"{}\\n\".format(now))\n\n for item in summary:\n util.main_log(self.info.logfile, \"{}\".format(item), False)\n self.info.update(summary=summary)\n\n def make_prime_input(self, filename=\"prime.phil\", run_zero=False):\n \"\"\"Imports default PRIME input parameters, modifies correct entries and\n prints out a starting PHIL file to be used with PRIME.\"\"\"\n assert self.info\n\n pixel_size = self.info.pixel_size\n hres = self.info.stats[\"res\"]\n lres = self.info.stats[\"lres\"]\n\n # If symmetry / unit cell were not overridden from GUI, set from INFO\n if not self.best_pg:\n try:\n self.best_pg = self.info.best_pg.replace(\" \", \"\")\n except AttributeError as e:\n print(\"PRIME INPUT ERROR, SPACE GROUP: \", e)\n self.best_pg = \"P1\"\n\n if not self.best_uc:\n self.best_uc = self.info.best_uc\n\n # Determine crystal system from crystal symmetry\n sym = crystal.symmetry(space_group_symbol=self.best_pg)\n crystal_system = str(sym.space_group().crystal_system())\n\n # Determine number of images for indexing ambiguity resolution\n # My default: 1/2 of images or 300, whichever is smaller\n if len(self.info.categories[\"integrated\"]) >= 600:\n idx_ambiguity_sample = 300\n idx_ambiguity_selected = 100\n else:\n idx_ambiguity_sample = int(\n round(len(self.info.categories[\"integrated\"]) / 2)\n )\n idx_ambiguity_selected = int(round(idx_ambiguity_sample / 3))\n\n # Set run number to 000 if running LivePRIME\n out_dir = os.path.join(os.path.dirname(self.prime_data_path), \"prime\")\n if run_zero:\n run_path = os.path.join(out_dir, \"000\")\n else:\n run_path = util.set_base_dir(out_dir=out_dir)\n\n # Populate pertinent data parameters\n prime_params = mod_input.master_phil.extract()\n prime_params.run_no = run_path\n prime_params.data = [self.prime_data_path]\n prime_params.title = \"Auto-generated by IOTA v{} on {}\" \"\".format(\n iota_version, now\n )\n prime_params.scale.d_min = hres[\"mean\"]\n prime_params.scale.d_max = 8\n prime_params.postref.scale.d_min = hres[\"mean\"]\n prime_params.postref.scale.d_max = lres[\"max\"]\n prime_params.postref.crystal_orientation.d_min = hres[\"mean\"]\n prime_params.postref.crystal_orientation.d_max = lres[\"max\"]\n prime_params.postref.reflecting_range.d_min = hres[\"mean\"]\n prime_params.postref.reflecting_range.d_max = lres[\"max\"]\n prime_params.postref.unit_cell.d_min = hres[\"mean\"]\n 
prime_params.postref.unit_cell.d_max = lres[\"max\"]\n prime_params.postref.allparams.d_min = hres[\"mean\"]\n prime_params.postref.allparams.d_max = lres[\"max\"]\n prime_params.merge.d_min = hres[\"mean\"]\n prime_params.merge.d_max = lres[\"max\"]\n prime_params.target_unit_cell = uctbx.unit_cell(self.best_uc)\n prime_params.target_space_group = self.best_pg\n prime_params.target_crystal_system = crystal_system\n prime_params.pixel_size_mm = pixel_size\n prime_params.n_residues = 500\n prime_params.indexing_ambiguity.n_sample_frames = idx_ambiguity_sample\n prime_params.indexing_ambiguity.n_selected_frames = idx_ambiguity_selected\n\n # Determine which queue to run on (i.e. match IOTA queue)\n # Modify specific options based in IOTA settings\n # Queue options\n if self.params.mp.method == \"lsf\" and self.params.mp.queue is not None:\n prime_params.queue.mode = \"bsub\"\n prime_params.queue.qname = self.params.mp.queue\n\n # Number of processors (automatically, 1/2 of IOTA procs)\n prime_params.n_processors = int(self.params.mp.n_processors / 2)\n\n # Generate PRIME param PHIL\n prime_phil = mod_input.master_phil.format(python_object=prime_params)\n prime_file = os.path.join(self.info.int_base, filename)\n with open(prime_file, \"w\") as pf:\n pf.write(prime_phil.as_str())\n\n return prime_phil\n\n def run_get_results(self, finished_objects=None):\n self.info.have_results = self.get_results(finished_objects=finished_objects)\n return self.info.have_results\n\n def run_all(self, get_results=True):\n if get_results:\n self.info.have_results = self.get_results()\n\n if self.info.have_results:\n try:\n self.print_results()\n except Exception as e:\n error = \"IOTA PRINTING ERROR: \" + e\n self.info.errors.append(error)\n\n try: # Using try block because it can fail silently\n self.unit_cell_analysis()\n except Exception as e:\n error = \"IOTA CLUSTERING ERROR: \" + e\n self.info.errors.append(error)\n\n try:\n self.print_summary()\n except Exception as e:\n error = \"IOTA SUMMARY ERROR: \" + e\n self.info.errors.append(error)\n\n try:\n self.make_prime_input()\n except Exception as e:\n error = \"IOTA PRIME INPUT ERROR: \" + e\n self.info.errors.append(error)\n\n return self.info\n"
]
| [
[
"numpy.max",
"matplotlib.pyplot.text",
"numpy.zeros",
"numpy.median",
"matplotlib.pyplot.pcolor",
"numpy.min",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.std",
"matplotlib.pyplot.hist",
"numpy.argmax",
"matplotlib.pyplot.show",
"matplotlib.pyplot.gca"
]
]
|
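One reusable piece of the IOTA analysis code above is the beam-center sanity check in `Plotter.calculate_beam_xy`: each image's beam center is scored by its distance from the median center, and anything beyond twice the standard deviation of those distances is set aside as an outlier before wavelength, detector distance, and unit-cell medians are computed. A minimal sketch with synthetic beam centers (all values below are made up for illustration):

# Minimal sketch: median-distance outlier split, as in calculate_beam_xy.
import numpy as np

rng = np.random.default_rng(0)
beamX = rng.normal(105.0, 0.05, 200)
beamY = rng.normal(102.0, 0.05, 200)
beamX[:5] += 2.0                      # a few badly indexed images

beam_dist = np.hypot(beamX - np.median(beamX), beamY - np.median(beamY))
cut = 2 * np.std(beam_dist)
clean = beam_dist <= cut              # used for the median wavelength/cell
outliers = ~clean                     # plotted in red in plot_beam_xy
print(outliers.sum(), "outliers of", beam_dist.size)

Using the median rather than the mean as the reference point keeps the cut robust: a handful of grossly mis-indexed images shifts the mean but barely moves the median.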