class torch.nn.KLDivLoss(size_average=True)
来源: 时间:2026-04-26 10:08
# EEG motor-imagery classification: CTNet-style patch-embedding CNN followed by a
# Transformer encoder whose Q/K/V are produced by GRN-v3 modules from a stack of
# all previous layer outputs (a "DCA" dense-cross-attention scheme).
#
# NOTE(review): this file was recovered from a whitespace-mangled copy; the
# reconstruction preserves the original statement order and all runtime logic.
import os

gpus = [0]
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ["CUDA_VISIBLE_DEVICES"] = '0'

import numpy as np
import pandas as pd
import random
import datetime
import time
import math
import warnings

from pandas import ExcelWriter
from torchsummary import summary

import torch
from torch import nn
from torch import Tensor
from torch.backends import cudnn
from torch.autograd import Variable
import torch.nn.functional as F

from einops.layers.torch import Rearrange, Reduce
from einops import rearrange, reduce, repeat

from utils import calMetrics
from utils import calculatePerClass
from utils import numberClassChannel
from utils import load_data_evaluate

warnings.filterwarnings("ignore")
# Deterministic cuDNN for reproducible runs (benchmark off => no autotuning).
cudnn.benchmark = False
cudnn.deterministic = True


class GRN_v3(nn.Module):
    """GRN-v3: generalized residual network v3 with input-dependent weights.

    Implements (per the paper's formulation):
        g_t(x) = (G_t (*) (b_t + w_t)) 1
        w_t    = ReLU(w_t^T G_t)
    where G_t is the stack of previous layer outputs, b_t is a learned
    per-dimension/per-layer bias and w_t a learned input-dependent gate.
    Both parameter tensors are zero-initialized as in the paper.
    """

    def __init__(self, max_layers, emb_size):
        super().__init__()
        self.emb_size = emb_size
        # Dimension-wise weights b_t in R^{t x d}, zero-initialized.
        self.b_t = nn.Parameter(torch.zeros(max_layers, emb_size))
        # Input-dependent weight parameters w_t in R^{d x t}, zero-initialized.
        self.w_t = nn.Parameter(torch.zeros(emb_size, max_layers))

    def forward(self, stack):
        """Collapse a stack of layer outputs into one representation.

        Args:
            stack: [batch_size, seq_len, num_layers, emb_size]
        Returns:
            [batch_size, seq_len, emb_size]
        """
        batch_size, seq_len, num_layers, emb_size = stack.shape
        # Slice parameters down to the number of layers actually present.
        b_t = self.b_t[:num_layers, :]     # [num_layers, emb_size]
        w_t = self.w_t[:, :num_layers]     # [emb_size, num_layers]
        # Flatten (batch, seq) so each token is weighted independently.
        stack_flat = stack.reshape(batch_size * seq_len, num_layers, emb_size)
        # Input-dependent scalar per layer: ReLU(w_t^T G_t).
        w_t_transposed = w_t.transpose(0, 1)  # [num_layers, emb_size]
        w_t_contribution = torch.einsum('ble,le->bl', stack_flat, w_t_transposed)
        w_t_activated = F.relu(w_t_contribution)  # [B*S, num_layers]
        # Broadcast the per-layer scalar over the embedding dimension and add b_t.
        w_t_expanded = w_t_activated.unsqueeze(-1).expand(-1, -1, emb_size)
        b_t_expanded = b_t.unsqueeze(0).expand(batch_size * seq_len, -1, -1)
        weights = b_t_expanded + w_t_expanded  # [B*S, num_layers, emb_size]
        # Weighted sum over the layer axis: (G_t (*) weights) summed over t.
        weighted_sum = torch.sum(stack_flat * weights, dim=1)  # [B*S, emb_size]
        return weighted_sum.reshape(batch_size, seq_len, emb_size)


class PatchEmbeddingCNN(nn.Module):
    """EEGNet-style convolutional stem that turns raw EEG into token embeddings.

    Temporal conv -> channel depth-wise conv -> pooling acts as patch slicing
    along time (as in ViT) -> spatial conv -> second pooling, then rearrange
    to [batch, tokens, emb_size].
    """

    def __init__(self, f1=16, kernel_size=64, D=2, pooling_size1=8,
                 pooling_size2=8, dropout_rate=0.3, number_channel=22,
                 emb_size=40):
        super().__init__()
        f2 = D * f1
        self.cnn_module = nn.Sequential(
            # Temporal conv; kernel_size 64 = 0.25 * fs for 256 Hz data.
            nn.Conv2d(1, f1, (1, kernel_size), (1, 1), padding='same', bias=False),
            nn.BatchNorm2d(f1),
            # Channel depth-wise conv collapses the electrode dimension.
            nn.Conv2d(f1, f2, (number_channel, 1), (1, 1), groups=f1,
                      padding='valid', bias=False),
            nn.BatchNorm2d(f2),
            nn.ELU(),
            # Average pooling 1: slicing into 'patches' along time, as in ViT.
            nn.AvgPool2d((1, pooling_size1)),
            nn.Dropout(dropout_rate),
            # Spatial conv over the pooled time axis.
            nn.Conv2d(f2, f2, (1, 16), padding='same', bias=False),
            nn.BatchNorm2d(f2),
            nn.ELU(),
            # Average pooling 2 adjusts sequence length for the encoder.
            nn.AvgPool2d((1, pooling_size2)),
            nn.Dropout(dropout_rate),
        )
        self.projection = nn.Sequential(
            Rearrange('b e (h) (w) -> b (h w) e'),
        )

    def forward(self, x: Tensor) -> Tensor:
        b, _, _, _ = x.shape
        x = self.cnn_module(x)
        x = self.projection(x)
        return x


class MultiHeadAttention(nn.Module):
    """Standard multi-head attention with separate Q/K/V inputs."""

    def __init__(self, emb_size, num_heads, dropout):
        super().__init__()
        self.emb_size = emb_size
        self.num_heads = num_heads
        self.keys = nn.Linear(emb_size, emb_size)
        self.queries = nn.Linear(emb_size, emb_size)
        self.values = nn.Linear(emb_size, emb_size)
        self.att_drop = nn.Dropout(dropout)
        self.projection = nn.Linear(emb_size, emb_size)

    def forward(self, query: Tensor, key: Tensor, value: Tensor,
                mask: Tensor = None) -> Tensor:
        # Project and split each of Q, K, V into heads.
        queries = rearrange(self.queries(query), "b n (h d) -> b h n d", h=self.num_heads)
        keys = rearrange(self.keys(key), "b n (h d) -> b h n d", h=self.num_heads)
        values = rearrange(self.values(value), "b n (h d) -> b h n d", h=self.num_heads)
        energy = torch.einsum('bhqd, bhkd -> bhqk', queries, keys)
        if mask is not None:
            fill_value = torch.finfo(torch.float32).min
            # FIX: original called the non-existent Tensor.mask_fill and
            # discarded the result; masked_fill_ applies the mask in place.
            energy.masked_fill_(~mask, fill_value)
        # NOTE(review): scales by sqrt(emb_size) rather than sqrt(head_dim),
        # kept as in the original implementation.
        scaling = self.emb_size ** (1 / 2)
        att = F.softmax(energy / scaling, dim=-1)
        att = self.att_drop(att)
        out = torch.einsum('bhal, bhlv -> bhav ', att, values)
        out = rearrange(out, "b h n d -> b n (h d)")
        out = self.projection(out)
        return out


# Point-wise feed-forward network.
class FeedForwardBlock(nn.Sequential):
    def __init__(self, emb_size, expansion, drop_p):
        super().__init__(
            nn.Linear(emb_size, expansion * emb_size),
            nn.GELU(),
            nn.Dropout(drop_p),
            nn.Linear(expansion * emb_size, emb_size),
        )


class ClassificationHead(nn.Sequential):
    """Dropout + linear classifier over the flattened feature vector."""

    def __init__(self, flatten_number, n_classes):
        super().__init__()
        self.fc = nn.Sequential(
            nn.Dropout(0.5),
            nn.Linear(flatten_number, n_classes)
        )

    def forward(self, x):
        out = self.fc(x)
        return out


class ResidualAdd(nn.Module):
    """Post-norm residual wrapper: LayerNorm(dropout(fn(x)) + x)."""

    def __init__(self, fn, emb_size, drop_p):
        super().__init__()
        self.fn = fn
        self.drop = nn.Dropout(drop_p)
        self.layernorm = nn.LayerNorm(emb_size)

    def forward(self, x, **kwargs):
        x_input = x
        res = self.fn(x, **kwargs)
        out = self.layernorm(self.drop(res) + x_input)
        return out


class TransformerEncoderBlock(nn.Module):
    """Transformer encoder block whose Q/K/V come from GRN-v3 over a layer stack.

    Keeps the original TransformerEncoderBlock interface but internally uses
    DCA; forward accepts an optional `stack` so it stays compatible with
    PyTorch hook machinery expecting a single primary input.
    """

    def __init__(self, emb_size, num_heads=4, drop_p=0.5, forward_expansion=4,
                 forward_drop_p=0.5, max_layers=10):
        super().__init__()
        self.emb_size = emb_size
        self.num_heads = num_heads
        # Three independent GRN-v3 instances generate Q, K and V respectively.
        self.grn_q = GRN_v3(max_layers, emb_size)
        self.grn_k = GRN_v3(max_layers, emb_size)
        self.grn_v = GRN_v3(max_layers, emb_size)
        self.multihead_attn = MultiHeadAttention(emb_size, num_heads, drop_p)
        self.feed_forward = FeedForwardBlock(emb_size, forward_expansion, forward_drop_p)
        self.norm1 = nn.LayerNorm(emb_size)
        self.norm2 = nn.LayerNorm(emb_size)
        self.dropout1 = nn.Dropout(drop_p)
        self.dropout2 = nn.Dropout(forward_drop_p)
        # Detached copy of this block's latest output (for external inspection).
        self.layer_output = None

    def forward(self, x, stack=None):
        """Run one DCA encoder block.

        Args:
            x: [batch_size, seq_len, emb_size] - current layer input.
            stack: [batch_size, seq_len, num_layers, emb_size] - outputs of all
                previous layers; defaults to a stack containing only `x`.
        Returns:
            [batch_size, seq_len, emb_size]
        """
        batch_size, seq_len, emb_size = x.shape
        if stack is None:
            stack = x.unsqueeze(2)  # [batch_size, seq_len, 1, emb_size]
        # GRN-v3 collapses the stack into Q, K, V.
        Q = self.grn_q(stack)
        K = self.grn_k(stack)
        V = self.grn_v(stack)
        attn_output = self.multihead_attn(Q, K, V)
        # First residual + LayerNorm (note: residual is the raw input x).
        x = self.norm1(x + self.dropout1(attn_output))
        ff_output = self.feed_forward(x)
        # Second residual + LayerNorm.
        out = self.norm2(x + self.dropout2(ff_output))
        self.layer_output = out.detach()
        return out


class TransformerEncoder(nn.Module):
    """Stack of DCA blocks maintaining a running stack of all layer outputs."""

    def __init__(self, heads, depth, emb_size, max_layers=10):
        super().__init__()
        self.depth = depth
        self.emb_size = emb_size
        self.max_layers = max_layers
        self.blocks = nn.ModuleList([
            TransformerEncoderBlock(
                emb_size=emb_size,
                num_heads=heads,
                drop_p=0.5,
                forward_expansion=4,
                forward_drop_p=0.5,
                max_layers=max_layers,
            )
            for _ in range(depth)
        ])

    def forward(self, x):
        """x: [batch_size, seq_len, emb_size] -> same shape."""
        batch_size, seq_len, emb_size = x.shape
        # Stack holds the initial input plus every block output:
        # [batch_size, seq_len, num_layers, emb_size].
        stack = x.unsqueeze(2)
        current_input = x
        for i, block in enumerate(self.blocks):
            # Keep only the most recent max_layers entries.
            if stack.size(2) > self.max_layers:
                stack = stack[:, :, -self.max_layers:, :]
            output = block(current_input, stack)
            # Append this block's output to the stack.
            stack = torch.cat([stack, output.unsqueeze(2)], dim=2)
            current_input = output
        return current_input


class BranchEEGNetTransformer(nn.Sequential):
    """Thin wrapper: currently just the CNN patch-embedding stem."""

    def __init__(self, heads=4, depth=6, emb_size=40, number_channel=22,
                 f1=20, kernel_size=64, D=2,
                 pooling_size1=8, pooling_size2=8, dropout_rate=0.3,
                 **kwargs):
        super().__init__(
            PatchEmbeddingCNN(f1=f1, kernel_size=kernel_size, D=D,
                              pooling_size1=pooling_size1,
                              pooling_size2=pooling_size2,
                              dropout_rate=dropout_rate,
                              number_channel=number_channel,
                              emb_size=emb_size),
        )


class PositioinalEncoding(nn.Module):
    # NOTE(review): class name typo ("Positoinal") kept for API compatibility.
    """Learnable positional encoding added to the token sequence."""

    def __init__(self, embedding, length=100, dropout=0.1):
        super().__init__()
        self.dropout = nn.Dropout(dropout)
        self.encoding = nn.Parameter(torch.randn(1, length, embedding))

    def forward(self, x):
        # x: [batch, length, embedding]; slice the encoding to the actual length.
        x = x + self.encoding[:, :x.shape[1], :].cuda()
        return self.dropout(x)


class EEGTransformer(nn.Module):
    """Full model: CNN stem + positional encoding + DCA Transformer + classifier.

    forward returns (features, logits) where features is the residual sum of
    the CNN tokens and the encoder output.
    """

    def __init__(self, heads=4, emb_size=40, depth=6, database_type='A',
                 eeg1_f1=20, eeg1_kernel_size=64, eeg1_D=2,
                 eeg1_pooling_size1=8, eeg1_pooling_size2=8,
                 eeg1_dropout_rate=0.3, eeg1_number_channel=22,
                 flatten_eeg1=600, **kwargs):
        super().__init__()
        self.number_class, self.number_channel = numberClassChannel(database_type)
        self.emb_size = emb_size
        self.flatten_eeg1 = flatten_eeg1
        self.depth = depth
        self.flatten = nn.Flatten()
        self.cnn = BranchEEGNetTransformer(heads, depth, emb_size,
                                           number_channel=self.number_channel,
                                           f1=eeg1_f1,
                                           kernel_size=eeg1_kernel_size,
                                           D=eeg1_D,
                                           pooling_size1=eeg1_pooling_size1,
                                           pooling_size2=eeg1_pooling_size2,
                                           dropout_rate=eeg1_dropout_rate,
                                           )
        self.position = PositioinalEncoding(emb_size, dropout=0.1)
        # DCA-enabled Transformer encoder; max_layers = depth + 1 so the stack
        # can hold the initial input plus every block output.
        self.trans = TransformerEncoder(heads, depth, emb_size, max_layers=depth + 1)
        self.flatten = nn.Flatten()
        self.classification = ClassificationHead(self.flatten_eeg1, self.number_class)

    def forward(self, x):
        cnn = self.cnn(x)
        # Scale embeddings as in the original Transformer.
        cnn = cnn * math.sqrt(self.emb_size)
        cnn = self.position(cnn)
        trans = self.trans(cnn)
        # Residual fusion of CNN tokens and encoder output.
        features = cnn + trans
        out = self.classification(self.flatten(features))
        return features, out


class ExP():
    """One experiment: builds the model and runs train/test for one subject."""

    def __init__(self, nsub, data_dir, result_name,
                 epochs=2000, number_aug=2, number_seg=8,
                 gpus=[0],  # NOTE(review): mutable default; unused beyond the
                            # commented-out DataParallel line, kept for API.
                 evaluate_mode='subject-dependent',
                 heads=4, emb_size=40, depth=6, dataset_type='A',
                 eeg1_f1=20, eeg1_kernel_size=64, eeg1_D=2,
                 eeg1_pooling_size1=8, eeg1_pooling_size2=8,
                 eeg1_dropout_rate=0.3, flatten_eeg1=600,
                 validate_ratio=0.2,  # kept for interface compat, unused
                 learning_rate=0.001, batch_size=72,
                 ):
        super(ExP, self).__init__()
        self.dataset_type = dataset_type
        self.batch_size = batch_size
        self.lr = learning_rate
        self.b1 = 0.5   # Adam beta1
        self.b2 = 0.999  # Adam beta2
        self.n_epochs = epochs
        self.nSub = nsub
        self.number_augmentation = number_aug
        self.number_seg = number_seg
        self.root = data_dir
        self.heads = heads
        self.emb_size = emb_size
        self.depth = depth
        self.result_name = result_name
        self.evaluate_mode = evaluate_mode
        self.Tensor = torch.cuda.FloatTensor
        self.LongTensor = torch.cuda.LongTensor
        self.criterion_cls = torch.nn.CrossEntropyLoss().cuda()
        self.number_class, self.number_channel = numberClassChannel(self.dataset_type)
        self.model = EEGTransformer(
            heads=self.heads,
            emb_size=self.emb_size,
            depth=self.depth,
            database_type=self.dataset_type,
            eeg1_f1=eeg1_f1,
            eeg1_D=eeg1_D,
            eeg1_kernel_size=eeg1_kernel_size,
            eeg1_pooling_size1=eeg1_pooling_size1,
            eeg1_pooling_size2=eeg1_pooling_size2,
            eeg1_dropout_rate=eeg1_dropout_rate,
            eeg1_number_channel=self.number_channel,
            flatten_eeg1=flatten_eeg1,
        ).cuda()
        # self.model = nn.DataParallel(self.model, device_ids=gpus)
        self.model = self.model.cuda()
        self.model_filename = self.result_name + '/model_{}.pth'.format(self.nSub)

    def interaug(self, timg, label):
        """Segmentation & Reconstruction (S&R) data augmentation.

        For each class, builds artificial trials by concatenating random
        time segments drawn from different real trials of that class.
        Labels in `label` are 1-based; returned labels are 0-based tensors.
        """
        aug_data = []
        aug_label = []
        number_records_by_augmentation = self.number_augmentation * int(self.batch_size / self.number_class)
        number_segmentation_points = 1000 // self.number_seg
        for clsAug in range(self.number_class):
            cls_idx = np.where(label == clsAug + 1)
            tmp_data = timg[cls_idx]
            tmp_label = label[cls_idx]
            tmp_aug_data = np.zeros((number_records_by_augmentation, 1,
                                     self.number_channel, 1000))
            for ri in range(number_records_by_augmentation):
                for rj in range(self.number_seg):
                    # A fresh set of donor-trial indices per segment slot.
                    rand_idx = np.random.randint(0, tmp_data.shape[0], self.number_seg)
                    tmp_aug_data[ri, :, :,
                                 rj * number_segmentation_points:(rj + 1) * number_segmentation_points] = \
                        tmp_data[rand_idx[rj], :, :,
                                 rj * number_segmentation_points:(rj + 1) * number_segmentation_points]
            aug_data.append(tmp_aug_data)
            aug_label.append(tmp_label[:number_records_by_augmentation])
        aug_data = np.concatenate(aug_data)
        aug_label = np.concatenate(aug_label)
        aug_shuffle = np.random.permutation(len(aug_data))
        aug_data = aug_data[aug_shuffle, :, :]
        aug_label = aug_label[aug_shuffle]
        aug_data = torch.from_numpy(aug_data).cuda()
        aug_data = aug_data.float()
        aug_label = torch.from_numpy(aug_label - 1).cuda()
        aug_label = aug_label.long()
        return aug_data, aug_label

    def get_source_data(self):
        """Load, shuffle and standardize train/test data for this subject.

        Returns (allData, allLabel, testData, testLabel); test data is
        standardized with the *training* mean/std to avoid leakage.
        """
        (self.train_data,   # (batch, channel, length)
         self.train_label,
         self.test_data,
         self.test_label) = load_data_evaluate(self.root, self.dataset_type,
                                               self.nSub,
                                               mode_evaluate=self.evaluate_mode)
        self.train_data = np.expand_dims(self.train_data, axis=1)  # (288, 1, 22, 1000)
        self.train_label = np.transpose(self.train_label)
        self.allData = self.train_data
        self.allLabel = self.train_label[0]
        shuffle_num = np.random.permutation(len(self.allData))
        self.allData = self.allData[shuffle_num, :, :, :]
        self.allLabel = self.allLabel[shuffle_num]
        print('-' * 20, "train size:", self.train_data.shape,
              "test size:", self.test_data.shape)
        self.test_data = np.expand_dims(self.test_data, axis=1)
        self.test_label = np.transpose(self.test_label)
        self.testData = self.test_data
        self.testLabel = self.test_label[0]
        # Standardize with training statistics only.
        target_mean = np.mean(self.allData)
        target_std = np.std(self.allData)
        self.allData = (self.allData - target_mean) / target_std
        self.testData = (self.testData - target_mean) / target_std
        isSaveDataLabel = False
        if isSaveDataLabel:
            np.save("./gradm_data/train_data_{}.npy".format(self.nSub), self.allData)
            np.save("./gradm_data/train_lable_{}.npy".format(self.nSub), self.allLabel)
            np.save("./gradm_data/test_data_{}.npy".format(self.nSub), self.testData)
            np.save("./gradm_data/test_label_{}.npy".format(self.nSub), self.testLabel)
        # data shape: (trial, conv channel, electrode channel, time samples)
        return self.allData, self.allLabel, self.testData, self.testLabel

    def train(self):
        """Train for n_epochs with S&R augmentation; track best test accuracy.

        Returns (test_acc, test_label, y_pred, df_process, best_epoch) where
        test_acc is the accuracy of the best checkpoint reloaded at the end.
        """
        img, label, test_data, test_label = self.get_source_data()
        img = torch.from_numpy(img)
        label = torch.from_numpy(label - 1)  # labels to 0-based
        dataset = torch.utils.data.TensorDataset(img, label)
        test_data = torch.from_numpy(test_data)
        test_label = torch.from_numpy(test_label - 1)
        test_dataset = torch.utils.data.TensorDataset(test_data, test_label)
        self.test_dataloader = torch.utils.data.DataLoader(
            dataset=test_dataset, batch_size=self.batch_size, shuffle=False)
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr,
                                          betas=(self.b1, self.b2))
        test_data = Variable(test_data.type(self.Tensor))
        test_label = Variable(test_label.type(self.LongTensor))
        # Per-epoch records of train_acc, train_loss, test_acc, test_loss.
        result_process = []
        best_test_acc = 0.0
        best_epoch = 0
        for e in range(self.n_epochs):
            self.dataloader = torch.utils.data.DataLoader(
                dataset=dataset, batch_size=self.batch_size, shuffle=True)
            epoch_process = {}
            epoch_process['epoch'] = e
            # --- training phase ---
            self.model.train()
            for i, (img, label) in enumerate(self.dataloader):
                img = Variable(img.type(self.Tensor))
                label = Variable(label.type(self.LongTensor))
                # S&R augmentation; concat real and artificial trials.
                aug_data, aug_label = self.interaug(self.allData, self.allLabel)
                img = torch.cat((img, aug_data))
                label = torch.cat((label, aug_label))
                features, outputs = self.model(img)
                loss = self.criterion_cls(outputs, label)
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
            # NOTE(review): train_acc/loss reflect only the last batch of the
            # epoch (kept as in the original implementation).
            train_pred = torch.max(outputs, 1)[1]
            train_acc = float((train_pred == label).cpu().numpy().astype(int).sum()) / float(label.size(0))
            epoch_process['train_acc'] = train_acc
            epoch_process['train_loss'] = loss.detach().cpu().numpy()
            # --- test phase (every epoch) ---
            if (e + 1) % 1 == 0:
                self.model.eval()
                outputs_list = []
                with torch.no_grad():
                    for i, (img, _) in enumerate(self.test_dataloader):
                        img = img.type(self.Tensor).cuda()
                        _, Cls = self.model(img)
                        outputs_list.append(Cls)
                        del img, Cls
                        torch.cuda.empty_cache()
                Cls = torch.cat(outputs_list)
                test_loss = self.criterion_cls(Cls, test_label)
                test_pred = torch.max(Cls, 1)[1]
                test_acc = float((test_pred == test_label).cpu().numpy().astype(int).sum()) / float(test_label.size(0))
                epoch_process['test_acc'] = test_acc
                epoch_process['test_loss'] = test_loss.detach().cpu().numpy()
                # Checkpoint the best-so-far model by test accuracy.
                if test_acc > best_test_acc:
                    best_test_acc = test_acc
                    best_epoch = e
                    torch.save(self.model, self.model_filename)
                print("{}_{} train_acc: {:.4f} train_loss: {:.6f}\ttest_acc: {:.6f} test_loss: {:.7f}".format(
                    self.nSub,
                    epoch_process['epoch'],
                    epoch_process['train_acc'],
                    epoch_process['train_loss'],
                    epoch_process['test_acc'],
                    epoch_process['test_loss'],
                ))
            result_process.append(epoch_process)
            torch.cuda.empty_cache()
        # Load the best checkpoint for the final test pass.
        self.model = torch.load(self.model_filename).cuda()
        # FIX: eval() must be called on the *loaded* model (the original called
        # it before torch.load, leaving dropout active during final testing).
        self.model.eval()
        outputs_list = []
        with torch.no_grad():
            for i, (img, label) in enumerate(self.test_dataloader):
                img_test = Variable(img.type(self.Tensor)).cuda()
                _, outputs = self.model(img_test)
                outputs_list.append(outputs)
        outputs = torch.cat(outputs_list)
        y_pred = torch.max(outputs, 1)[1]
        test_acc = float((y_pred == test_label).cpu().numpy().astype(int).sum()) / float(test_label.size(0))
        print("best epoch: {}, test accuracy: {:.4f}".format(best_epoch, test_acc))
        df_process = pd.DataFrame(result_process)
        return test_acc, test_label, y_pred, df_process, best_epoch


def main(dirs,
         evaluate_mode='subject-dependent',  # 'LOSO' (cross-subject) or subject-dependent
         heads=8,                # heads of MHA
         emb_size=48,            # token embedding dim
         depth=3,                # Transformer encoder depth
         database_type='A',      # A -> 'BCI IV2a', B -> 'BCI IV2b'
         eeg1_f1=20,             # features of temporal conv
         eeg1_kernel_size=64,    # kernel size of temporal conv
         eeg1_D=2,               # depth-wise conv multiplier
         eeg1_pooling_size1=8,   # p1
         eeg1_pooling_size2=8,   # p2
         eeg1_dropout_rate=0.3,
         flatten_eeg1=600,
         validate_ratio=0.2      # kept for interface compat, unused
         ):
    """Run the experiment for every subject, writing metrics/process Excel files.

    Relies on module-level N_SUBJECT, DATA_DIR, EPOCHS, N_AUG, N_SEG, gpus
    being defined (set in the __main__ block). Returns the per-subject
    metrics DataFrame.
    """
    if not os.path.exists(dirs):
        os.makedirs(dirs)
    result_write_metric = ExcelWriter(dirs + "/result_metric.xlsx")
    result_metric_dict = {}
    y_true_pred_dict = {}
    process_write = ExcelWriter(dirs + "/process_train.xlsx")
    pred_true_write = ExcelWriter(dirs + "/pred_true.xlsx")
    subjects_result = []
    best_epochs = []
    for i in range(N_SUBJECT):
        starttime = datetime.datetime.now()
        # Random seed per subject, logged for reproducibility.
        seed_n = np.random.randint(2024)
        print('seed is ' + str(seed_n))
        random.seed(seed_n)
        np.random.seed(seed_n)
        torch.manual_seed(seed_n)
        torch.cuda.manual_seed(seed_n)
        torch.cuda.manual_seed_all(seed_n)
        index_round = 0
        print('Subject %d' % (i + 1))
        exp = ExP(i + 1, DATA_DIR, dirs, EPOCHS, N_AUG, N_SEG, gpus,
                  evaluate_mode=evaluate_mode,
                  heads=heads, emb_size=emb_size, depth=depth,
                  dataset_type=database_type,
                  eeg1_f1=eeg1_f1,
                  eeg1_kernel_size=eeg1_kernel_size,
                  eeg1_D=eeg1_D,
                  eeg1_pooling_size1=eeg1_pooling_size1,
                  eeg1_pooling_size2=eeg1_pooling_size2,
                  eeg1_dropout_rate=eeg1_dropout_rate,
                  flatten_eeg1=flatten_eeg1,
                  validate_ratio=validate_ratio,
                  )
        testAcc, Y_true, Y_pred, df_process, best_epoch = exp.train()
        true_cpu = Y_true.cpu().numpy().astype(int)
        pred_cpu = Y_pred.cpu().numpy().astype(int)
        df_pred_true = pd.DataFrame({'pred': pred_cpu, 'true': true_cpu})
        df_pred_true.to_excel(pred_true_write, sheet_name=str(i + 1))
        y_true_pred_dict[i] = df_pred_true
        accuracy, precison, recall, f1, kappa = calMetrics(true_cpu, pred_cpu)
        subject_result = {'accuray': accuracy * 100,
                          'precision': precison * 100,
                          'recall': recall * 100,
                          'f1': f1 * 100,
                          'kappa': kappa * 100}
        subjects_result.append(subject_result)
        df_process.to_excel(process_write, sheet_name=str(i + 1))
        best_epochs.append(best_epoch)
        print(' THE BEST ACCURACY IS ' + str(testAcc) + "\tkappa is " + str(kappa))
        endtime = datetime.datetime.now()
        print('subject %d duration: ' % (i + 1) + str(endtime - starttime))
        # Accumulate predictions across subjects.
        if i == 0:
            yt = Y_true
            yp = Y_pred
        else:
            yt = torch.cat((yt, Y_true))
            yp = torch.cat((yp, Y_pred))
    df_result = pd.DataFrame(subjects_result)
    process_write.close()
    pred_true_write.close()
    print('**The average Best accuracy is: ' + str(df_result['accuray'].mean())
          + "kappa is: " + str(df_result['kappa'].mean()) + "\n")
    print("best epochs: ", best_epochs)
    result_metric_dict = df_result
    # Append mean/std rows to the metric sheet.
    mean = df_result.mean(axis=0)
    mean.name = 'mean'
    std = df_result.std(axis=0)
    std.name = 'std'
    df_result = pd.concat([df_result, pd.DataFrame(mean).T, pd.DataFrame(std).T])
    df_result.to_excel(result_write_metric, index=False)
    print('-' * 9, ' all result ', '-' * 9)
    print(df_result)
    print("*" * 40)
    result_write_metric.close()
    return result_metric_dict


if __name__ == "__main__":
    # ----------------------------------------
    DATA_DIR = r'./mymat_raw/'
    EVALUATE_MODE = 'LOSO-No'  # 'LOSO' = leave-one-subject-out; anything else = subject-dependent
    N_SUBJECT = 9              # BCI competition subject count
    N_AUG = 1                  # augmentation multiplier for artificial training data
    N_SEG = 8                  # segmentation count for S&R
    EPOCHS = 1000
    EMB_DIM = 16
    HEADS = 2
    DEPTH = 6
    TYPE = 'B'
    validate_ratio = 0.3       # kept for interface compat, unused
    EEGNet1_F1 = 8
    EEGNet1_KERNEL_SIZE = 64
    EEGNet1_D = 2
    EEGNet1_POOL_SIZE1 = 8
    EEGNet1_POOL_SIZE2 = 8
    FLATTEN_EEGNet1 = 240
    # Heavier dropout when training within-subject (more prone to overfitting).
    if EVALUATE_MODE != 'LOSO':
        EEGNet1_DROPOUT_RATE = 0.5
    else:
        EEGNet1_DROPOUT_RATE = 0.25
    parameters_list = ['A']
    for TYPE in parameters_list:
        number_class, number_channel = numberClassChannel(TYPE)
        RESULT_NAME = "CTNet_DCA_no_var_{}_heads_{}_depth_{}_{}".format(
            TYPE, HEADS, DEPTH, int(time.time()))
        # Build a throwaway model just to print its summary.
        sModel = EEGTransformer(
            heads=HEADS,
            emb_size=EMB_DIM,
            depth=DEPTH,
            database_type=TYPE,
            eeg1_f1=EEGNet1_F1,
            eeg1_D=EEGNet1_D,
            eeg1_kernel_size=EEGNet1_KERNEL_SIZE,
            eeg1_pooling_size1=EEGNet1_POOL_SIZE1,
            eeg1_pooling_size2=EEGNet1_POOL_SIZE2,
            eeg1_dropout_rate=EEGNet1_DROPOUT_RATE,
            eeg1_number_channel=number_channel,
            flatten_eeg1=FLATTEN_EEGNet1,
        ).cuda()
        summary(sModel, (1, number_channel, 1000))
        print(time.asctime(time.localtime(time.time())))
        result = main(RESULT_NAME,
                      evaluate_mode=EVALUATE_MODE,
                      heads=HEADS,
                      emb_size=EMB_DIM,
                      depth=DEPTH,
                      database_type=TYPE,
                      eeg1_f1=EEGNet1_F1,
                      eeg1_kernel_size=EEGNet1_KERNEL_SIZE,
                      eeg1_D=EEGNet1_D,
                      eeg1_pooling_size1=EEGNet1_POOL_SIZE1,
                      eeg1_pooling_size2=EEGNet1_POOL_SIZE2,
                      eeg1_dropout_rate=EEGNet1_DROPOUT_RATE,
                      flatten_eeg1=FLATTEN_EEGNet1,
                      validate_ratio=validate_ratio,
                      )
        print(time.asctime(time.localtime(time.time())))
相关知识
class torch.nn.KLDivLoss(size
Matlab中的N=size(X,2)是什么意思
Vegetable Container Size Chart & Pot Size Calculator
Plant Container Size Chart
5 Gallon Pot Size for Gardening Enthusiasts
Vegetable Container Size Standards (With Chart)
Pot size inches to gallon to liters conversion
Container Size Chart: Plant Pot Sizing Guide
class id区别
js 调用class
网址: class torch.nn.KLDivLoss(size https://www.huajiangbk.com/newsview2596772.html
| 上一篇: 哈尔滨新新花园具体地址,位置地图 |
下一篇: 常州新何花园介绍 |
推荐分享
- 1君子兰什么品种最名贵 十大名 4012
- 2世界上最名贵的10种兰花图片 3364
- 3花圈挽联怎么写? 3286
- 4迷信说家里不能放假花 家里摆 1878
- 5香山红叶什么时候红 1493
- 6花的意思,花的解释,花的拼音 1210
- 7教师节送什么花最合适 1167
- 8勿忘我花图片 1103
- 9橄榄枝的象征意义 1093
- 10洛阳的市花 1039
分享热点排名
