# NOTE(review): scraping artifact — original line was a Chinese site banner
# ("Reading tip: to avoid incomplete content, do not use browser reader mode."),
# not part of the program.
# --- One training epoch: optimize, accumulate confusion-matrix stats, log to TensorBoard ---
# NOTE(review): reconstructed from OCR-damaged source. Relies on names defined
# elsewhere in the file: model, trainDataLoader, device, loss_func, optimizer,
# writer (tensorboard SummaryWriter), epoch, batch_size — confirm against the
# surrounding code.
train_sum_tp = 0
train_sum_tn = 0
train_sum_fp = 0       # used below but never initialized in the damaged source — added
train_sum_fn = 0       # used below but never initialized in the damaged source — added
train_sum_loss = 0.0   # used below but never initialized in the damaged source — added
train_sum_correct = 0  # was "train_sum_rrect" (OCR dropped "co") — added init
for i, data in enumerate(trainDataLoader):
    model.train()  # OCR dropped the receiver; presumably the model object — confirm
    inputs, labels = data  # "bels" in source = OCR-truncated "labels"
    # Add a channel dimension (N, 1, ...) and cast to float32 for the network.
    inputs = inputs.unsqueeze(1).to(torch.float32)
    labels = labels.type(torch.LongTensor)
    inputs, labels = inputs.to(device), labels.to(device)
    outputs = model(inputs)  # source had "outputs=(inputs)" — receiver lost to OCR
    loss = loss_func(outputs, labels)
    optimizer.zero_grad()
    loss.backward()  # was "backard" — OCR typo
    optimizer.step()
    # Predicted class = argmax over the class dimension.
    _, pred = torch.max(outputs.data, dim=1)
    batch_correct = pred.eq(labels.data).cpu().sum()
    one = torch.ones_like(labels)
    zero = torch.zeros_like(labels)
    # Binary confusion-matrix counts (class 1 = positive, class 0 = negative).
    tn = ((labels == zero) * (pred == zero)).sum()
    tp = ((labels == one) * (pred == one)).sum()
    fp = ((labels == zero) * (pred == one)).sum()
    fn = ((labels == one) * (pred == zero)).sum()
    train_sum_fn += fn.item()
    train_sum_fp += fp.item()
    train_sum_tn += tn.item()
    train_sum_tp += tp.item()
    train_sum_loss += loss.item()
    train_sum_correct += batch_correct.item()
# Epoch-level metrics; the "/" operators were lost to OCR in the source.
train_loss = train_sum_loss * 1.0 / len(trainDataLoader)
# NOTE(review): assumes every batch has exactly batch_size samples (no drop_last
# remainder) — the last partial batch would skew this slightly; confirm loader setup.
train_correct = train_sum_correct * 1.0 / (len(trainDataLoader) * batch_size)
# Guard the denominators: with no predicted (or no actual) positives the original
# expression would divide by zero; max(..., 1) yields 0.0 instead of crashing.
train_precision = train_sum_tp * 1.0 / max(train_sum_fp + train_sum_tp, 1)
train_recall = train_sum_tp * 1.0 / max(train_sum_fn + train_sum_tp, 1)
# "riter.add_scar" = OCR-damaged "writer.add_scalar"; curly quotes normalized.
writer.add_scalar("trainloss", train_loss, global_step=epoch)
writer.add_scalar("traincorrect", train_correct, global_step=epoch)
writer.add_scalar("trainprecision", train_precision, global_step=epoch)
writer.add_scalar("trainrecall", train_recall, global_step=epoch)
if not os.path.exists("models_aug_CNN"):
    # Source line was truncated mid-literal; directory name recovered from the
    # exists() check above.
    os.mkdir("models_aug_CNN")
# NOTE(review): scraping artifact — original line was a Chinese site banner
# ("Chapter not finished, click next page to continue"); the source file is
# truncated here.