def test(tt_set, model, device):
    ''' Predict on the test set '''
    model.eval()                               # switch to evaluation mode
    preds = []
    for x in tt_set:                           # iterate over mini-batches
        x = x.to(device)
        with torch.no_grad():                  # no gradients needed for inference
            pred = model(x)                    # forward pass
            preds.append(pred.detach().cpu())  # collect predictions on the CPU
    preds = torch.cat(preds, dim=0).numpy()    # concatenate into one numpy array
    return preds
5. Set Up Hyper-parameters
device = get_device()                 # pick the available device ('cpu' or 'cuda')
os.makedirs('models', exist_ok=True)  # checkpoints will be written to ./models/
target_only = True                    # TODO: use only the selected subset of features
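Only the device, output directory, and feature flag survive in this cell; the training configuration itself is not shown. Below is a hypothetical sketch of such a config dict. Every value is an assumption except the early-stopping patience, which can be inferred from the log further down (the last improvement is at epoch 838 and training ends at epoch 1840, i.e. roughly 1000 stagnant epochs):

# Hypothetical hyper-parameter dict; values are assumptions, not taken
# from this run, except early_stop (inferred from the training log).
config = {
    'n_epochs': 3000,                 # upper bound on training epochs
    'batch_size': 270,                # mini-batch size for the DataLoader
    'optimizer': 'SGD',               # optimizer class name in torch.optim
    'optim_hparas': {                 # keyword arguments for the optimizer
        'lr': 0.001,
        'momentum': 0.9,
    },
    'early_stop': 1000,               # stop after this many epochs without dev-loss improvement
    'save_path': 'models/model.pth',  # where the best checkpoint is saved
}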
Finished reading the train set of COVID19 Dataset (2430 samples found, each dim = 17)
Finished reading the dev set of COVID19 Dataset (270 samples found, each dim = 17)
Finished reading the test set of COVID19 Dataset (893 samples found, each dim = 17)
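These three messages are printed while the train/dev/test DataLoaders are built; the construction calls themselves are not shown in this excerpt. A hypothetical sketch, where the helper name prep_dataloader and its arguments are assumptions chosen to match the tr_set/dv_set/tt_set names used below:

# Hypothetical loader construction; prep_dataloader is an assumed helper name.
tr_set = prep_dataloader('covid.train.csv', 'train', config['batch_size'], target_only=target_only)
dv_set = prep_dataloader('covid.train.csv', 'dev',   config['batch_size'], target_only=target_only)
tt_set = prep_dataloader('covid.test.csv',  'test',  config['batch_size'], target_only=target_only)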
# Instantiate the model
model = NeuralNet(tr_set.dataset.dim).to(device)
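The log below is emitted by the train helper; its invocation is missing from this excerpt. Judging by the model_loss_record name used at the plotting step, the call is presumably along these lines (the exact signature is an assumption):

# Assumed call; train() is defined earlier in the notebook.
model_loss, model_loss_record = train(tr_set, dv_set, model, config, device)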
Saving model (epoch = 0, loss = 340.917373)
Saving model (epoch = 1, loss = 322.165056)
Saving model (epoch = 2, loss = 313.825131)
Saving model (epoch = 3, loss = 309.852996)
Saving model (epoch = 4, loss = 306.339627)
Saving model (epoch = 5, loss = 306.296543)
Saving model (epoch = 6, loss = 305.443566)
Saving model (epoch = 7, loss = 300.847960)
Saving model (epoch = 8, loss = 289.878357)
Saving model (epoch = 9, loss = 283.428931)
Saving model (epoch = 11, loss = 277.324653)
Saving model (epoch = 19, loss = 275.598712)
Saving model (epoch = 20, loss = 274.431580)
Saving model (epoch = 21, loss = 271.097685)
Saving model (epoch = 22, loss = 266.430409)
Saving model (epoch = 23, loss = 261.281929)
Saving model (epoch = 24, loss = 260.683929)
Saving model (epoch = 25, loss = 258.624722)
Saving model (epoch = 26, loss = 253.948218)
Saving model (epoch = 27, loss = 247.472347)
Saving model (epoch = 29, loss = 240.454434)
Saving model (epoch = 31, loss = 237.730461)
Saving model (epoch = 32, loss = 233.930249)
Saving model (epoch = 33, loss = 228.276315)
Saving model (epoch = 34, loss = 224.006245)
Saving model (epoch = 35, loss = 219.322504)
Saving model (epoch = 36, loss = 207.611922)
Saving model (epoch = 37, loss = 200.482134)
Saving model (epoch = 38, loss = 196.714348)
Saving model (epoch = 41, loss = 192.946156)
Saving model (epoch = 42, loss = 187.776669)
Saving model (epoch = 43, loss = 187.133918)
Saving model (epoch = 44, loss = 186.150606)
Saving model (epoch = 45, loss = 183.542729)
Saving model (epoch = 46, loss = 177.969274)
Saving model (epoch = 47, loss = 165.223580)
Saving model (epoch = 48, loss = 161.321226)
Saving model (epoch = 49, loss = 155.550834)
Saving model (epoch = 53, loss = 149.358119)
Saving model (epoch = 54, loss = 142.696106)
Saving model (epoch = 55, loss = 139.488191)
Saving model (epoch = 56, loss = 137.709078)
Saving model (epoch = 57, loss = 132.763100)
Saving model (epoch = 60, loss = 123.616633)
Saving model (epoch = 61, loss = 121.146074)
Saving model (epoch = 64, loss = 112.364984)
Saving model (epoch = 65, loss = 100.750797)
Saving model (epoch = 72, loss = 92.428780)
Saving model (epoch = 75, loss = 78.373004)
Saving model (epoch = 80, loss = 68.048881)
Saving model (epoch = 82, loss = 53.388715)
Saving model (epoch = 85, loss = 42.509547)
Saving model (epoch = 94, loss = 39.354231)
Saving model (epoch = 98, loss = 26.928447)
Epoch 100 finished.
Saving model (epoch = 101, loss = 22.667163)
Saving model (epoch = 106, loss = 9.036212)
Saving model (epoch = 113, loss = 7.133554)
Saving model (epoch = 141, loss = 5.964343)
Saving model (epoch = 160, loss = 5.780281)
Saving model (epoch = 184, loss = 5.398957)
Epoch 200 finished.
Saving model (epoch = 203, loss = 4.893500)
Saving model (epoch = 216, loss = 4.655919)
Saving model (epoch = 218, loss = 4.601365)
Saving model (epoch = 227, loss = 4.375972)
Saving model (epoch = 233, loss = 4.232011)
Saving model (epoch = 238, loss = 4.214206)
Saving model (epoch = 245, loss = 3.868370)
Saving model (epoch = 261, loss = 3.665027)
Saving model (epoch = 266, loss = 3.496048)
Saving model (epoch = 276, loss = 3.405258)
Saving model (epoch = 282, loss = 3.251506)
Saving model (epoch = 284, loss = 3.161817)
Epoch 300 finished.
Saving model (epoch = 301, loss = 3.101231)
Saving model (epoch = 306, loss = 2.750548)
Saving model (epoch = 307, loss = 2.730497)
Saving model (epoch = 318, loss = 2.705987)
Saving model (epoch = 321, loss = 2.625461)
Saving model (epoch = 326, loss = 2.563876)
Saving model (epoch = 342, loss = 2.365493)
Saving model (epoch = 343, loss = 2.225936)
Saving model (epoch = 348, loss = 2.199931)
Saving model (epoch = 359, loss = 2.137739)
Saving model (epoch = 360, loss = 2.051806)
Saving model (epoch = 362, loss = 1.994729)
Saving model (epoch = 364, loss = 1.951313)
Saving model (epoch = 370, loss = 1.903423)
Saving model (epoch = 372, loss = 1.817203)
Saving model (epoch = 386, loss = 1.771863)
Saving model (epoch = 394, loss = 1.617613)
Epoch 400 finished.
Saving model (epoch = 411, loss = 1.475305)
Saving model (epoch = 414, loss = 1.455347)
Saving model (epoch = 415, loss = 1.413618)
Saving model (epoch = 421, loss = 1.404032)
Saving model (epoch = 425, loss = 1.342632)
Saving model (epoch = 433, loss = 1.317732)
Saving model (epoch = 440, loss = 1.289960)
Saving model (epoch = 450, loss = 1.251974)
Saving model (epoch = 451, loss = 1.186455)
Saving model (epoch = 465, loss = 1.124176)
Saving model (epoch = 480, loss = 1.115070)
Saving model (epoch = 482, loss = 1.063177)
Saving model (epoch = 495, loss = 1.020159)
Epoch 500 finished.
Saving model (epoch = 505, loss = 0.998222)
Saving model (epoch = 525, loss = 0.996696)
Saving model (epoch = 527, loss = 0.974490)
Saving model (epoch = 528, loss = 0.946199)
Saving model (epoch = 548, loss = 0.934515)
Saving model (epoch = 549, loss = 0.930782)
Saving model (epoch = 552, loss = 0.918216)
Saving model (epoch = 562, loss = 0.888980)
Saving model (epoch = 586, loss = 0.885096)
Epoch 600 finished.
Saving model (epoch = 605, loss = 0.879943)
Saving model (epoch = 619, loss = 0.876455)
Saving model (epoch = 652, loss = 0.871618)
Saving model (epoch = 657, loss = 0.862073)
Saving model (epoch = 664, loss = 0.855746)
Epoch 700 finished.
Saving model (epoch = 736, loss = 0.848672)
Epoch 800 finished.
Saving model (epoch = 838, loss = 0.833972)
Epoch 900 finished.
Epoch 1000 finished.
Epoch 1100 finished.
Epoch 1200 finished.
Epoch 1300 finished.
Epoch 1400 finished.
Epoch 1500 finished.
Epoch 1600 finished.
Epoch 1700 finished.
Epoch 1800 finished.
Finished training after 1840 epochs, min_loss = 0.833972383428503
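Note how the run ends: the best checkpoint is saved at epoch 838 (loss 0.833972, matching the reported min_loss), after which roughly 1000 epochs pass without further improvement and early stopping halts training at epoch 1840.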
# Plot the loss curves recorded during training
plot_learning_curve(model_loss_record, 'deep model')
def save_pred(preds, file):
    ''' Save the predictions to a csv file '''
    with open(file, 'w') as fp:
        writer = csv.writer(fp)
        writer.writerow(['id', 'tested_positive'])  # header row
        for i, p in enumerate(preds):
            writer.writerow([i, p])                 # one (id, prediction) row per sample
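To produce a submission file, the best checkpoint is reloaded and passed through test and save_pred. A hedged end-to-end sketch; the checkpoint path and the assumption that train() saved a state_dict are mine, not shown in this excerpt:

# Hypothetical prediction pipeline; 'models/model.pth' is an assumed path.
model = NeuralNet(tt_set.dataset.dim).to(device)           # fresh model, same input dim
ckpt = torch.load('models/model.pth', map_location='cpu')  # assumes a saved state_dict
model.load_state_dict(ckpt)
preds = test(tt_set, model, device)                        # predict on the test set
save_pred(preds, 'pred.csv')                               # write id,tested_positive rows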