Fix execution bugs in the prediction and training scripts
@@ -35,26 +35,25 @@
 */
 '''
 import argparse
 from locale import normalize
 from copy import deepcopy

 import torch
 from torch.utils.data import TensorDataset, DataLoader

-from FC_ML_Data.FC_ML_Data_Load.Data_Load_Excel import get_data_from_csv_feature, get_train_data_from_csv
+from FC_ML_Data.FC_ML_Data_Load.Data_Load_Excel import get_train_data_from_csv
 from FC_ML_Data.FC_ML_Data_Output.Data_Output_Pytorch import export_model
 from FC_ML_Loss_Function.Loss_Function_Selector import LossFunctionSelector
 from FC_ML_Model.Model_Train_Data import TrainData
 from FC_ML_NN_Model.Poly_Model import PolyModel
 from FC_ML_Optim_Function.Optimizer_Selector import OptimizerSelector
 from FC_ML_Tool.Serialization import parse_json_file

 if __name__ == '__main__':
     parser = argparse.ArgumentParser(description='Surrogate model training parameter input')
-    parser.add_argument('--param', default='D:\liyong\project\TVS_ML\FC_ML_Baseline\FC_ML_Baseline_Test\Train\param.json',
+    parser.add_argument('--param', default='D:\liyong\project\ModelTrainingPython\FC_ML_Baseline\FC_ML_Baseline_Test\Train\param.json',
                         help='Absolute path of the configuration parameter file')
     args = parser.parse_args()
     params = parse_json_file(args.param)
-    train_data = torch.tensor()
-    print(params)
+    # print(params)
     # Read the training parameters
     input_Size = params["algorithmParam"]["inputSize"]  # input feature dimension
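For reference, a minimal sketch of how the --param argument and the JSON configuration could be handled. Neither param.json nor parse_json_file is shown in this diff, so the field names below are copied only from the keys the script actually reads (algorithmParam.inputSize, disposeMethod, dataNoOrder, path, files), and the loader is assumed to be a thin wrapper around json.load. A raw string (or pathlib.Path) for the Windows default path also avoids the invalid escape sequences ("\T", "\p") in the literal used above.

import argparse
import json
from pathlib import Path

def parse_json_file(path):
    # Assumed behaviour of the project's parse_json_file: read a UTF-8 JSON file into a dict.
    with open(path, "r", encoding="utf-8") as f:
        return json.load(f)

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Surrogate model training parameter input')
    # Raw string avoids "\T" / "\p" escape-sequence warnings in the Windows path.
    parser.add_argument('--param',
                        default=r'D:\liyong\project\ModelTrainingPython\FC_ML_Baseline\FC_ML_Baseline_Test\Train\param.json',
                        help='Absolute path of the configuration parameter file')
    args = parser.parse_args()

    params = parse_json_file(args.param)
    # Keys read later in the script; the surrounding JSON structure is an assumption.
    input_size = params["algorithmParam"]["inputSize"]
    dispose_method = params["algorithmParam"]["disposeMethod"]
    data_no_order = params["algorithmParam"]["dataNoOrder"]
    source_dir = Path(params["path"])
    data_files = params["files"]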
@@ -73,11 +72,15 @@ if __name__ == '__main__':
     dispose_method = params["algorithmParam"]["disposeMethod"]  # data preprocessing method
     data_no_order = params["algorithmParam"]["dataNoOrder"]  # whether to shuffle the training data
     # Load all training data
+    train_data = []
     source_dir = params["path"] + "/"
     for data_file in params["files"]:
         data_file_path = source_dir + data_file
         ori_data, normalize = get_train_data_from_csv(data_file_path, pre_dispose_data, dispose_method)
-        torch.cat((train_data, ori_data), dim=0)  # concatenate row-wise
+        if len(train_data) == 0:
+            train_data = deepcopy(ori_data)
+        else:
+            train_data = torch.cat((train_data, ori_data), dim=0)  # concatenate row-wise
     # Split into training and test sets
     split = int(training_ratio / 100 * len(train_data))
     train_dataset = TensorDataset(train_data[:split, 0:input_Size], train_data[:split, input_Size:])
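The removed line is the execution bug this hunk fixes: torch.cat is not an in-place operation, so its return value was discarded, and train_data was never actually accumulated (it had also been initialised with the invalid no-argument torch.tensor() call removed in the first hunk). The added if/else seeds train_data with the first file's tensor and concatenates the rest onto it. As an illustration only, not the project's code, an equivalent pattern collects the per-file tensors in a list and concatenates once; the loader below is a dummy stand-in for get_train_data_from_csv.

import torch

def load_all_training_data(file_paths, load_one):
    # load_one stands in for get_train_data_from_csv and is assumed to return
    # (tensor of shape [N, input_size + output_size], normalizer).
    chunks = []
    for path in file_paths:
        ori_data, _normalize = load_one(path)
        chunks.append(ori_data)
    # A single torch.cat avoids repeated reallocation and the empty-tensor
    # special case handled by the if/else in the commit.
    return torch.cat(chunks, dim=0)

# Example with dummy data standing in for two CSV files:
fake_loader = lambda _path: (torch.randn(4, 5), None)
train_data = load_all_training_data(["a.csv", "b.csv"], fake_loader)
print(train_data.shape)  # torch.Size([8, 5])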
@@ -140,11 +143,11 @@ if __name__ == '__main__':
         # Print the loss values once every 100 iterations
         if epoch % round_print == 0:
             print(
-                f"Epoch {epoch} | Train Loss: {avg_train_loss:.4f} | Test Loss: {avg_test_loss:.4f} | 损失比: {avg_train_loss / avg_test_loss:.2f}:1")
+                f"Epoch {epoch} | Train Loss: {avg_train_loss:.4f} | Test Loss: {avg_test_loss:.4f} | Loss Factor: {avg_train_loss / avg_test_loss:.2f}:1")
             with open(source_dir + "training.log", "a") as f:
-                f.write(f"Epoch {epoch} | Train Loss: {avg_train_loss:.4f} | Test Loss: {avg_test_loss:.4f} | 损失比: {avg_train_loss / avg_test_loss:.2f}:1\n")  # append a newline-terminated line
+                f.write(f"Epoch {epoch} | Train Loss: {avg_train_loss:.4f} | Test Loss: {avg_test_loss:.4f} | Loss Factor: {avg_train_loss / avg_test_loss:.2f}:1\n")  # append a newline-terminated line
     # Export the trained model
-    export_model(model, source_dir, "model", export_format)
+    export_model(model, source_dir, "model", export_format, torch.randn(1, input_Size))
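The new torch.randn(1, input_Size) argument passes export_model an example batch. The real export_model in FC_ML_Data_Output.Data_Output_Pytorch is not part of this diff; the sketch below is a hypothetical stand-in that only illustrates why trace-based exporters (TorchScript tracing, ONNX) need such a dummy input, under the assumption that export_format distinguishes the two targets.

import torch

def export_model(model, out_dir, name, export_format, example_input):
    # Hypothetical stand-in; assumes out_dir ends with a path separator,
    # as source_dir does in the training script.
    model.eval()
    if export_format == "onnx":
        # ONNX export traces the model with the example batch.
        torch.onnx.export(model, example_input, f"{out_dir}{name}.onnx",
                          input_names=["input"], output_names=["output"])
    else:
        # TorchScript tracing also records the graph from the example batch.
        traced = torch.jit.trace(model, example_input)
        traced.save(f"{out_dir}{name}.pt")

# Usage mirroring the new call site, with a dummy batch of one sample:
# export_model(model, source_dir, "model", export_format, torch.randn(1, input_Size))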