# -*- coding: utf-8 -*-
import random
import time
from datetime import datetime
import torch.nn as nn
from torch.utils.data import TensorDataset
from tqdm import tqdm

from GDN_models.GDN import GDN
from args import get_parser
from eval_methods import pot_eval, adjust_predicts, calc_seq
from utils import *
from MFAM_AD import MFAM_AD
from AE import AE
from EncDec_AD import EncDec_AD
import matplotlib.pyplot as plt
from DCdetector.metrics.metrics import combine_all_evaluation_scores


def plot_losses(losses, label, plot=True):
    plt.plot(losses, label=label)
    plt.title("Training losses during training")
    plt.xlabel("Epoch")
    plt.ylabel("RMSE")
    plt.legend()
    # plt.savefig(f"{save_path}/train_losses.png", bbox_inches="tight")
    if plot:
        plt.show()
    plt.close()

# Seed all RNGs (random, numpy, torch) for reproducibility
def get_random_seed(seed):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False


def get_score(values, out_dim):
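    """Reconstruct each sliding window of `values` and derive anomaly scores.

    Relies on module-level objects set in __main__ (model, window_size,
    target_dims, batch_size, args). Returns a DataFrame with per-feature
    Recon_i / True_i / A_Score_i columns and a global A_Score_Global column.
    """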
    print("Predicting and calculating anomaly scores..")
    data = SlidingWindowDataset(values, window_size, target_dims)
    loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=False)
    device = "cuda"
    print(model)
    model.eval()
    recons = []
    with torch.no_grad():
        for x, y in tqdm(loader):
            x = x.to(device)
            y = y.to(device)
            # Reconstruct the window shifted forward by one step: drop the first step
            # of x and append the target y as the new last step.
            recon_x = torch.cat((x[:, 1:, :], y), dim=1)
            window_recon = model(recon_x)
            recons.append(window_recon[:, -1, :].detach().cpu().numpy())
    recons = np.concatenate(recons, axis=0)
    actual = values.detach().cpu().numpy()[window_size:]

    if target_dims is not None:
        actual = actual[:, target_dims]
    anomaly_scores = np.zeros_like(actual)
    df_dict = {}
    # One Recon/True/A_Score column is produced per output dimension
    for i in range(out_dim):
        df_dict[f"Recon_{i}"] = recons[:, i]
        df_dict[f"True_{i}"] = actual[:, i]
        gamma = args.gamma
        a_score = gamma * np.sqrt(
            (recons[:, i] - actual[:, i]) ** 2)
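        # Optional robust scaling: centre each feature's score on its median and
        # divide by (1 + IQR) so that features with very different error magnitudes
        # contribute comparably to the global score.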
        if args.scale_scores:
            q75, q25 = np.percentile(a_score, [75, 25])
            iqr = q75 - q25
            median = np.median(a_score)
            a_score = (a_score - median) / (1 + iqr)
        anomaly_scores[:, i] = a_score
        df_dict[f"A_Score_{i}"] = a_score
    df = pd.DataFrame(df_dict)
    anomaly_scores = np.mean(anomaly_scores, 1)
    df['A_Score_Global'] = anomaly_scores
    return df

def backprop(epoch, model, data, dataO, optimizer, scheduler, training=True):
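    """Reconstruction pass in the style of TranAD's backprop loop (not called in this script).

    Wraps `data` in a TensorDataset, feeds each window together with its last time
    step through `model`, and returns the element-wise MSE and reconstruction of the
    final batch; with training=False the batch size is len(data), so that batch
    covers the whole dataset.
    """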
    mse = nn.MSELoss(reduction='none')
    feats = dataO.shape[1]
    data_x = torch.DoubleTensor(data)
    dataset = TensorDataset(data_x, data_x)
    bs = model.batch if training else len(data)
    dataloader = DataLoader(dataset, batch_size=bs)
    for d, _ in dataloader:
        window = d.permute(1, 0, 2)
        elem = window[-1, :, :].view(1, bs, feats)
        z = model(window, elem)
        if isinstance(z, tuple):
            z = z[1]
    loss = mse(z, elem)[0]
    return loss.detach().numpy(), z.detach().numpy()[0]


# def get_avg(objects, name):
#     totalf1 = sum(obj['f1'] for obj in objects)
#     averagef1 = totalf1 / len(objects)
#     totalprecision = sum(obj['precision'] for obj in objects)
#     averageprecision = totalprecision / len(objects)
#     totalrecall = sum(obj['recall'] for obj in objects)
#     averagerecall = totalrecall / len(objects)
#     # totalf1 = sum(obj.f1 for obj in objects)
#     # averagef1 = totalf1 / len(objects)
#     print(name)
#     print('f1: ')
#     print(averagef1)
#     print('precision: ')
#     print(averageprecision)
#     print('recall: ')
#     print(averagerecall)

def evaluate(model, data_loader):
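    """Validation loss: RMSE reconstruction error of `model` over `data_loader`,
    using the module-level recon_criterion and target_dims."""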
    model.eval()
    recon_b_losses = []
    with torch.no_grad():
        for x, y in data_loader:
            x = x.cuda()
            recons = model(x)
            if target_dims is not None:
                x = x[:, :, target_dims]
            recon_loss = torch.sqrt(recon_criterion(x, recons))
            recon_b_losses.append(recon_loss.item())
        recon_b_losses = np.array(recon_b_losses)
        recon_epoch_loss = np.sqrt((recon_b_losses ** 2).mean())
    return recon_epoch_loss

def evaluator(data_loader):
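    """Per-window anomaly scores for GDN: the MSE between the one-step forecast and
    the target y, averaged over features and concatenated across all batches."""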
    model.eval()
    b_loss = []
    if modelName == "GDN":
        loss_func = nn.MSELoss(reduction='none')
        with torch.no_grad():
            for x, y in data_loader:
                x = x.cuda()
                y = y.cuda()
                output = model(x.permute(0, 2, 1))
                if target_dims is not None:
                    x = x[:, :, target_dims]
                loss = loss_func(output, y.squeeze(1))
                b_loss.append(torch.mean(loss, dim=1))
    return torch.cat(b_loss).detach().cpu().numpy()

# We provide the code for every model below except TranAD and DCdetector
if __name__ == "__main__":

    get_random_seed(42)
    id = datetime.now().strftime("%d%m%Y_%H%M%S")
    parser = get_parser()
    args = parser.parse_args()
    # args.model = 'AE'
    # args.model = 'MFAM-AD'
    # args.model = 'EncDec-AD'
    # args.model = 'IForest'
    # args.model = 'KNN'
    # args.model = 'PCA'
    # args.model = 'LOF'
    args.model = 'GDN'
    modelName = args.model
    args.lstm_n_layers = 3
    args.dropout = 0.1
    # args.dataset = "MSL"
    # args.dataset = "NIPS_TS_Swan"
    # args.dataset = "NIPS_TS_GECCO"
    args.dataset = "XSJ"
    dataset = args.dataset
    args.kernel_size = 7
    args.lookback = 30
    window_size = args.lookback

    spec_res = args.spec_res
    args.normalize = False
    normalize = args.normalize
    args.lstm_hid_dim = 50
    args.n_epochs = 30
    n_epochs = args.n_epochs
    args.bs = 256
    batch_size = args.bs
    args.init_lr = 0.001
    init_lr = args.init_lr
    val_split = args.val_split
    shuffle_dataset = args.shuffle_dataset
    use_cuda = args.use_cuda
    print_every = args.print_every
    log_tensorboard = args.log_tensorboard
    args.group = '1-1'
    group_index = args.group[0]
    index = args.group[2:]
    args_summary = str(args.__dict__)
    print(args_summary)

    level_q_dict = {
        "SMAP": (0.90, 0.005),
        "MSL": (0.90, 0.001),
        "SMD-1": (0.9950, 0.001),
        # "XSJ": (0.90, 0.001),
        "XSJ": (0.9969, 0.001),
        "NIPS_TS_Swan": (0.98, 0.005),
        "NIPS_TS_GECCO": (0.9949, 0.001),
    }
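    # Pick the (level, q) pair for the current dataset (SMD is keyed per machine group).
    # Roughly, level sets the initial threshold quantile and q the risk level used by
    # pot_eval when selecting the final anomaly threshold.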
    key = "SMD-" + args.group[0] if args.dataset == "SMD" else args.dataset
    level, q = level_q_dict[key]
    if args.level is not None:
        level = args.level
    if args.q is not None:
        q = args.q

    if dataset == 'SMD':
        output_path = f'output/SMD/{args.group}'
        (x_train, _), (x_test, y_test) = get_data(f"machine-{group_index}-{index}", normalize=normalize)
    elif dataset in ['MSL', 'SMAP']:
        output_path = f'output/{dataset}'
        (x_train, _), (x_test, y_test) = get_data(dataset, normalize=normalize)
    elif dataset == 'XSJ':
        output_path = f'output/{dataset}'
        (x_train, _), (x_test, y_test) = get_data(dataset, normalize=normalize)
    elif dataset in ['NIPS_TS_GECCO', 'NIPS_TS_Swan']:
        output_path = f'output/{dataset}'
        (x_train, _), (x_test, y_test) = get_data_other(dataset, normalize=normalize)
    else:
        raise Exception(f'Dataset "{dataset}" not available.')

    log_dir = f'{output_path}/logs'
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    save_path = f"{output_path}/xsj"

    x_train = torch.from_numpy(x_train).float()
    x_test = torch.from_numpy(x_test).float()
    n_features = x_train.shape[1]

    target_dims = get_target_dims(dataset)
    if target_dims is None:
        out_dim = n_features
    elif isinstance(target_dims, int):
        out_dim = 1
    else:
        out_dim = len(target_dims)

    train_dataset = SlidingWindowDataset(x_train, window_size, target_dims)
    test_dataset = SlidingWindowDataset(x_test, window_size, target_dims)
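    # SlidingWindowDataset yields (x, y) pairs: x is a lookback window of window_size
    # steps and y is the single step that follows it (the forecasting target for GDN;
    # get_score appends it to the shifted window for the reconstruction models).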

    train_loader, val_loader, test_loader = create_data_loaders(
        train_dataset, batch_size, val_split, shuffle_dataset, test_dataset=test_dataset
    )
    if modelName == "MFAM-AD":
        model = MFAM_AD(
            n_features,
            window_size,
            out_dim,
            kernel_size=5,  # note: fixed here rather than taken from args.kernel_size (7)
            lstm_n_layers=args.lstm_n_layers,
            lstm_hid_dim=args.lstm_hid_dim,
            recon_hid_dim=args.lstm_hid_dim,
            dropout=args.dropout,
        )
        optimizer = torch.optim.Adam(model.parameters(), lr=args.init_lr)
        # criterion = nn.CosineSimilarity(dim=1, eps=1e-6)
        # criterion = nn.CosineEmbeddingLoss()
        recon_criterion = nn.MSELoss()
        model.cuda()
        print(f"Training model for {n_epochs} epochs..")
        train_start = time.time()
        lossList = []
        vallosslist = []
        for epoch in range(n_epochs):
            epoch_start = time.time()
            model.train()
            recon_b_losses = []
            for x, y in train_loader:
                x = x.cuda()
                y = y.cuda()
                recons = model(x)
                # y1, y2 = model(x)
                if target_dims is not None:
                    x = x[:, :, target_dims]
                # similarity = criterion(y1, y2)
                # loss = -torch.mean(similarity)
                # recon_b_losses.append(similarity.detach().cpu().numpy().mean())
                recon_loss = torch.sqrt(recon_criterion(x, recons))
                recon_b_losses.append(recon_loss.item())
                optimizer.zero_grad()
                # loss.backward()
                recon_loss.backward()
                optimizer.step()
                # for name, param in model.named_parameters():
                #     if param.grad is not None:
                #         print("Gradient of {}: {}".format(name, param.grad))
            recon_b_losses = np.array(recon_b_losses)
            recon_epoch_loss = np.sqrt((recon_b_losses ** 2).mean())
            lossList.append(recon_epoch_loss)
            val_loss = evaluate(model, val_loader)
            vallosslist.append(val_loss)
            epoch_time = time.time() - epoch_start
            print(f"[Epoch {epoch + 1}] loss = {recon_epoch_loss:.5f} [{epoch_time:.1f}s]")
            print(f"val_loss = {val_loss:.5f}")
        train_time = int(time.time() - train_start)
        # plot_losses(lossList, 'trainloss')
        # plot_losses(vallosslist, 'valloss')
        print(f"-- Training done in {train_time}s.")
        print(f"-- Testing......")
        model.eval()
        recon_losses = []
        with torch.no_grad():
            for x, y in test_loader:
                x = x.cuda()
                y = y.cuda()
                recons = model(x)
                # y1, y2 = model(x)
                if target_dims is not None:
                    x = x[:, :, target_dims]
                # similarity = criterion(y1, y2)
                # loss = -torch.mean(similarity)
                # recon_losses.append(similarity.detach().cpu().numpy().mean())
                recon_loss = torch.sqrt(recon_criterion(x, recons))
                recon_losses.append(recon_loss.item())
        recon_losses = np.array(recon_losses)
        recon_loss = np.sqrt((recon_losses ** 2).mean())
        print(f"Test total loss: {recon_loss:.5f}")
        label = y_test[window_size:] if y_test is not None else None
        train_pred_df = get_score(x_train, out_dim)
        test_pred_df = get_score(x_test, out_dim)
        train_anomaly_scores = train_pred_df['A_Score_Global'].values
        test_anomaly_scores = test_pred_df['A_Score_Global'].values
        # train_pred_df['A_Score_Global'] = train_anomaly_scores
        # test_pred_df['A_Score_Global'] = test_anomaly_scores
        #
        # np.save(f"datasets/{dataset}/train_loss.npy", train_anomaly_scores)
        # np.save(f"datasets/{dataset}/test_loss.npy", test_anomaly_scores)
        # np.save(f"datasets/{dataset}/label.npy", label)
        p_eval = pot_eval(train_anomaly_scores, test_anomaly_scores, label,
                          q=q, level=level, dynamic=False)
        print(f"Results using peak-over-threshold method:\n {p_eval}")


        print("save result")
        global_pot = p_eval["threshold"]
        test_pred_df["A_True_Global"] = label
        train_pred_df["Thresh_Global"] = global_pot
        test_pred_df["Thresh_Global"] = global_pot
        train_pred_df[f"A_Pred_Global"] = (train_anomaly_scores >= global_pot).astype(int)
        test_preds_global = (test_anomaly_scores >= global_pot).astype(int)
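        # adjust_predicts applies the point-adjust convention: if any point inside a
        # labelled anomaly segment is flagged, the whole segment counts as detected.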
        if label is not None:
            test_preds_global = adjust_predicts(None, label, global_pot, pred=test_preds_global)
        test_pred_df[f"A_Pred_Global"] = test_preds_global
        yy_test = label.astype(int)
        print(combine_all_evaluation_scores(yy_test, test_preds_global, 0))

        # save_path = './datasets'
        # print(f"Saving output to {save_path}/<train/test>_output.pkl")
        # train_pred_df.to_pickle(f"{save_path}/s_train_output_1.pkl")
        # test_pred_df.to_pickle(f"{save_path}/s_test_output_1.pkl")
        # # train_pred_df.to_pickle(f"{save_path}/s_train_output.pkl")
        # # test_pred_df.to_pickle(f"{save_path}/s_test_output.pkl")
        # print("-- Done.")
    elif modelName == "AE":
        model = AE(n_features, out_features=2, window_size=window_size).cuda()
        # Define the loss function and optimizer
        loss_fn = nn.MSELoss()
        opt_AE = torch.optim.Adam(model.parameters(), lr=args.init_lr)
        # Store per-epoch training losses
        loss_ = []
        for epoch in range(n_epochs):
            epoch_start = time.time()
            model.train()
            recon_b_losses = []
            for x, y in train_loader:
                x = x.cuda()
                y = y.cuda()
                decoder_time = model(x)
                recon_loss = torch.sqrt(loss_fn(decoder_time, x))
                recon_b_losses.append(recon_loss.item())
                # loss = loss_fn(decoder_time, x)
                opt_AE.zero_grad()
                recon_loss.backward()
                opt_AE.step()
                # step_loss += loss.item()
            recon_b_losses = np.array(recon_b_losses)
            recon_epoch_loss = np.sqrt((recon_b_losses ** 2).mean())
            epoch_time = time.time() - epoch_start
            loss_.append(recon_epoch_loss)
            print(f"[Epoch {epoch + 1}] "
                  f"loss = {recon_epoch_loss:.5f},"
                  f" [{epoch_time:.1f}s]")
        # plot_losses(loss_)
        model.eval()
        recon_losses = []
        with torch.no_grad():
            for x, y in test_loader:
                x = x.cuda()
                y = y.cuda()
                decoder_time = model(x)
                loss = loss_fn(decoder_time, x)
                recon_losses.append(loss.item())
        recon_losses = np.array(recon_losses)
        recon_loss = np.sqrt((recon_losses ** 2).mean())
        print(f"Test total loss: {recon_loss:.5f}")
        label = y_test[window_size:] if y_test is not None else None
        train_pred_df = get_score(x_train, out_dim)
        test_pred_df = get_score(x_test, out_dim)
        train_anomaly_scores = train_pred_df['A_Score_Global'].values
        test_anomaly_scores = test_pred_df['A_Score_Global'].values
        p_eval = pot_eval(train_anomaly_scores, test_anomaly_scores, label,
                          q=q, level=level, dynamic=False)
        print(f"Results using peak-over-threshold method:\n {p_eval}")
        global_epsilon = p_eval["threshold"]
        test_preds_global = (test_anomaly_scores >= global_epsilon).astype(int)
        yy_test = label.astype(int)
        print(combine_all_evaluation_scores(yy_test, test_preds_global, 0))
    elif modelName == "EncDec-AD":
        model = EncDec_AD(n_features, hidden_size=24).cuda()
        loss_fn = torch.nn.MSELoss()
        opt_AE = torch.optim.Adam(model.parameters(), lr=args.init_lr)
        loss_ = []
        for epoch in range(n_epochs):
            epoch_start = time.time()
            recon_b_losses = []
            model.train()
            for x, y in train_loader:
                x = x.cuda()
                y = y.cuda()
                decoder_time = model(x)
                recon_loss = torch.sqrt(loss_fn(decoder_time, x))
                recon_b_losses.append(recon_loss.item())
                # loss = loss_fn(decoder_time, x)
                opt_AE.zero_grad()
                recon_loss.backward()
                # loss.backward()
                opt_AE.step()
                # step_loss += loss.item()
            recon_b_losses = np.array(recon_b_losses)
            recon_epoch_loss = np.sqrt((recon_b_losses ** 2).mean())
            epoch_time = time.time() - epoch_start
            # loss_.append(step_loss / dataLen)
            loss_.append(recon_epoch_loss)
            print(f"[Epoch {epoch + 1}] "
                  f"loss = {recon_epoch_loss:.5f},"
                  f" [{epoch_time:.1f}s]")
        # plot_losses(loss_)
        model.eval()
        recon_losses = []
        with torch.no_grad():
            for x, y in test_loader:
                x = x.cuda()
                y = y.cuda()
                decoder_time = model(x)
                loss = loss_fn(decoder_time, x)
                recon_losses.append(loss.item())
        recon_losses = np.array(recon_losses)
        recon_loss = np.sqrt((recon_losses ** 2).mean())
        print(f"Test total loss: {recon_loss:.5f}")
        label = y_test[window_size:] if y_test is not None else None
        train_pred_df = get_score(x_train, out_dim)
        test_pred_df = get_score(x_test, out_dim)
        train_anomaly_scores = train_pred_df['A_Score_Global'].values
        test_anomaly_scores = test_pred_df['A_Score_Global'].values
        p_eval = pot_eval(train_anomaly_scores, test_anomaly_scores, label,
                          q=q, level=level, dynamic=False)
        print(f"Results using peak-over-threshold method:\n {p_eval}")
        global_epsilon = p_eval["threshold"]
        test_preds_global = (test_anomaly_scores >= global_epsilon).astype(int)
        yy_test = label.astype(int)
        print(combine_all_evaluation_scores(yy_test, test_preds_global, 0))
    elif modelName == "GDN":
        edge_index = torch.cat((torch.arange(n_features).repeat(n_features).unsqueeze(0),
                                torch.arange(n_features).repeat_interleave(n_features).unsqueeze(0)),
                               dim=0).float().cuda()
        model = GDN([edge_index], n_features, input_dim=window_size, topk=5).cuda()
        # edge_index = edge_index.unsqueeze(0).repeat(batch_size, 1, 1)
        optimizer = torch.optim.Adam(model.parameters(), lr=args.init_lr)
        loss_func = nn.MSELoss(reduction='mean')
        loss_arr = []
        train_start = time.time()
        for epoch in range(n_epochs):
            epoch_start = time.time()
            model.train()
            b_loss = []
            mse_loss = []
            n = epoch + 1
            for x, y in train_loader:
                x = x.cuda()
                y = y.cuda()
                optimizer.zero_grad()
                output = model(x.permute(0, 2, 1))
                if target_dims is not None:
                    y = y[:, :, target_dims]
                    x = x[:, :, target_dims]
                #     output = output.squeeze(1)
                # loss = (1 - n / n_epochs) * loss_func(output[0], x) + (n / n_epochs) * loss_func(output[1], y)
                # loss = (1 / n) * loss_func(output[0], x) + (1 - 1 / n) * loss_func(output[1], y)
                # loss = loss_func(output[0], x) + loss_func(output[1], y)
                loss = loss_func(output, y.squeeze(1))
                b_loss.append(loss.item())
                loss.backward()
                optimizer.step()

            b_loss = np.array(b_loss)
            avg_loss = b_loss.mean()
            loss_arr.append(avg_loss)
            epoch_time = time.time() - epoch_start

            print(
                f"[Epoch {epoch + 1}] loss = {avg_loss:.5f}  [{epoch_time:.1f}s]")
        # plt.plot(np.array(loss_arr), 'b', label='loss')
        # plt.savefig(f"figure/{model.__class__.__name__}_{dataset}.jpg")
    # torch.save(model.state_dict(), os.path.join('./', str(args.model) + str(args.dataset) + '_checkpoint.pth'))
        print(f"train time: {time.time() - epoch_start}")
        test_loss = evaluator(test_loader)
        train_loss = evaluator(train_loader)
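        # Hedged sketch (not in the original script): threshold the GDN scores with the
        # same POT pipeline used for the reconstruction models above. Assumes the
        # per-window scores from evaluator() align with y_test[window_size:] and that
        # train_loader (possibly shuffled/split) is representative enough for POT.
        label = y_test[window_size:] if y_test is not None else None
        p_eval = pot_eval(train_loss, test_loss, label, q=q, level=level, dynamic=False)
        print(f"Results using peak-over-threshold method:\n {p_eval}")
        test_preds_global = (test_loss >= p_eval["threshold"]).astype(int)
        if label is not None:
            test_preds_global = adjust_predicts(None, label, p_eval["threshold"], pred=test_preds_global)
            print(combine_all_evaluation_scores(label.astype(int), test_preds_global, 0))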
    elif modelName == 'IForest':
        from pyod.models.iforest import IForest

        clf_name = 'IForest'
        clf = IForest(random_state=42)
        clf.fit(x_train)
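        # Note: the pyod baselines are fit on raw per-timestep feature vectors (no
        # sliding windows), so their point-wise predictions align one-to-one with y_test.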

        y_train_pred = clf.labels_  # binary labels on the training data (0: normal, 1: anomaly)
        y_train_scores = clf.decision_scores_  # outlier scores on the training data (higher = more anomalous)
        # Use the fitted clf to detect anomalies in the unseen test data
        y_test_pred = clf.predict(x_test)  # binary labels on the test data (0: normal, 1: anomaly)
        y_test_scores = clf.decision_function(x_test)
        result, _ = calc_seq(y_test_pred, y_test, 0.5)
        print("f1;", result[0])
        print("precision;", result[1])
        print("recall;", result[2])
        print("TP:", result[3])
        print("TN:", result[4])
        print("FP:", result[5])
        print("FN:", result[6])
        print("accuracy;", result[-1])
        yy_test = y_test.astype(int)
        print(combine_all_evaluation_scores(yy_test, y_test_pred, 0))
    elif modelName == 'PCA':
        from pyod.models.pca import PCA

        clf = PCA()  # initialize the detector
        clf.fit(x_train)  # fit the detector on x_train
        # Anomaly labels and scores on the training data x_train
        y_train_pred = clf.labels_  # binary labels on the training data (0: normal, 1: anomaly)
        y_train_scores = clf.decision_scores_  # outlier scores on the training data (higher = more anomalous)
        # Use the fitted clf to detect anomalies in the unseen test data
        y_test_pred = clf.predict(x_test)  # binary labels on the test data (0: normal, 1: anomaly)
        y_test_scores = clf.decision_function(x_test)
        result, _ = calc_seq(y_test_pred, y_test, 0.5)
        print("f1;", result[0])
        print("precision;", result[1])
        print("recall;", result[2])
        print("TP:", result[3])
        print("TN:", result[4])
        print("FP:", result[5])
        print("FN:", result[6])
        print("accuracy;", result[-1])
        yy_test = y_test.astype(int)
        print(combine_all_evaluation_scores(yy_test, y_test_pred, 0))
    elif modelName == 'KNN':
        from pyod.models.knn import KNN

        clf = KNN()  # initialize the detector
        clf.fit(x_train)  # fit the detector on x_train
        # Anomaly labels and scores on the training data x_train
        y_train_pred = clf.labels_  # binary labels on the training data (0: normal, 1: anomaly)
        y_train_scores = clf.decision_scores_  # outlier scores on the training data (higher = more anomalous)
        # Use the fitted clf to detect anomalies in the unseen test data
        y_test_pred = clf.predict(x_test)  # binary labels on the test data (0: normal, 1: anomaly)
        y_test_scores = clf.decision_function(x_test)
        result, _ = calc_seq(y_test_pred, y_test, 0.5)
        print("f1;", result[0])
        print("precision;", result[1])
        print("recall;", result[2])
        print("TP:", result[3])
        print("TN:", result[4])
        print("FP:", result[5])
        print("FN:", result[6])
        print("accuracy;", result[-1])
        yy_test = y_test.astype(int)
        print(combine_all_evaluation_scores(yy_test, y_test_pred, 0))
    elif modelName == 'LOF':
        from pyod.models.lof import LOF

        clf = LOF()  # initialize the detector
        clf.fit(x_train)  # fit the detector on x_train
        # Anomaly labels and scores on the training data x_train
        y_train_pred = clf.labels_  # binary labels on the training data (0: normal, 1: anomaly)
        y_train_scores = clf.decision_scores_  # outlier scores on the training data (higher = more anomalous)
        # Use the fitted clf to detect anomalies in the unseen test data
        y_test_pred = clf.predict(x_test)  # binary labels on the test data (0: normal, 1: anomaly)
        y_test_scores = clf.decision_function(x_test)
        result, _ = calc_seq(y_test_pred, y_test, 0.5)
        print("f1;", result[0])
        print("precision;", result[1])
        print("recall;", result[2])
        print("TP:", result[3])
        print("TN:", result[4])
        print("FP:", result[5])
        print("FN:", result[6])
        print("accuracy;", result[-1])
        yy_test = y_test.astype(int)
        print(combine_all_evaluation_scores(yy_test, y_test_pred, 0))
    # torch.save(model.state_dict(), os.path.join('./', str(args.model) + str(args.dataset) + '_checkpoint.pth'))
    # get_avg(e_eval_list, 'e_eval')
# get_avg(p_eval_list, 'p_eval')
# get_avg(bf_eval_list, 'bf_eval')
