xuhongzuo 2023-09-21 17:07:46 +08:00
commit 09eca95d19
9 changed files with 613 additions and 8 deletions

View File

@ -18,7 +18,7 @@ Python Deep Outlier/Anomaly Detection (DeepOD)
and `Anomaly Detection <https://en.wikipedia.org/wiki/Anomaly_detection>`_. ``DeepOD`` supports tabular anomaly detection and time-series anomaly detection.
DeepOD includes **25** deep outlier detection / anomaly detection algorithms (in unsupervised/weakly-supervised paradigm).
DeepOD includes **26** deep outlier detection / anomaly detection algorithms (in unsupervised/weakly-supervised paradigm).
More baseline algorithms will be included later.
@ -184,6 +184,7 @@ Implemented Models
:header: "Model", "Venue", "Year", "Type", "Title"
:widths: 4, 4, 4, 8, 20
DCdetector, KDD, 2023, unsupervised, DCdetector: Dual Attention Contrastive Representation Learning for Time Series Anomaly Detection [#Yang2023dcdetector]_
TimesNet, ICLR, 2023, unsupervised, TIMESNET: Temporal 2D-Variation Modeling for General Time Series Analysis [#Wu2023timesnet]_
AnomalyTransformer, ICLR, 2022, unsupervised, Anomaly Transformer: Time Series Anomaly Detection with Association Discrepancy [#Xu2022transformer]_
TranAD, VLDB, 2022, unsupervised, TranAD: Deep Transformer Networks for Anomaly Detection in Multivariate Time Series Data
@ -244,4 +245,6 @@ Reference
.. [#Xu2022transformer] Xu Jiehui, et al. "Anomaly Transformer: Time Series Anomaly Detection with Association Discrepancy". ICLR, 2022.
.. [#Wu2023timesnet] Wu Haixu, et al. "TimesNet: Temporal 2D-Variation Modeling for General Time Series Analysis". ICLR, 2023.
.. [#Yang2023dcdetector] Yang Yiyuan, et al. "DCdetector: Dual Attention Contrastive Representation Learning for Time Series Anomaly Detection". KDD, 2023.
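A minimal usage sketch for the newly added detector (editorial; the array shapes are illustrative, and the call pattern follows DeepOD's other time-series models):

.. code-block:: python

    import numpy as np
    import torch
    from deepod.models.time_series import DCdetector

    X_train = np.random.randn(1000, 19)  # (n_timestamps, n_features), unlabeled
    X_test = np.random.randn(500, 19)

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    # seq_len must be divisible by every entry of patch_size
    clf = DCdetector(seq_len=100, stride=1, epochs=5, patch_size=[1, 2, 5], device=device)
    clf.fit(X_train)
    scores = clf.decision_function(X_test)  # one anomaly score per timestamp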

View File

@ -20,6 +20,7 @@ from deepod.models.time_series.devnet import DevNetTS
from deepod.models.time_series.dif import DeepIsolationForestTS
from deepod.models.time_series.dsvdd import DeepSVDDTS
from deepod.models.time_series.dcdetector import DCdetector
from deepod.models.time_series.timesnet import TimesNet
from deepod.models.time_series.anomalytransformer import AnomalyTransformer
from deepod.models.time_series.tranad import TranAD
@ -31,7 +32,7 @@ from deepod.models.time_series.tcned import TcnED
__all__ = [
'RCA', 'DeepSVDD', 'GOAD', 'NeuTraL', 'RDP', 'ICL', 'SLAD', 'DeepIsolationForest',
'DeepSAD', 'DevNet', 'PReNet', 'FeaWAD', 'REPEN', 'RoSAS',
'TimesNet', 'AnomalyTransformer', 'TranAD', 'COUTA', 'USAD', 'TcnED',
'DCdetector', 'TimesNet', 'AnomalyTransformer', 'TranAD', 'COUTA', 'USAD', 'TcnED',
'DeepIsolationForestTS', 'DeepSVDDTS',
'PReNetTS', 'DeepSADTS', 'DevNetTS'
]

View File

@ -7,6 +7,7 @@ from .couta import COUTA
from .tcned import TcnED
from .anomalytransformer import AnomalyTransformer
from .timesnet import TimesNet
from .dcdetector import DCdetector
# weakly-supervised
from .dsad import DeepSADTS
@ -15,4 +16,4 @@ from .prenet import PReNetTS
__all__ = ['DeepIsolationForestTS', 'DeepSVDDTS', 'TranAD', 'USAD', 'COUTA',
'DeepSADTS', 'DevNetTS', 'PReNetTS', 'AnomalyTransformer', 'TimesNet']
'DeepSADTS', 'DevNetTS', 'PReNetTS', 'AnomalyTransformer', 'TimesNet', 'DCdetector']
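# Editorial usage sketch: with the export above, the new detector becomes importable as
#   from deepod.models.time_series import DCdetector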

View File

@ -0,0 +1,448 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.utils.data import DataLoader
import math
import time
from itertools import chain  # stdlib flatten; replaces the fragile `from tkinter import _flatten`
from einops import rearrange, reduce, repeat
from deepod.utils.utility import get_sub_seqs
from deepod.core.base_model import BaseDeepAD
def my_kl_loss(p, q):
    """Smoothed KL(p || q) between attention maps of shape (batch, n_heads, win, win),
    summed over the last axis and averaged over heads -> (batch, win)."""
    res = p * (torch.log(p + 0.0001) - torch.log(q + 0.0001))
    return torch.mean(torch.sum(res, dim=-1), dim=1)
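# Editorial sanity check: with identical inputs the loss is exactly zero, e.g.
#   p = torch.softmax(torch.randn(2, 1, 4, 4), dim=-1)
#   my_kl_loss(p, p)  # -> zeros of shape (2, 4)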
class DCdetector(BaseDeepAD):
def __init__(self, seq_len=100, stride=1, lr=0.0001, epochs=5, batch_size=128,
epoch_steps=20, prt_steps=1, device='cuda',
n_heads=1, d_model=256, e_layers=3, patch_size=None,
verbose=2, random_state=42):
super(DCdetector, self).__init__(
model_name='DCdetector', data_type='ts', epochs=epochs, batch_size=batch_size, lr=lr,
seq_len=seq_len, stride=stride,
epoch_steps=epoch_steps, prt_steps=prt_steps, device=device,
verbose=verbose, random_state=random_state
)
if patch_size is None:
    patch_size = [5]  # default; seq_len must be divisible by every patch size
self.patch_size = patch_size
self.n_heads = n_heads
self.d_model = d_model
self.e_layers = e_layers
self.criterion = nn.MSELoss()
return
def fit(self, X, y=None):
self.n_features = X.shape[1]
train_seqs = get_sub_seqs(X, seq_len=self.seq_len, stride=self.stride)
self.model = DCdetectorModel(win_size=self.seq_len, enc_in=self.n_features, c_out=self.n_features, n_heads=self.n_heads,
d_model=self.d_model, e_layers=self.e_layers, patch_size=self.patch_size,
channel=self.n_features).to(self.device)
dataloader = DataLoader(train_seqs, batch_size=self.batch_size,
shuffle=True, pin_memory=True)
self.optimizer = torch.optim.AdamW(self.model.parameters(), lr=self.lr, weight_decay=1e-5)
self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=5, gamma=0.5)
self.model.train()
for e in range(self.epochs):
loss = self.training(dataloader)
print(f'Epoch {e + 1},\t training loss = {loss:.6f}')
self.decision_scores_ = self.decision_function(X)
self.labels_ = self._process_decision_scores()
return
def decision_function(self, X, return_rep=False):
seqs = get_sub_seqs(X, seq_len=self.seq_len, stride=1)
dataloader = DataLoader(seqs, batch_size=self.batch_size,
shuffle=False, drop_last=False)
self.model.eval()
loss, _ = self.inference(dataloader)  # (n_windows, seq_len): per-window, per-position scores
loss_final = np.mean(loss, axis=1)  # (n_windows,)
# left-pad with zeros so scores align with X: the first seq_len-1 timestamps
# have no complete window and receive a score of 0
loss_final_pad = np.hstack([np.zeros(X.shape[0] - loss_final.shape[0]), loss_final])
return loss_final_pad
def training(self, dataloader):
loss_list = []
for ii, batch_x in enumerate(dataloader):
    self.optimizer.zero_grad()
    batch_x = batch_x.float().to(self.device)
    series, prior = self.model(batch_x)
    series_loss = 0.0
    prior_loss = 0.0
    for u in range(len(prior)):
        # normalize the prior association so it sums to 1 over the window axis
        prior_norm = prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1, self.seq_len)
        series_loss += (torch.mean(my_kl_loss(series[u], prior_norm.detach())) +
                        torch.mean(my_kl_loss(prior_norm.detach(), series[u])))
        prior_loss += (torch.mean(my_kl_loss(prior_norm, series[u].detach())) +
                       torch.mean(my_kl_loss(series[u].detach(), prior_norm)))
    series_loss = series_loss / len(prior)
    prior_loss = prior_loss / len(prior)
    loss = prior_loss - series_loss
    loss_list.append(loss.item())
    loss.backward()
    self.optimizer.step()
    if self.epoch_steps != -1 and ii > self.epoch_steps:
        break
self.scheduler.step()
return np.average(loss_list)
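# Note (editorial): minimizing `prior_loss - series_loss`, with the stop-gradients applied
# inside the loop above, drives the patch-wise and in-patch attention branches toward each
# other from opposite directions -- the dual attention contrastive objective described in
# the DCdetector paper (KDD 2023).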
def inference(self, dataloader):
temperature = 50
attens_energy = []
preds = []
for input_data in dataloader:  # test set
    batch = input_data.float().to(self.device)
    series, prior = self.model(batch)
    series_loss = 0.0
    prior_loss = 0.0
    for u in range(len(prior)):
        # normalize the prior association over the window axis
        prior_norm = prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1, self.seq_len)
        series_loss += my_kl_loss(series[u], prior_norm.detach()) * temperature
        prior_loss += my_kl_loss(prior_norm, series[u].detach()) * temperature
    # temperature-scaled branch discrepancy, softmax-normalized within each window
    metric = torch.softmax((-series_loss - prior_loss), dim=-1)
    cri = metric.detach().cpu().numpy()
    attens_energy.append(cri)
test_energy = np.concatenate(attens_energy, axis=0)  # anomaly scores, shape (n_windows, seq_len)
return test_energy, preds
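# Note (editorial): each window's scores are normalized with a softmax over dim=-1, so
# the per-window scores sum to 1; positions whose two attention branches diverge least
# from each other dominate the window's score mass.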
def training_forward(self, batch_x, net, criterion):
    """define forward step in training (unused: fit() is overridden above)"""
    return
def inference_forward(self, batch_x, net, criterion):
    """define forward step in inference (unused: decision_function() is overridden above)"""
    return
def training_prepare(self, X, y):
    """define train_loader, net, and criterion (unused in this model)"""
    return
def inference_prepare(self, X):
    """define test_loader (unused in this model)"""
    return
# Proposed Model
class Encoder(nn.Module):
def __init__(self, attn_layers, norm_layer=None):
super(Encoder, self).__init__()
self.attn_layers = nn.ModuleList(attn_layers)
self.norm = norm_layer
def forward(self, x_patch_size, x_patch_num, x_ori, patch_index, attn_mask=None):
series_list = []
prior_list = []
for attn_layer in self.attn_layers:
series, prior = attn_layer(x_patch_size, x_patch_num, x_ori, patch_index, attn_mask=attn_mask)
series_list.append(series)
prior_list.append(prior)
return series_list, prior_list
class DCdetectorModel(nn.Module):
def __init__(self, win_size, enc_in, c_out, n_heads=1, d_model=256, e_layers=3, patch_size=[3, 5, 7], channel=55,
d_ff=512, dropout=0.0, activation='gelu', output_attention=True):
super(DCdetectorModel, self).__init__()
self.output_attention = output_attention
self.patch_size = patch_size
self.channel = channel
self.win_size = win_size
# Patching List
self.embedding_patch_size = nn.ModuleList()
self.embedding_patch_num = nn.ModuleList()
for i, patchsize in enumerate(self.patch_size):
self.embedding_patch_size.append(DataEmbedding(patchsize, d_model, dropout))
self.embedding_patch_num.append(DataEmbedding(self.win_size // patchsize, d_model, dropout))
self.embedding_window_size = DataEmbedding(enc_in, d_model, dropout)
# Dual Attention Encoder
self.encoder = Encoder(
[
AttentionLayer(
DAC_structure(win_size, patch_size, channel, False, attention_dropout=dropout,
output_attention=output_attention),
d_model, patch_size, channel, n_heads, win_size) for _ in range(e_layers)
],
norm_layer=torch.nn.LayerNorm(d_model)
)
self.projection = nn.Linear(d_model, c_out, bias=True)
def forward(self, x):
    B, L, M = x.shape  # batch, win_size, channel
    series_patch_mean = []
    prior_patch_mean = []
    # Instance Normalization (RevIN); note it is re-created on every forward pass,
    # so its affine parameters are never trained -- kept as in the reference code
    revin_layer = RevIN(num_features=M)
    x = revin_layer(x, 'norm')
    x_ori = self.embedding_window_size(x)
    # Multi-scale Patching Operation: one dual-attention pass per patch size
    for patch_index, patchsize in enumerate(self.patch_size):
        x_patch_size, x_patch_num = x, x
        x_patch_size = rearrange(x_patch_size, 'b l m -> b m l')  # batch, channel, win_size
        x_patch_num = rearrange(x_patch_num, 'b l m -> b m l')  # batch, channel, win_size
        x_patch_size = rearrange(x_patch_size, 'b m (n p) -> (b m) n p', p=patchsize)
        x_patch_size = self.embedding_patch_size[patch_index](x_patch_size)
        x_patch_num = rearrange(x_patch_num, 'b m (p n) -> (b m) p n', p=patchsize)
        x_patch_num = self.embedding_patch_num[patch_index](x_patch_num)
        series, prior = self.encoder(x_patch_size, x_patch_num, x_ori, patch_index)
        series_patch_mean.append(series)
        prior_patch_mean.append(prior)
    # flatten the [n_patch_sizes][e_layers] nesting into flat lists of attention maps
    series_patch_mean = list(chain.from_iterable(series_patch_mean))
    prior_patch_mean = list(chain.from_iterable(prior_patch_mean))
    if self.output_attention:
        return series_patch_mean, prior_patch_mean
    else:
        return None
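# Note (editorial): following the reference implementation, `series` carries the
# patch-wise attention maps and `prior` the in-patch attention maps; DAC_structure below
# upsamples both to (win_size, win_size) so the KL terms in fit()/inference() can compare
# them elementwise.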
class DataEmbedding(nn.Module):
def __init__(self, c_in, d_model, dropout=0.05):
super(DataEmbedding, self).__init__()
self.value_embedding = TokenEmbedding(c_in=c_in, d_model=d_model)
self.position_embedding = PositionalEmbedding(d_model=d_model)
self.dropout = nn.Dropout(p=dropout)
def forward(self, x):
x = self.value_embedding(x) + self.position_embedding(x)
return self.dropout(x)
class TokenEmbedding(nn.Module):
def __init__(self, c_in, d_model):
super(TokenEmbedding, self).__init__()
# the original `torch.__version__ >= '1.5.0'` string comparison is fragile ('1.10' < '1.5'
# lexically); DeepOD requires torch>=1.10.0, so padding=1 with kernel_size=3 is always correct
self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_model,
                           kernel_size=3, padding=1, padding_mode='circular', bias=False)
for m in self.modules():
if isinstance(m, nn.Conv1d):
nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='leaky_relu')
def forward(self, x):
x = self.tokenConv(x.permute(0, 2, 1)).transpose(1, 2)
return x
class PositionalEmbedding(nn.Module):
def __init__(self, d_model, max_len=5000):
super(PositionalEmbedding, self).__init__()
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model).float()
pe.requires_grad = False  # fixed sinusoidal table; registered as a buffer below
position = torch.arange(0, max_len).float().unsqueeze(1)
div_term = (torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model)).exp()
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
return self.pe[:, :x.size(1)]
class RevIN(nn.Module):
def __init__(self, num_features: int, eps=1e-5, affine=True):
"""
:param num_features: the number of features or channels
:param eps: a value added for numerical stability
:param affine: if True, RevIN has learnable affine parameters
"""
super(RevIN, self).__init__()
self.num_features = num_features
self.eps = eps
self.affine = affine
if self.affine:
self._init_params()
def forward(self, x, mode: str):
if mode == 'norm':
self._get_statistics(x)
x = self._normalize(x)
elif mode == 'denorm':
x = self._denormalize(x)
else:
raise NotImplementedError
return x
def _init_params(self):
    # initialize RevIN affine params, shape (C,); moved to the input's device at use time
    # (the hard-coded `cuda:0` in the reference code breaks CPU-only runs)
    self.affine_weight = torch.ones(self.num_features)
    self.affine_bias = torch.zeros(self.num_features)
def _get_statistics(self, x):
dim2reduce = tuple(range(1, x.ndim - 1))
self.mean = torch.mean(x, dim=dim2reduce, keepdim=True).detach()
self.stdev = torch.sqrt(torch.var(x, dim=dim2reduce, keepdim=True, unbiased=False) + self.eps).detach()
def _normalize(self, x):
    x = (x - self.mean) / self.stdev
    if self.affine:
        x = x * self.affine_weight.to(x.device) + self.affine_bias.to(x.device)
    return x
def _denormalize(self, x):
    if self.affine:
        x = (x - self.affine_bias.to(x.device)) / (self.affine_weight.to(x.device) + self.eps * self.eps)
    x = x * self.stdev + self.mean
    return x
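# Editorial usage sketch: 'denorm' inverts 'norm' using the statistics stored by the
# most recent 'norm' call, e.g.
#   revin = RevIN(num_features=5)
#   z = revin(x, 'norm')
#   x_rec = revin(z, 'denorm')  # x_rec approximately equals x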
class AttentionLayer(nn.Module):
def __init__(self, attention, d_model, patch_size, channel, n_heads, win_size, d_keys=None, d_values=None):
super(AttentionLayer, self).__init__()
d_keys = d_keys or (d_model // n_heads)
d_values = d_values or (d_model // n_heads)
self.norm = nn.LayerNorm(d_model)
self.inner_attention = attention
self.patch_size = patch_size
self.channel = channel
self.window_size = win_size
self.n_heads = n_heads
self.patch_query_projection = nn.Linear(d_model, d_keys * n_heads)
self.patch_key_projection = nn.Linear(d_model, d_keys * n_heads)
self.out_projection = nn.Linear(d_values * n_heads, d_model)
self.value_projection = nn.Linear(d_model, d_values * n_heads)
def forward(self, x_patch_size, x_patch_num, x_ori, patch_index, attn_mask):
# patch_size
B, L, M = x_patch_size.shape
H = self.n_heads
queries_patch_size, keys_patch_size = x_patch_size, x_patch_size
queries_patch_size = self.patch_query_projection(queries_patch_size).view(B, L, H, -1)
keys_patch_size = self.patch_key_projection(keys_patch_size).view(B, L, H, -1)
# patch_num
B, L, M = x_patch_num.shape
queries_patch_num, keys_patch_num = x_patch_num, x_patch_num
queries_patch_num = self.patch_query_projection(queries_patch_num).view(B, L, H, -1)
keys_patch_num = self.patch_key_projection(keys_patch_num).view(B, L, H, -1)
# x_ori
B, L, _ = x_ori.shape
values = self.value_projection(x_ori).view(B, L, H, -1)
series, prior = self.inner_attention(
queries_patch_size, queries_patch_num,
keys_patch_size, keys_patch_num,
values, patch_index,
attn_mask
)
return series, prior
class DAC_structure(nn.Module):
def __init__(self, win_size, patch_size, channel, mask_flag=True, scale=None, attention_dropout=0.05,
output_attention=False):
super(DAC_structure, self).__init__()
self.scale = scale
self.mask_flag = mask_flag
self.output_attention = output_attention
self.dropout = nn.Dropout(attention_dropout)
self.window_size = win_size
self.patch_size = patch_size
self.channel = channel
def forward(self, queries_patch_size, queries_patch_num, keys_patch_size, keys_patch_num, values, patch_index,
attn_mask):
# Patch-wise Representation
B, L, H, E = queries_patch_size.shape # batch_size*channel, patch_num, n_head, d_model/n_head
scale_patch_size = self.scale or 1. / math.sqrt(E)
scores_patch_size = torch.einsum("blhe,bshe->bhls", queries_patch_size,
keys_patch_size) # batch*ch, nheads, p_num, p_num
attn_patch_size = scale_patch_size * scores_patch_size
series_patch_size = self.dropout(torch.softmax(attn_patch_size, dim=-1))  # (batch*channel, n_heads, patch_num, patch_num)
# In-patch Representation
B, L, H, E = queries_patch_num.shape # batch_size*channel, patch_size, n_head, d_model/n_head
scale_patch_num = self.scale or 1. / math.sqrt(E)
scores_patch_num = torch.einsum("blhe,bshe->bhls", queries_patch_num,
keys_patch_num) # batch*ch, nheads, p_size, p_size
attn_patch_num = scale_patch_num * scores_patch_num
series_patch_num = self.dropout(torch.softmax(attn_patch_num, dim=-1))  # (batch*channel, n_heads, patch_size, patch_size)
# Upsampling
series_patch_size = repeat(series_patch_size, 'b l m n -> b l (m repeat_m) (n repeat_n)',
repeat_m=self.patch_size[patch_index], repeat_n=self.patch_size[patch_index])
series_patch_num = series_patch_num.repeat(1, 1, self.window_size // self.patch_size[patch_index],
self.window_size // self.patch_size[patch_index])
series_patch_size = reduce(series_patch_size, '(b reduce_b) l m n-> b l m n', 'mean', reduce_b=self.channel)
series_patch_num = reduce(series_patch_num, '(b reduce_b) l m n-> b l m n', 'mean', reduce_b=self.channel)
if self.output_attention:
return series_patch_size, series_patch_num
else:
return None
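# Note (editorial): shape flow in DAC_structure, with n = win_size // patch_size and
# p = patch_size -- patch-wise scores start as (B*channel, heads, n, n) and are upsampled
# by `repeat` to (B*channel, heads, win_size, win_size); in-patch scores start as
# (B*channel, heads, p, p) and are tiled to the same size; both are then averaged over
# the channel axis via `reduce`, yielding (B, heads, win_size, win_size).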

View File

@ -0,0 +1,141 @@
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import os
import sys
import unittest
# noinspection PyProtectedMember
from numpy.testing import assert_equal
from sklearn.metrics import roc_auc_score
import torch
import pandas as pd
# temporary solution for relative imports in case pyod is not installed
# if deepod is installed, no need to use the following line
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from deepod.models.time_series.dcdetector import DCdetector
class TestDCdetector(unittest.TestCase):
def setUp(self):
train_file = 'data/omi-1/omi-1_train.csv'
test_file = 'data/omi-1/omi-1_test.csv'
train_df = pd.read_csv(train_file, sep=',', index_col=0)
test_df = pd.read_csv(test_file, index_col=0)
y = test_df['label'].values
train_df, test_df = train_df.drop('label', axis=1), test_df.drop('label', axis=1)
self.Xts_train = train_df.values
self.Xts_test = test_df.values
self.yts_test = y
device = 'cuda' if torch.cuda.is_available() else 'cpu'
self.clf = DCdetector(seq_len=100, stride=1, epochs=2,
batch_size=32, lr=1e-4, patch_size=[1,2,5],
device=device, random_state=42)
self.clf.fit(self.Xts_train)
def test_parameters(self):
assert (hasattr(self.clf, 'decision_scores_') and
self.clf.decision_scores_ is not None)
assert (hasattr(self.clf, 'labels_') and
self.clf.labels_ is not None)
assert (hasattr(self.clf, 'threshold_') and
self.clf.threshold_ is not None)
def test_train_scores(self):
assert_equal(len(self.clf.decision_scores_), self.Xts_train.shape[0])
def test_prediction_scores(self):
pred_scores = self.clf.decision_function(self.Xts_test)
assert_equal(pred_scores.shape[0], self.Xts_test.shape[0])
def test_prediction_labels(self):
pred_labels = self.clf.predict(self.Xts_test)
assert_equal(pred_labels.shape, self.yts_test.shape)
# def test_prediction_proba(self):
# pred_proba = self.clf.predict_proba(self.X_test)
# assert (pred_proba.min() >= 0)
# assert (pred_proba.max() <= 1)
#
# def test_prediction_proba_linear(self):
# pred_proba = self.clf.predict_proba(self.X_test, method='linear')
# assert (pred_proba.min() >= 0)
# assert (pred_proba.max() <= 1)
#
# def test_prediction_proba_unify(self):
# pred_proba = self.clf.predict_proba(self.X_test, method='unify')
# assert (pred_proba.min() >= 0)
# assert (pred_proba.max() <= 1)
#
# def test_prediction_proba_parameter(self):
# with assert_raises(ValueError):
# self.clf.predict_proba(self.X_test, method='something')
def test_prediction_labels_confidence(self):
pred_labels, confidence = self.clf.predict(self.Xts_test, return_confidence=True)
assert_equal(pred_labels.shape, self.yts_test.shape)
assert_equal(confidence.shape, self.yts_test.shape)
assert (confidence.min() >= 0)
assert (confidence.max() <= 1)
# def test_prediction_proba_linear_confidence(self):
# pred_proba, confidence = self.clf.predict_proba(self.X_test,
# method='linear',
# return_confidence=True)
# assert (pred_proba.min() >= 0)
# assert (pred_proba.max() <= 1)
#
# assert_equal(confidence.shape, self.y_test.shape)
# assert (confidence.min() >= 0)
# assert (confidence.max() <= 1)
#
# def test_fit_predict(self):
# pred_labels = self.clf.fit_predict(self.X_train)
# assert_equal(pred_labels.shape, self.y_train.shape)
#
# def test_fit_predict_score(self):
# self.clf.fit_predict_score(self.X_test, self.y_test)
# self.clf.fit_predict_score(self.X_test, self.y_test,
# scoring='roc_auc_score')
# self.clf.fit_predict_score(self.X_test, self.y_test,
# scoring='prc_n_score')
# with assert_raises(NotImplementedError):
# self.clf.fit_predict_score(self.X_test, self.y_test,
# scoring='something')
#
# def test_predict_rank(self):
# pred_socres = self.clf.decision_function(self.X_test)
# pred_ranks = self.clf._predict_rank(self.X_test)
#
# # assert the order is reserved
# assert_allclose(rankdata(pred_ranks), rankdata(pred_socres), atol=3)
# assert_array_less(pred_ranks, self.X_train.shape[0] + 1)
# assert_array_less(-0.1, pred_ranks)
#
# def test_predict_rank_normalized(self):
# pred_socres = self.clf.decision_function(self.X_test)
# pred_ranks = self.clf._predict_rank(self.X_test, normalized=True)
#
# # assert the order is reserved
# assert_allclose(rankdata(pred_ranks), rankdata(pred_socres), atol=3)
# assert_array_less(pred_ranks, 1.01)
# assert_array_less(-0.1, pred_ranks)
# def test_plot(self):
# os, cutoff1, cutoff2 = self.clf.explain_outlier(ind=1)
# assert_array_less(0, os)
# def test_model_clone(self):
# clone_clf = clone(self.clf)
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()

View File

@ -5,4 +5,5 @@ pandas>=1.0.0
torch>=1.10.0,<1.13.1
tqdm>=4.62.3
ray==2.6.1
pyarrow>=11.0.0
einops

View File

@ -3,4 +3,4 @@ pytest-cov==2.10.0
coveralls==2.1.1
setuptools>=49.6.0
tqdm>=4.62.3
einops

View File

@ -73,3 +73,15 @@ RoSAS:
alpha: 0.5
margin: 1
beta: 1
DCdetector:
patch_size: [1,3,5]  # each entry must evenly divide the sequence length used by the runner
batch_size: 128
epochs: 10
lr: 0.0001
n_heads: 1
d_model: 256
e_layers: 3
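A hedged sketch of how such a config block is typically consumed by a runner script (the file path and the `yaml` loading code are illustrative, not part of this commit):

import yaml
from deepod.models.time_series import DCdetector

with open('configs.yaml') as f:  # hypothetical path to this config file
    params = yaml.safe_load(f)['DCdetector']
clf = DCdetector(**params)  # every key above matches a DCdetector constructor argument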

View File

@ -17,8 +17,6 @@ from deepod.metrics import ts_metrics, point_adjustment
dataset_root = f'/home/{getpass.getuser()}/dataset/5-TSdata/_processed_data/'
parser = argparse.ArgumentParser()
parser.add_argument("--runs", type=int, default=5,
help="how many times we repeat the experiments to "