@@ -94,7 +94,6 @@ class CollectiveCommonTest: | |||||
def test_prediction_scores(self): | def test_prediction_scores(self): | ||||
pred_scores, _, _ = self.clf.decision_function(self.X_test) | pred_scores, _, _ = self.clf.decision_function(self.X_test) | ||||
# check score shapes | # check score shapes | ||||
assert_equal(pred_scores.shape[0], self.y_test.shape[0]) | assert_equal(pred_scores.shape[0], self.y_test.shape[0]) | ||||
@@ -141,33 +141,7 @@ class LSTMOutlierDetector(CollectiveBaseDetector): | |||||
relative_error = (np.linalg.norm(y_predict - y_buf, axis=1) / np.linalg.norm(y_buf + 1e-6, axis=1)).ravel() | relative_error = (np.linalg.norm(y_predict - y_buf, axis=1) / np.linalg.norm(y_buf + 1e-6, axis=1)).ravel() | ||||
return relative_error | return relative_error | ||||
def predict(self, X):  # pragma: no cover
    """Predict whether each sample is an outlier.

    Parameters
    ----------
    X : numpy array of shape (n_samples, n_features)
        The input samples.

    Returns
    -------
    outlier_labels : numpy array of shape (n_samples,)
        For each observation, tells whether or not
        it should be considered as an outlier according to the
        fitted model. 0 stands for inliers and 1 for outliers.
    """
    check_is_fitted(self, ['decision_scores_', 'threshold_', 'labels_'])

    scores, left_inds, right_inds = self.decision_function(X)

    # The first ``window_size`` positions carry no score/index information;
    # prepend zeros so the outputs line up with the input length.
    pad = np.zeros((self.window_size,))
    scores = np.concatenate((pad, scores))
    left_inds = np.concatenate((pad, left_inds))
    right_inds = np.concatenate((pad, right_inds))

    labels = (scores > self.threshold_).astype('int').ravel()
    return labels, left_inds.ravel(), right_inds.ravel()
def decision_function(self, X: np.array): | def decision_function(self, X: np.array): | ||||
"""Predict raw anomaly scores of X using the fitted detector. | """Predict raw anomaly scores of X using the fitted detector. | ||||
@@ -259,6 +233,10 @@ class LSTMOutlierDetector(CollectiveBaseDetector): | |||||
# print(relative_error_right_inds) | # print(relative_error_right_inds) | ||||
pred_score = danger_coefficient * self.danger_coefficient_weight + averaged_relative_error * (1 - self.danger_coefficient_weight) | pred_score = danger_coefficient * self.danger_coefficient_weight + averaged_relative_error * (1 - self.danger_coefficient_weight) | ||||
pred_score = np.concatenate((np.zeros((self.window_size,)), pred_score)) | |||||
relative_error_left_inds = np.concatenate((np.arange(self.window_size), relative_error_left_inds+self.window_size)) | |||||
relative_error_right_inds = np.concatenate((np.arange(self.window_size)+self.window_size, relative_error_right_inds+self.window_size)) | |||||
return pred_score, relative_error_left_inds, relative_error_right_inds | return pred_score, relative_error_left_inds, relative_error_right_inds | ||||
@@ -27,7 +27,7 @@ class LSTMODTestCase(unittest.TestCase): | |||||
self.X_train = d3m_dataframe({'data': [3., 4., 8., 16, 18, 13., 22., 36., 59., 128, 62, 67, 78, 100]}, | self.X_train = d3m_dataframe({'data': [3., 4., 8., 16, 18, 13., 22., 36., 59., 128, 62, 67, 78, 100]}, | ||||
columns=['data'], generate_metadata=True) | columns=['data'], generate_metadata=True) | ||||
self.y_train = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) | |||||
self.y_train = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) | |||||
self.X_test = d3m_dataframe({'data': [3., 4., 8.6, 13.4, 22.5, 17, 19.2, 36.1, 127, -23, 59.2]}, | self.X_test = d3m_dataframe({'data': [3., 4., 8.6, 13.4, 22.5, 17, 19.2, 36.1, 127, -23, 59.2]}, | ||||
columns=['data'], generate_metadata=True) | columns=['data'], generate_metadata=True) | ||||
self.y_test = np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0]) | self.y_test = np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0]) | ||||