From 61e0bb010a9620d4bf9d47cfba00c8856e2552b1 Mon Sep 17 00:00:00 2001
From: jamielxu <63481760+jamielxu@users.noreply.github.com>
Date: Fri, 18 Dec 2020 02:19:45 +0800
Subject: [PATCH] SubsequenceClustering
---
 .../timeseries_processing/SubsequenceClustering.py | 473 +++++++++++++++++++++
 1 file changed, 473 insertions(+)
 create mode 100644 tods/timeseries_processing/SubsequenceClustering.py

diff --git a/tods/timeseries_processing/SubsequenceClustering.py b/tods/timeseries_processing/SubsequenceClustering.py
new file mode 100644
index 0000000..354cd38
--- /dev/null
+++ b/tods/timeseries_processing/SubsequenceClustering.py
@@ -0,0 +1,473 @@
+from typing import Any, Callable, List, Dict, Union, Optional, Sequence, Tuple
+from numpy import ndarray
+from collections import OrderedDict
+from scipy import sparse
+from sklearn.utils import check_array
+import numpy as np
+import typing
+import time
+import pandas as pd
+
+from d3m import container
+from d3m.primitive_interfaces import base, transformer
+from d3m.metadata import base as metadata_base, hyperparams
+
+from d3m.container.numpy import ndarray as d3m_ndarray
+from d3m.container import DataFrame as d3m_dataframe
+from d3m.metadata import hyperparams, params, base as metadata_base
+from d3m import utils
+from d3m.base import utils as base_utils
+from d3m.exceptions import PrimitiveNotFittedError
+from d3m.primitive_interfaces.base import CallResult, DockerContainer
+
+
+__all__ = ('SubsequenceClustering',)
+
+Inputs = container.DataFrame
+Outputs = container.DataFrame
+
+
+class Hyperparams(hyperparams.Hyperparams):
+    # Tuning
+    window_size = hyperparams.Hyperparameter[int](
+        default=1,
+        semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter'],
+        description="The moving window size.",
+    )
+    step = hyperparams.Hyperparameter[int](
+        default=1,
+        semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter'],
+        description="The displacement for moving window.",
+    )
+    # return_numpy = hyperparams.UniformBool(
+    #     default=True,
+    #     semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
+    #     description="If True, return the data format in 3d numpy array."
+    # )
+    # flatten = hyperparams.UniformBool(
+    #     default=True,
+    #     semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
+    #     description="If True, flatten the returned array in 2d."
+    # )
+    flatten_order = hyperparams.Enumeration(
+        values=['C', 'F', 'A'],
+        default='F',
+        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
+        description="Decide the order of the flatten for multivariate sequences.",
+    )
+
+    # Control
+    columns_using_method = hyperparams.Enumeration(
+        values=['name', 'index'],
+        default='index',
+        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
+        description="Choose to use columns by names or indices. If 'name', \"use_columns_name\" or \"exclude_columns_name\" is used. If 'index', \"use_columns\" or \"exclude_columns\" is used.",
+    )
+    use_columns_name = hyperparams.Set(
+        elements=hyperparams.Hyperparameter[str](''),
+        default=(),
+        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
+        description="A set of column names to force primitive to operate on. If any specified column cannot be parsed, it is skipped.",
+    )
+    exclude_columns_name = hyperparams.Set(
+        elements=hyperparams.Hyperparameter[str](''),
+        default=(),
+        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
+        description="A set of column names to not operate on. Applicable only if \"use_columns_name\" is not provided.",
+    )
+    use_columns = hyperparams.Set(
+        elements=hyperparams.Hyperparameter[int](-1),
+        default=(),
+        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
+        description="A set of column indices to force primitive to operate on. If any specified column cannot be parsed, it is skipped.",
+    )
+    exclude_columns = hyperparams.Set(
+        elements=hyperparams.Hyperparameter[int](-1),
+        default=(),
+        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
+        description="A set of column indices to not operate on. Applicable only if \"use_columns\" is not provided.",
+    )
+    return_result = hyperparams.Enumeration(
+        values=['append', 'replace', 'new'],
+        default='replace',
+        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
+        description="Should parsed columns be appended, should they replace original columns, or should only parsed columns be returned? This hyperparam is ignored if use_semantic_types is set to false.",
+    )
+    use_semantic_types = hyperparams.UniformBool(
+        default=False,
+        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
+        description="Controls whether semantic_types metadata will be used for filtering columns in the input dataframe. Setting this to false makes the code ignore return_result and produce only the output dataframe.",
+    )
+    add_index_columns = hyperparams.UniformBool(
+        default=False,
+        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
+        description="Also include primary index columns if input data has them. Applicable only if \"return_result\" is set to \"new\".",
+    )
+    error_on_no_input = hyperparams.UniformBool(
+        default=True,
+        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
+        description="Throw an exception if no input column is selected/provided. Defaults to true to behave like sklearn. To prevent pipelines from breaking, set this to False.",
+    )
+    return_semantic_type = hyperparams.Enumeration[str](
+        values=['https://metadata.datadrivendiscovery.org/types/Attribute', 'https://metadata.datadrivendiscovery.org/types/ConstructedAttribute'],
+        default='https://metadata.datadrivendiscovery.org/types/Attribute',
+        description='Decides what semantic type to attach to generated attributes.',
+        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
+    )
+
+
+class SubsequenceClustering(transformer.TransformerPrimitiveBase[Inputs, Outputs, Hyperparams]):
+    """
+    Subsequence Time Series Clustering.
+
+    Parameters
+    ----------
+    window_size : int
+        The moving window size.
+
+    step : int, optional (default=1)
+        The displacement for moving window.
+
+    flatten_order : str, optional (default='F')
+        Decide the order of the flatten for multivariate sequences.
+        'C' means to flatten in row-major (C-style) order.
+        'F' means to flatten in column-major (Fortran-style) order.
+        'A' means to flatten in column-major order if the array is Fortran
+        contiguous in memory, row-major order otherwise. The default is 'F'.
+
+    use_columns: Set
+        A set of column indices to force primitive to operate on. If any specified column cannot be parsed, it is skipped.
+
+    exclude_columns: Set
+        A set of column indices to not operate on. Applicable only if "use_columns" is not provided.
+
+    return_result: Enumeration
+        Should parsed columns be appended, should they replace original columns, or should only parsed columns be returned? This hyperparam is ignored if use_semantic_types is set to false.
+
+    use_semantic_types: Bool
+        Controls whether semantic_types metadata will be used for filtering columns in the input dataframe. Setting this to false makes the code ignore return_result and produce only the output dataframe.
+
+    add_index_columns: Bool
+        Also include primary index columns if input data has them. Applicable only if "return_result" is set to "new".
+
+    error_on_no_input: Bool
+        Throw an exception if no input column is selected/provided. Defaults to true to behave like sklearn. To prevent pipelines from breaking, set this to False.
+
+    return_semantic_type: Enumeration[str]
+        Decides what semantic type to attach to generated attributes.
+    """
+
+    __author__ = "DATA Lab at Texas A&M University"
+    metadata = metadata_base.PrimitiveMetadata({
+        "name": "Subsequence Clustering Primitive",
+        "python_path": "d3m.primitives.tods.timeseries_processing.subsequence_clustering",
+        "source": {'name': 'DATA Lab at Texas A&M University', 'contact': 'mailto:khlai037@tamu.edu',
+                   'uris': ['https://gitlab.com/lhenry15/tods.git', ]},
+        "algorithm_types": [metadata_base.PrimitiveAlgorithmType.BK_FILTER, ],
+        "primitive_family": metadata_base.PrimitiveFamily.DATA_PREPROCESSING,
+        "id": "cf0bd4c1-9e09-4471-a2a3-6956deed17ac",
+        "hyperparams_to_tune": ['window_size', 'step', 'flatten_order'],
+        "version": "0.0.1",
+    })
+
+    def produce(self, *, inputs: Inputs, timeout: float = None, iterations: int = None) -> CallResult[Outputs]:
+        """
+        Process the testing data.
+        Args:
+            inputs: Container DataFrame.
+
+        Returns:
+            Container DataFrame of flattened subsequences.
+        """
+        # Get columns to fit.
+        self._fitted = False
+        self._training_inputs, self._training_indices = self._get_columns_to_fit(inputs, self.hyperparams)
+        self._input_column_names = self._training_inputs.columns
+
+        if len(self._training_indices) > 0:
+            # self._clf.fit(self._training_inputs)
+            self._fitted = True
+        else:
+            if self.hyperparams['error_on_no_input']:
+                raise RuntimeError("No input columns were selected")
+            self.logger.warning("No input columns were selected")
+
+        if not self._fitted:
+            raise PrimitiveNotFittedError("Primitive not fitted.")
+        sk_inputs = inputs
+
+        if self.hyperparams['use_semantic_types']:
+            sk_inputs = inputs.iloc[:, self._training_indices]
+        output_columns = []
+        if len(self._training_indices) > 0:
+            sk_output = self._get_sub_matrices(sk_inputs,
+                                               window_size=self.hyperparams['window_size'],
+                                               step=self.hyperparams['step'],
+                                               flatten_order=self.hyperparams['flatten_order'])
+            if sparse.issparse(sk_output):
+                sk_output = sk_output.toarray()
+
+            outputs = self._wrap_predictions(inputs, sk_output)
+
+            if len(outputs.columns) == len(self._input_column_names):
+                outputs.columns = self._input_column_names
+            output_columns = [outputs]
+        else:
+            if self.hyperparams['error_on_no_input']:
+                raise RuntimeError("No input columns were selected")
+            self.logger.warning("No input columns were selected")
+            # Fall back to returning the inputs unchanged when no column is selected,
+            # so the return below does not reference an undefined variable.
+            outputs = inputs
+
+        # outputs = base_utils.combine_columns(return_result=self.hyperparams['return_result'],
+        #                                      add_index_columns=self.hyperparams['add_index_columns'],
+        #                                      inputs=inputs, column_indices=self._training_indices,
+        #                                      columns_list=output_columns)
+
+        self._write(outputs)
+        return CallResult(outputs)
+
+    @classmethod
+    def _get_columns_to_fit(cls, inputs: Inputs, hyperparams: Hyperparams):
+        """
+        Select columns to fit.
+        Args:
+            inputs: Container DataFrame
+            hyperparams: d3m.metadata.hyperparams.Hyperparams
+
+        Returns:
+            list
+        """
+        if not hyperparams['use_semantic_types']:
+            return inputs, list(range(len(inputs.columns)))
+
+        inputs_metadata = inputs.metadata
+
+        def can_produce_column(column_index: int) -> bool:
+            return cls._can_produce_column(inputs_metadata, column_index, hyperparams)
+
+        use_columns = []
+        exclude_columns = []
+
+        # if hyperparams['columns_using_method'] == 'name':
+        #     inputs_cols = inputs.columns.values.tolist()
+        #     for i in range(len(inputs_cols)):
+        #         if inputs_cols[i] in hyperparams['use_columns_name']:
+        #             use_columns.append(i)
+        #         elif inputs_cols[i] in hyperparams['exclude_columns_name']:
+        #             exclude_columns.append(i)
+        # else:
+        use_columns = hyperparams['use_columns']
+        exclude_columns = hyperparams['exclude_columns']
+
+        columns_to_produce, columns_not_to_produce = base_utils.get_columns_to_use(inputs_metadata, use_columns=use_columns, exclude_columns=exclude_columns, can_use_column=can_produce_column)
+        return inputs.iloc[:, columns_to_produce], columns_to_produce
+
+    @classmethod
+    def _can_produce_column(cls, inputs_metadata: metadata_base.DataMetadata, column_index: int, hyperparams: Hyperparams) -> bool:
+        """
+        Output whether a column can be processed.
+        Args:
+            inputs_metadata: d3m.metadata.base.DataMetadata
+            column_index: int
+
+        Returns:
+            bool
+        """
+        column_metadata = inputs_metadata.query((metadata_base.ALL_ELEMENTS, column_index))
+
+        accepted_structural_types = (int, float, np.integer, np.float64)
+        accepted_semantic_types = set()
+        accepted_semantic_types.add("https://metadata.datadrivendiscovery.org/types/Attribute")
+        if not issubclass(column_metadata['structural_type'], accepted_structural_types):
+            return False
+
+        semantic_types = set(column_metadata.get('semantic_types', []))
+
+        if len(semantic_types) == 0:
+            cls.logger.warning("No semantic types found in column metadata")
+            return False
+
+        # Making sure all accepted_semantic_types are available in semantic_types
+        if len(accepted_semantic_types - semantic_types) == 0:
+            return True
+
+        return False
+
+    @classmethod
+    def _update_predictions_metadata(cls, inputs_metadata: metadata_base.DataMetadata, outputs: Optional[Outputs],
+                                     target_columns_metadata: List[OrderedDict]) -> metadata_base.DataMetadata:
+        """
+        Update metadata for selected columns.
+        Args:
+            inputs_metadata: metadata_base.DataMetadata
+            outputs: Container DataFrame
+            target_columns_metadata: list
+
+        Returns:
+            d3m.metadata.base.DataMetadata
+        """
+        outputs_metadata = metadata_base.DataMetadata().generate(value=outputs)
+
+        for column_index, column_metadata in enumerate(target_columns_metadata):
+            column_metadata.pop("structural_type", None)
+            outputs_metadata = outputs_metadata.update_column(column_index, column_metadata)
+
+        return outputs_metadata
+
+    def _wrap_predictions(self, inputs: Inputs, predictions: ndarray) -> Outputs:
+        """
+        Wrap predictions into a dataframe.
+        Args:
+            inputs: Container DataFrame
+            predictions: array-like data (n_samples, n_features)
+
+        Returns:
+            DataFrame
+        """
+        outputs = d3m_dataframe(predictions, generate_metadata=True)
+        target_columns_metadata = self._add_target_columns_metadata(outputs.metadata, self.hyperparams)
+        outputs.metadata = self._update_predictions_metadata(inputs.metadata, outputs, target_columns_metadata)
+        return outputs
+
+    @classmethod
+    def _add_target_columns_metadata(cls, outputs_metadata: metadata_base.DataMetadata, hyperparams):
+        """
+        Add target columns metadata.
+        Args:
+            outputs_metadata: metadata.base.DataMetadata
+            hyperparams: d3m.metadata.hyperparams.Hyperparams
+
+        Returns:
+            List[OrderedDict]
+        """
+        outputs_length = outputs_metadata.query((metadata_base.ALL_ELEMENTS,))['dimension']['length']
+        target_columns_metadata: List[OrderedDict] = []
+        for column_index in range(outputs_length):
+            column_name = "output_{}".format(column_index)
+            column_metadata = OrderedDict()
+            semantic_types = set()
+            semantic_types.add(hyperparams["return_semantic_type"])
+            column_metadata['semantic_types'] = list(semantic_types)
+
+            column_metadata["name"] = str(column_name)
+            target_columns_metadata.append(column_metadata)
+
+        return target_columns_metadata
+
+    def _write(self, inputs: Inputs):
+        inputs.to_csv(str(time.time()) + '.csv')
+
+    def _get_sub_sequences_length(self, n_samples, window_size, step):
+        """
+        Pseudo chop a univariate time series into subsequences. Return the valid
+        length only.
+        Parameters
+        ----------
+        n_samples : int
+            The number of samples in the input time series.
+        window_size : int
+            The moving window size.
+        step : int, optional (default=1)
+            The displacement for moving window.
+        Returns
+        -------
+        valid_len : int
+            The number of subsequences.
+        """
+        valid_len = int(np.floor((n_samples - window_size) / step)) + 1
+        return valid_len
+
+    def _get_sub_matrices(self, X, window_size, step=1, flatten_order='F'):
+        """
+        Chop a multivariate time series into subsequences (matrices).
+        Parameters
+        ----------
+        X : numpy array of shape (n_samples, n_sequences)
+            The input samples.
+        window_size : int
+            The moving window size.
+        step : int, optional (default=1)
+            The displacement for moving window.
+        flatten_order : str, optional (default='F')
+            Decide the order of the flatten for multivariate sequences.
+            'C' means to flatten in row-major (C-style) order.
+            'F' means to flatten in column-major (Fortran-style) order.
+            'A' means to flatten in column-major order if the array is Fortran
+            contiguous in memory, row-major order otherwise. The default is 'F'.
+        Returns
+        -------
+        X_sub : numpy array of shape (valid_len, window_size * n_sequences)
+            The numpy matrix in which each row stands for a flattened submatrix.
+        """
+        X = check_array(X).astype(np.float64)
+        n_samples, n_sequences = X.shape[0], X.shape[1]
+
+        # get the valid length
+        valid_len = self._get_sub_sequences_length(n_samples, window_size, step)
+
+        X_sub = []
+        X_left_inds = []
+        X_right_inds = []
+
+        # exclude the edge
+        steps = list(range(0, n_samples, step))
+        steps = steps[:valid_len]
+
+        for idx, i in enumerate(steps):
+            X_sub.append(X[i: i + window_size, :])
+            X_left_inds.append(i)
+            X_right_inds.append(i + window_size)
+
+        X_sub = np.asarray(X_sub)
+
+        # flatten each (window_size, n_sequences) submatrix into one row
+        temp_array = np.zeros([valid_len, window_size * n_sequences])
+        if flatten_order == 'C':
+            for i in range(valid_len):
+                temp_array[i, :] = X_sub[i, :, :].flatten(order='C')
+        else:
+            for i in range(valid_len):
+                temp_array[i, :] = X_sub[i, :, :].flatten(order='F')
+
+        return temp_array  # , np.asarray(X_left_inds), np.asarray(X_right_inds)
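
For a quick sanity check of what _get_sub_matrices computes, the following minimal standalone sketch mirrors its sliding-window logic in plain NumPy, without the d3m wrapping. The function name sliding_subsequences and the toy input are illustrative only, not part of the primitive.

import numpy as np

def sliding_subsequences(X, window_size, step=1, flatten_order='F'):
    # Chop a (n_samples, n_sequences) array into overlapping windows and
    # flatten each (window_size, n_sequences) window into one output row,
    # matching the shape (valid_len, window_size * n_sequences) above.
    X = np.asarray(X, dtype=float)
    n_samples, n_sequences = X.shape
    valid_len = (n_samples - window_size) // step + 1
    out = np.zeros((valid_len, window_size * n_sequences))
    for row, start in enumerate(range(0, valid_len * step, step)):
        out[row, :] = X[start:start + window_size, :].flatten(order=flatten_order)
    return out

# Example: 6 samples of a bivariate series, window of 3, step of 2.
# valid_len = floor((6 - 3) / 2) + 1 = 2, so the result has shape (2, 6).
X = np.arange(12).reshape(6, 2)
print(sliding_subsequences(X, window_size=3, step=2).shape)  # (2, 6)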