diff --git a/examples/build_Autocorrelation_pipeline.py b/examples/build_Autocorrelation_pipeline.py
new file mode 100644
index 0000000..4242e73
--- /dev/null
+++ b/examples/build_Autocorrelation_pipeline.py
@@ -0,0 +1,70 @@
+from d3m import index
+from d3m.metadata.base import ArgumentType
+from d3m.metadata.pipeline import Pipeline, PrimitiveStep
+
+# -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> axiswise_scaler -> auto_correlation -> construct_predictions
+# (extract_columns_by_semantic_types(targets) is built as step 3 but not consumed downstream)
+
+# Creating pipeline
+pipeline_description = Pipeline()
+pipeline_description.add_input(name='inputs')
+
+# Step 0: dataset_to_dataframe
+step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common'))
+step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
+step_0.add_output('produce')
+pipeline_description.add_step(step_0)
+
+# Step 1: column_parser
+step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common'))
+step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
+step_1.add_output('produce')
+pipeline_description.add_step(step_1)
+
+# Step 2: extract_columns_by_semantic_types(attributes)
+step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common'))
+step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
+step_2.add_output('produce')
+step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
+                          data=['https://metadata.datadrivendiscovery.org/types/Attribute'])
+pipeline_description.add_step(step_2)
+
+# Step 3: extract_columns_by_semantic_types(targets)
+step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common'))
+step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
+step_3.add_output('produce')
+step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
+                          data=['https://metadata.datadrivendiscovery.org/types/TrueTarget'])
+pipeline_description.add_step(step_3)
+
+attributes = 'steps.2.produce'
+targets = 'steps.3.produce'
+
+# Step 4: processing
+step_4 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.axiswise_scaler'))
+step_4.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes)
+step_4.add_output('produce')
+pipeline_description.add_step(step_4)
+
+# Step 5: algorithm
+step_5 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.feature_analysis.auto_correlation'))
+step_5.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.4.produce')
+step_5.add_output('produce')
+pipeline_description.add_step(step_5)
+
+# Step 6: Predictions
+step_6 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.construct_predictions.Common'))
+step_6.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.5.produce')
+step_6.add_argument(name='reference', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
+step_6.add_output('produce')
+pipeline_description.add_step(step_6)
+
+# Final Output
+pipeline_description.add_output(name='output predictions', data_reference='steps.6.produce')
+
+# Output to json
+data = pipeline_description.to_json()
+with open('example_pipeline.json', 'w') as f:
+    f.write(data)
+    print(data)
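A quick round-trip check on the serialized pipeline can catch a malformed description before it ever reaches a runtime. This sketch is not part of the diff; it assumes example_pipeline.json was written by the script above and uses only the public Pipeline.from_json loader:

# Round-trip sanity check for the JSON written above (sketch, not in this diff).
from d3m.metadata.pipeline import Pipeline

with open('example_pipeline.json') as f:
    loaded = Pipeline.from_json(f)

assert len(loaded.steps) == 7, 'expected steps 0-6'
print(loaded.id, len(loaded.steps))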
diff --git a/examples/build_DeepLog_pipeline.py b/examples/build_DeepLog_pipeline.py
new file mode 100644
index 0000000..21fd586
--- /dev/null
+++ b/examples/build_DeepLog_pipeline.py
@@ -0,0 +1,70 @@
+from d3m import index
+from d3m.metadata.base import ArgumentType
+from d3m.metadata.pipeline import Pipeline, PrimitiveStep
+
+# -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> axiswise_scaler -> deeplog -> construct_predictions
+# (extract_columns_by_semantic_types(targets) is built as step 3 but not consumed downstream)
+
+# Creating pipeline
+pipeline_description = Pipeline()
+pipeline_description.add_input(name='inputs')
+
+# Step 0: dataset_to_dataframe
+step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common'))
+step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
+step_0.add_output('produce')
+pipeline_description.add_step(step_0)
+
+# Step 1: column_parser
+step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common'))
+step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
+step_1.add_output('produce')
+pipeline_description.add_step(step_1)
+
+# Step 2: extract_columns_by_semantic_types(attributes)
+step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common'))
+step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
+step_2.add_output('produce')
+step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
+                          data=['https://metadata.datadrivendiscovery.org/types/Attribute'])
+pipeline_description.add_step(step_2)
+
+# Step 3: extract_columns_by_semantic_types(targets)
+step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common'))
+step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
+step_3.add_output('produce')
+step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
+                          data=['https://metadata.datadrivendiscovery.org/types/TrueTarget'])
+pipeline_description.add_step(step_3)
+
+attributes = 'steps.2.produce'
+targets = 'steps.3.produce'
+
+# Step 4: processing
+step_4 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.axiswise_scaler'))
+step_4.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes)
+step_4.add_output('produce')
+pipeline_description.add_step(step_4)
+
+# Step 5: algorithm
+step_5 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.detection_algorithm.deeplog'))
+step_5.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.4.produce')
+step_5.add_output('produce')
+pipeline_description.add_step(step_5)
+
+# Step 6: Predictions
+step_6 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.construct_predictions.Common'))
+step_6.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.5.produce')
+step_6.add_argument(name='reference', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
+step_6.add_output('produce')
+pipeline_description.add_step(step_6)
+
+# Final Output
+pipeline_description.add_output(name='output predictions', data_reference='steps.6.produce')
+
+# Output to json
+data = pipeline_description.to_json()
+with open('example_pipeline.json', 'w') as f:
+    f.write(data)
+    print(data)
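The four builder scripts in this diff are identical except for the step-5 primitive. A small helper like the hypothetical build_pipeline below (illustrative only, not part of this diff) would remove the duplication while using only the calls the scripts already make:

from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

ATTRIBUTE = 'https://metadata.datadrivendiscovery.org/types/Attribute'
TRUE_TARGET = 'https://metadata.datadrivendiscovery.org/types/TrueTarget'

def add_step(pipeline, python_path, data_reference, semantic_types=None):
    # Append one primitive step and return the data reference of its output.
    step = PrimitiveStep(primitive=index.get_primitive(python_path))
    step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=data_reference)
    if semantic_types is not None:
        step.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, data=semantic_types)
    step.add_output('produce')
    pipeline.add_step(step)
    return 'steps.{}.produce'.format(len(pipeline.steps) - 1)

def build_pipeline(algorithm_path):
    pipeline = Pipeline()
    pipeline.add_input(name='inputs')
    dataframe = add_step(pipeline, 'd3m.primitives.data_transformation.dataset_to_dataframe.Common', 'inputs.0')
    parsed = add_step(pipeline, 'd3m.primitives.data_transformation.column_parser.Common', dataframe)
    attributes = add_step(pipeline, 'd3m.primitives.data_transformation.extract_columns_by_semantic_types.Common', parsed, [ATTRIBUTE])
    add_step(pipeline, 'd3m.primitives.data_transformation.extract_columns_by_semantic_types.Common', dataframe, [TRUE_TARGET])
    scaled = add_step(pipeline, 'd3m.primitives.tods.timeseries_processing.transformation.axiswise_scaler', attributes)
    detected = add_step(pipeline, algorithm_path, scaled)
    # construct_predictions also needs a 'reference' argument, so add it by hand.
    step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.construct_predictions.Common'))
    step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=detected)
    step.add_argument(name='reference', argument_type=ArgumentType.CONTAINER, data_reference=parsed)
    step.add_output('produce')
    pipeline.add_step(step)
    pipeline.add_output(name='output predictions', data_reference='steps.6.produce')
    return pipeline

pipeline_description = build_pipeline('d3m.primitives.tods.detection_algorithm.deeplog')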
diff --git a/examples/build_MatrixProfile_pipeline.py b/examples/build_MatrixProfile_pipeline.py
new file mode 100644
index 0000000..3d1e66c
--- /dev/null
+++ b/examples/build_MatrixProfile_pipeline.py
@@ -0,0 +1,70 @@
+from d3m import index
+from d3m.metadata.base import ArgumentType
+from d3m.metadata.pipeline import Pipeline, PrimitiveStep
+
+# -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> axiswise_scaler -> matrix_profile -> construct_predictions
+# (extract_columns_by_semantic_types(targets) is built as step 3 but not consumed downstream)
+
+# Creating pipeline
+pipeline_description = Pipeline()
+pipeline_description.add_input(name='inputs')
+
+# Step 0: dataset_to_dataframe
+step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common'))
+step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
+step_0.add_output('produce')
+pipeline_description.add_step(step_0)
+
+# Step 1: column_parser
+step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common'))
+step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
+step_1.add_output('produce')
+pipeline_description.add_step(step_1)
+
+# Step 2: extract_columns_by_semantic_types(attributes)
+step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common'))
+step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
+step_2.add_output('produce')
+step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
+                          data=['https://metadata.datadrivendiscovery.org/types/Attribute'])
+pipeline_description.add_step(step_2)
+
+# Step 3: extract_columns_by_semantic_types(targets)
+step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common'))
+step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
+step_3.add_output('produce')
+step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
+                          data=['https://metadata.datadrivendiscovery.org/types/TrueTarget'])
+pipeline_description.add_step(step_3)
+
+attributes = 'steps.2.produce'
+targets = 'steps.3.produce'
+
+# Step 4: processing
+step_4 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.axiswise_scaler'))
+step_4.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes)
+step_4.add_output('produce')
+pipeline_description.add_step(step_4)
+
+# Step 5: algorithm
+step_5 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.detection_algorithm.matrix_profile'))
+step_5.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.4.produce')
+step_5.add_output('produce')
+pipeline_description.add_step(step_5)
+
+# Step 6: Predictions
+step_6 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.construct_predictions.Common'))
+step_6.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.5.produce')
+step_6.add_argument(name='reference', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
+step_6.add_output('produce')
+pipeline_description.add_step(step_6)
+
+# Final Output
+pipeline_description.add_output(name='output predictions', data_reference='steps.6.produce')
+
+# Output to json
+data = pipeline_description.to_json()
+with open('example_pipeline.json', 'w') as f:
+    f.write(data)
+    print(data)
diff --git a/examples/build_SOD_pipeline.py b/examples/build_SOD_pipeline.py
new file mode 100644
index 0000000..9e92d0b
--- /dev/null
+++ b/examples/build_SOD_pipeline.py
@@ -0,0 +1,70 @@
+from d3m import index
+from d3m.metadata.base import ArgumentType
+from d3m.metadata.pipeline import Pipeline, PrimitiveStep
+
+# -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> axiswise_scaler -> pyod_sod -> construct_predictions
+# (extract_columns_by_semantic_types(targets) is built as step 3 but not consumed downstream)
+
+# Creating pipeline
+pipeline_description = Pipeline()
+pipeline_description.add_input(name='inputs')
+
+# Step 0: dataset_to_dataframe
+step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common'))
+step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
+step_0.add_output('produce')
+pipeline_description.add_step(step_0)
+
+# Step 1: column_parser
+step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common'))
+step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
+step_1.add_output('produce')
+pipeline_description.add_step(step_1)
+
+# Step 2: extract_columns_by_semantic_types(attributes)
+step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common'))
+step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
+step_2.add_output('produce')
+step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
+                          data=['https://metadata.datadrivendiscovery.org/types/Attribute'])
+pipeline_description.add_step(step_2)
+
+# Step 3: extract_columns_by_semantic_types(targets)
+step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common'))
+step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
+step_3.add_output('produce')
+step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
+                          data=['https://metadata.datadrivendiscovery.org/types/TrueTarget'])
+pipeline_description.add_step(step_3)
+
+attributes = 'steps.2.produce'
+targets = 'steps.3.produce'
+
+# Step 4: processing
+step_4 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.axiswise_scaler'))
+step_4.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes)
+step_4.add_output('produce')
+pipeline_description.add_step(step_4)
+
+# Step 5: algorithm
+step_5 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.detection_algorithm.pyod_sod'))
+step_5.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.4.produce')
+step_5.add_output('produce')
+pipeline_description.add_step(step_5)
+
+# Step 6: Predictions
+step_6 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.construct_predictions.Common'))
+step_6.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.5.produce')
+step_6.add_argument(name='reference', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
+step_6.add_output('produce')
+pipeline_description.add_step(step_6)
+
+# Final Output
+pipeline_description.add_output(name='output predictions', data_reference='steps.6.produce')
+
+# Output to json
+data = pipeline_description.to_json()
+with open('example_pipeline.json', 'w') as f:
+    f.write(data)
+    print(data)
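Once any of these JSON files exists, it can be fitted and run end to end with the d3m reference runtime. The sketch below is not part of the diff: the dataset path is a placeholder for a real D3M datasetDoc.json, and the exact Runtime/Result API can vary between d3m releases.

from d3m.container.dataset import Dataset
from d3m.metadata import base as metadata_base
from d3m.metadata.pipeline import Pipeline
from d3m.runtime import Runtime

with open('example_pipeline.json') as f:
    pipeline = Pipeline.from_json(f)

# Placeholder path: point this at a real datasetDoc.json.
dataset = Dataset.load('file:///path/to/TRAIN/dataset_TRAIN/datasetDoc.json')

runtime = Runtime(pipeline=pipeline, context=metadata_base.Context.TESTING)
runtime.fit(inputs=[dataset]).check_success()
result = runtime.produce(inputs=[dataset])
result.check_success()
print(result.values['outputs.0'])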
diff --git a/tods/tests/test_Autocorrelation.py b/tods/tests/test_Autocorrelation.py
new file mode 100644
index 0000000..bc82ff3
--- /dev/null
+++ b/tods/tests/test_Autocorrelation.py
@@ -0,0 +1,134 @@
+import unittest
+
+
+from datetime import datetime
+
+from d3m import container, utils
+from d3m.metadata import base as metadata_base
+
+from feature_analysis import AutoCorrelation
+
+
+#import utils as test_utils
+
+import numpy as np
+import pandas as pd
+
+class AutoCorrelationTestCase(unittest.TestCase):
+    def test_basic(self):
+        self.maxDiff = None
+        main = container.DataFrame({'d3mIndex': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
+                                    'timestamp': [1472918400, 1472918700, 1472919000, 1472919300,
+                                                  1472919600, 1472919900, 1472920200, 1472920500, 1472920800, 1472921100],
+                                    'value': [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0],
+                                    'ground_truth': [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]},
+                                   columns=['d3mIndex', 'timestamp', 'value', 'ground_truth'], generate_metadata=True)
+        """
+        main.metadata = main.metadata.update_column(0, {'name': 'd3mIndex_'})
+        main.metadata = main.metadata.update_column(1, {'name': 'timestamp_'})
+        main.metadata = main.metadata.update_column(2, {'name': 'value_'})
+        main.metadata = main.metadata.update_column(3, {'name': 'ground_truth_'})
+        """
+
+        #print(main)
+
+        self.assertEqual(utils.to_json_structure(main.metadata.to_internal_simple_structure()), [{
+            'selector': [],
+            'metadata': {
+                # 'top_level': 'main',
+                'schema': metadata_base.CONTAINER_SCHEMA_VERSION,
+                'structural_type': 'd3m.container.pandas.DataFrame',
+                'semantic_types': ['https://metadata.datadrivendiscovery.org/types/Table'],
+                'dimension': {
+                    'name': 'rows',
+                    'semantic_types': ['https://metadata.datadrivendiscovery.org/types/TabularRow'],
+                    'length': 10,
+                },
+            },
+        }, {
+            'selector': ['__ALL_ELEMENTS__'],
+            'metadata': {
+                'dimension': {
+                    'name': 'columns',
+                    'semantic_types': ['https://metadata.datadrivendiscovery.org/types/TabularColumn'],
+                    'length': 4,
+                },
+            },
+        }, {
+            'selector': ['__ALL_ELEMENTS__', 0],
+            'metadata': {'structural_type': 'numpy.int64', 'name': 'd3mIndex'},
+        }, {
+            'selector': ['__ALL_ELEMENTS__', 1],
+            'metadata': {'structural_type': 'numpy.int64', 'name': 'timestamp'},
+        }, {
+            'selector': ['__ALL_ELEMENTS__', 2],
+            'metadata': {'structural_type': 'numpy.float64', 'name': 'value'},
+        }, {
+            'selector': ['__ALL_ELEMENTS__', 3],
+            'metadata': {'structural_type': 'numpy.int64', 'name': 'ground_truth'},
+        }])
+
+        hyperparams_class = AutoCorrelation.AutoCorrelation.metadata.get_hyperparams().defaults()
+        hyperparams_class = hyperparams_class.replace({'nlags': 2})
+        #hyperparams_class = hyperparams_class.replace({'use_semantic_types': True})
+        primitive = AutoCorrelation.AutoCorrelation(hyperparams=hyperparams_class)
+        new_main = primitive.produce(inputs=main).value
+
+        print(new_main)
+
+        new_main_drop = new_main['value_acf']
+        new_main_drop = new_main_drop.reset_index(drop=True)
+
+        expected_result = pd.DataFrame({'acf': [1.000000, 0.700000, 0.412121, 0.148485, -0.078788, -0.257576, -0.375758, -0.421212, -0.381818, -0.245455]})
+
+        # Compare values numerically; assertEqual(all(...), all(...)) would pass trivially.
+        np.testing.assert_allclose(new_main_drop.values, expected_result['acf'].values, atol=1e-6)
+
+        #print(main.metadata.to_internal_simple_structure())
+        #print(new_main.metadata.to_internal_simple_structure())
+
+        self.assertEqual(utils.to_json_structure(main.metadata.to_internal_simple_structure()), [{
+            'selector': [],
+            'metadata': {
+                # 'top_level': 'main',
+                'schema': metadata_base.CONTAINER_SCHEMA_VERSION,
+                'structural_type': 'd3m.container.pandas.DataFrame',
+                'semantic_types': ['https://metadata.datadrivendiscovery.org/types/Table'],
+                'dimension': {
+                    'name': 'rows',
+                    'semantic_types': ['https://metadata.datadrivendiscovery.org/types/TabularRow'],
+                    'length': 10,
+                },
+            },
+        }, {
+            'selector': ['__ALL_ELEMENTS__'],
+            'metadata': {
+                'dimension': {
+                    'name': 'columns',
+                    'semantic_types': ['https://metadata.datadrivendiscovery.org/types/TabularColumn'],
+                    'length': 4,
+                },
+            },
+        }, {
+            'selector': ['__ALL_ELEMENTS__', 0],
+            'metadata': {'structural_type': 'numpy.int64', 'name': 'd3mIndex'},
+        }, {
+            'selector': ['__ALL_ELEMENTS__', 1],
+            'metadata': {'structural_type': 'numpy.int64', 'name': 'timestamp'},
+        }, {
+            'selector': ['__ALL_ELEMENTS__', 2],
+            'metadata': {'structural_type': 'numpy.float64', 'name': 'value'},
+        }, {
+            'selector': ['__ALL_ELEMENTS__', 3],
+            'metadata': {'structural_type': 'numpy.int64', 'name': 'ground_truth'},
+        }])
+
+        params = primitive.get_params()
+        primitive.set_params(params=params)
+
+
+if __name__ == '__main__':
+    unittest.main()
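For reference, the expected_result numbers above are just the biased sample autocorrelation of the series 1..10 (the same formula statsmodels' acf uses). This standalone numpy sketch, not part of the diff, reproduces them:

import numpy as np

x = np.arange(1.0, 11.0)
xc = x - x.mean()                      # demeaned series
denom = np.sum(xc * xc)                # lag-0 sum of squares
acf = np.array([np.sum(xc[:len(x) - k] * xc[k:]) / denom for k in range(len(x))])
print(np.round(acf, 6))
# [ 1.        0.7       0.412121  0.148485 -0.078788 -0.257576 -0.375758
#  -0.421212 -0.381818 -0.245455]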
diff --git a/tods/tests/test_TimeIntervalTransform.py b/tods/tests/test_TimeIntervalTransform.py
new file mode 100644
index 0000000..62a48ce
--- /dev/null
+++ b/tods/tests/test_TimeIntervalTransform.py
@@ -0,0 +1,121 @@
+import unittest
+
+
+from datetime import datetime
+
+from d3m import container, utils
+from d3m.metadata import base as metadata_base
+
+from data_processing import TimeIntervalTransform
+
+
+#import utils as test_utils
+
+import numpy as np
+
+class TimeIntervalTransformTestCase(unittest.TestCase):
+    def test_basic(self):
+        self.maxDiff = None
+        main = container.DataFrame({'d3mIndex': [0, 1, 2, 3, 4, 5, 6, 7],
+                                    'timestamp': [1472918400, 1472918700, 1472919000, 1472919300,
+                                                  1472919600, 1472919900, 1472920200, 1472920500],
+                                    'value': [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
+                                    'ground_truth': [0, 1, 0, 1, 0, 1, 0, 1]},
+                                   columns=['d3mIndex', 'timestamp', 'value', 'ground_truth'], generate_metadata=True)
+        """
+        main.metadata = main.metadata.update_column(0, {'name': 'd3mIndex_'})
+        main.metadata = main.metadata.update_column(1, {'name': 'timestamp_'})
+        main.metadata = main.metadata.update_column(2, {'name': 'value_'})
+        main.metadata = main.metadata.update_column(3, {'name': 'ground_truth_'})
+        """
+
+        #print(main)
+
+        self.assertEqual(utils.to_json_structure(main.metadata.to_internal_simple_structure()), [{
+            'selector': [],
+            'metadata': {
+                # 'top_level': 'main',
+                'schema': metadata_base.CONTAINER_SCHEMA_VERSION,
+                'structural_type': 'd3m.container.pandas.DataFrame',
+                'semantic_types': ['https://metadata.datadrivendiscovery.org/types/Table'],
+                'dimension': {
+                    'name': 'rows',
+                    'semantic_types': ['https://metadata.datadrivendiscovery.org/types/TabularRow'],
+                    'length': 8,
+                },
+            },
+        }, {
+            'selector': ['__ALL_ELEMENTS__'],
+            'metadata': {
+                'dimension': {
+                    'name': 'columns',
+                    'semantic_types': ['https://metadata.datadrivendiscovery.org/types/TabularColumn'],
+                    'length': 4,
+                },
+            },
+        }, {
+            'selector': ['__ALL_ELEMENTS__', 0],
+            'metadata': {'structural_type': 'numpy.int64', 'name': 'd3mIndex'},
+        }, {
+            'selector': ['__ALL_ELEMENTS__', 1],
+            'metadata': {'structural_type': 'numpy.int64', 'name': 'timestamp'},
+        }, {
+            'selector': ['__ALL_ELEMENTS__', 2],
+            'metadata': {'structural_type': 'numpy.float64', 'name': 'value'},
+        }, {
+            'selector': ['__ALL_ELEMENTS__', 3],
+            'metadata': {'structural_type': 'numpy.int64', 'name': 'ground_truth'},
+        }])
+
+        hyperparams_class = TimeIntervalTransform.TimeIntervalTransform.metadata.get_hyperparams()
+        primitive = TimeIntervalTransform.TimeIntervalTransform(hyperparams=hyperparams_class.defaults())
+        new_main = primitive.produce(inputs=main).value
+        new_rows = len(new_main.index)
+        self.assertEqual(new_rows, 8)
+
+        #print(main.metadata.to_internal_simple_structure())
+        #print(new_main.metadata.to_internal_simple_structure())
+
+        self.assertEqual(utils.to_json_structure(main.metadata.to_internal_simple_structure()), [{
+            'selector': [],
+            'metadata': {
+                # 'top_level': 'main',
+                'schema': metadata_base.CONTAINER_SCHEMA_VERSION,
+                'structural_type': 'd3m.container.pandas.DataFrame',
+                'semantic_types': ['https://metadata.datadrivendiscovery.org/types/Table'],
+                'dimension': {
+                    'name': 'rows',
+                    'semantic_types': ['https://metadata.datadrivendiscovery.org/types/TabularRow'],
+                    'length': 8,
+                },
+            },
+        }, {
+            'selector': ['__ALL_ELEMENTS__'],
+            'metadata': {
+                'dimension': {
+                    'name': 'columns',
+                    'semantic_types': ['https://metadata.datadrivendiscovery.org/types/TabularColumn'],
+                    'length': 4,
+                },
+            },
+        }, {
+            'selector': ['__ALL_ELEMENTS__', 0],
+            'metadata': {'structural_type': 'numpy.int64', 'name': 'd3mIndex'},
+        }, {
+            'selector': ['__ALL_ELEMENTS__', 1],
+            'metadata': {'structural_type': 'numpy.int64', 'name': 'timestamp'},
+        }, {
+            'selector': ['__ALL_ELEMENTS__', 2],
+            'metadata': {'structural_type': 'numpy.float64', 'name': 'value'},
+        }, {
+            'selector': ['__ALL_ELEMENTS__', 3],
+            'metadata': {'structural_type': 'numpy.int64', 'name': 'ground_truth'},
+        }])
+
+        params = primitive.get_params()
+        primitive.set_params(params=params)
+
+
+if __name__ == '__main__':
+    unittest.main()
+