@@ -0,0 +1,70 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Pipeline graph:
# dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes)
#     -> axiswise_scaler -> auto_correlation -> construct_predictions
# (extract_columns_by_semantic_types(targets) runs as step 3 but is not consumed downstream.)

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common'))
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: extract_columns_by_semantic_types(attributes)
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common'))
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                          data=['https://metadata.datadrivendiscovery.org/types/Attribute'])
pipeline_description.add_step(step_2)

# Step 3: extract_columns_by_semantic_types(targets)
step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common'))
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_3.add_output('produce')
step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                          data=['https://metadata.datadrivendiscovery.org/types/TrueTarget'])
pipeline_description.add_step(step_3)

attributes = 'steps.2.produce'
targets = 'steps.3.produce'  # kept for reference; not used by the steps below

# Step 4: processing (axis-wise scaling of the attribute columns)
step_4 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.axiswise_scaler'))
step_4.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes)
step_4.add_output('produce')
pipeline_description.add_step(step_4)

# Step 5: algorithm
step_5 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.feature_analysis.auto_correlation'))
step_5.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.4.produce')
step_5.add_output('produce')
pipeline_description.add_step(step_5)

# Step 6: predictions
step_6 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.construct_predictions.Common'))
step_6.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.5.produce')
step_6.add_argument(name='reference', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_6.add_output('produce')
pipeline_description.add_step(step_6)

# Final output
pipeline_description.add_output(name='output predictions', data_reference='steps.6.produce')

# Output to JSON
data = pipeline_description.to_json()
with open('example_pipeline.json', 'w') as f:
    f.write(data)

print(data)
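
A quick sanity check on the serialized description is to round-trip it through the same d3m API. A minimal sketch, assuming the script above has already written example_pipeline.json to the working directory:

from d3m.metadata.pipeline import Pipeline

# Re-load the pipeline description that was just serialized.
with open('example_pipeline.json') as f:
    pipeline = Pipeline.from_json(f.read())

# The round-tripped description should expose the same seven steps (0-6).
print(pipeline.id)
print(len(pipeline.steps))  # expected: 7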
@@ -0,0 +1,70 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Pipeline graph:
# dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes)
#     -> axiswise_scaler -> deeplog -> construct_predictions
# (extract_columns_by_semantic_types(targets) runs as step 3 but is not consumed downstream.)

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common'))
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: extract_columns_by_semantic_types(attributes)
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common'))
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                          data=['https://metadata.datadrivendiscovery.org/types/Attribute'])
pipeline_description.add_step(step_2)

# Step 3: extract_columns_by_semantic_types(targets)
step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common'))
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_3.add_output('produce')
step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                          data=['https://metadata.datadrivendiscovery.org/types/TrueTarget'])
pipeline_description.add_step(step_3)

attributes = 'steps.2.produce'
targets = 'steps.3.produce'  # kept for reference; not used by the steps below

# Step 4: processing (axis-wise scaling of the attribute columns)
step_4 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.axiswise_scaler'))
step_4.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes)
step_4.add_output('produce')
pipeline_description.add_step(step_4)

# Step 5: algorithm
step_5 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.detection_algorithm.deeplog'))
step_5.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.4.produce')
step_5.add_output('produce')
pipeline_description.add_step(step_5)

# Step 6: predictions
step_6 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.construct_predictions.Common'))
step_6.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.5.produce')
step_6.add_argument(name='reference', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_6.add_output('produce')
pipeline_description.add_step(step_6)

# Final output
pipeline_description.add_output(name='output predictions', data_reference='steps.6.produce')

# Output to JSON
data = pipeline_description.to_json()
with open('example_pipeline.json', 'w') as f:
    f.write(data)

print(data)
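
To execute one of these descriptions end to end, the d3m reference runtime can fit and produce on a loaded Dataset. This is a rough sketch only: the dataset URI is a placeholder, and the exact Runtime keyword arguments can differ between d3m versions:

from d3m.container.dataset import Dataset
from d3m.metadata import base as metadata_base
from d3m.metadata.pipeline import Pipeline
from d3m.runtime import Runtime

# Placeholder URI -- point this at a real D3M datasetDoc.json.
dataset = Dataset.load('file:///path/to/datasetDoc.json')

with open('example_pipeline.json') as f:
    pipeline = Pipeline.from_json(f.read())

runtime = Runtime(pipeline=pipeline, context=metadata_base.Context.TESTING)
runtime.fit(inputs=[dataset]).check_success()

produce_result = runtime.produce(inputs=[dataset])
produce_result.check_success()
print(produce_result.values['outputs.0'])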
@@ -0,0 +1,70 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Pipeline graph:
# dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes)
#     -> axiswise_scaler -> matrix_profile -> construct_predictions
# (extract_columns_by_semantic_types(targets) runs as step 3 but is not consumed downstream.)

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common'))
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: extract_columns_by_semantic_types(attributes)
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common'))
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                          data=['https://metadata.datadrivendiscovery.org/types/Attribute'])
pipeline_description.add_step(step_2)

# Step 3: extract_columns_by_semantic_types(targets)
step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common'))
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_3.add_output('produce')
step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                          data=['https://metadata.datadrivendiscovery.org/types/TrueTarget'])
pipeline_description.add_step(step_3)

attributes = 'steps.2.produce'
targets = 'steps.3.produce'  # kept for reference; not used by the steps below

# Step 4: processing (axis-wise scaling of the attribute columns)
step_4 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.axiswise_scaler'))
step_4.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes)
step_4.add_output('produce')
pipeline_description.add_step(step_4)

# Step 5: algorithm
step_5 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.detection_algorithm.matrix_profile'))
step_5.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.4.produce')
step_5.add_output('produce')
pipeline_description.add_step(step_5)

# Step 6: predictions
step_6 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.construct_predictions.Common'))
step_6.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.5.produce')
step_6.add_argument(name='reference', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_6.add_output('produce')
pipeline_description.add_step(step_6)

# Final output
pipeline_description.add_output(name='output predictions', data_reference='steps.6.produce')

# Output to JSON
data = pipeline_description.to_json()
with open('example_pipeline.json', 'w') as f:
    f.write(data)

print(data)
@@ -0,0 +1,70 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Pipeline graph:
# dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes)
#     -> axiswise_scaler -> pyod_sod -> construct_predictions
# (extract_columns_by_semantic_types(targets) runs as step 3 but is not consumed downstream.)

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common'))
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: extract_columns_by_semantic_types(attributes)
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common'))
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                          data=['https://metadata.datadrivendiscovery.org/types/Attribute'])
pipeline_description.add_step(step_2)

# Step 3: extract_columns_by_semantic_types(targets)
step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common'))
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_3.add_output('produce')
step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                          data=['https://metadata.datadrivendiscovery.org/types/TrueTarget'])
pipeline_description.add_step(step_3)

attributes = 'steps.2.produce'
targets = 'steps.3.produce'  # kept for reference; not used by the steps below

# Step 4: processing (axis-wise scaling of the attribute columns)
step_4 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.axiswise_scaler'))
step_4.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes)
step_4.add_output('produce')
pipeline_description.add_step(step_4)

# Step 5: algorithm
step_5 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.detection_algorithm.pyod_sod'))
step_5.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.4.produce')
step_5.add_output('produce')
pipeline_description.add_step(step_5)

# Step 6: predictions
step_6 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.construct_predictions.Common'))
step_6.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.5.produce')
step_6.add_argument(name='reference', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_6.add_output('produce')
pipeline_description.add_step(step_6)

# Final output
pipeline_description.add_output(name='output predictions', data_reference='steps.6.produce')

# Output to JSON
data = pipeline_description.to_json()
with open('example_pipeline.json', 'w') as f:
    f.write(data)

print(data)
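
The four scripts above are identical except for the primitive chosen in step 5. A hedged sketch of a helper that factors out the shared skeleton; build_pipeline and add_step are our names for illustration, not part of TODS or d3m:

from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

def build_pipeline(step_5_primitive: str) -> Pipeline:
    # Steps 0-4 and 6 are shared; only the step-5 primitive path varies.
    pipeline = Pipeline()
    pipeline.add_input(name='inputs')

    def add_step(python_path, inputs_ref, hyperparams=None, reference=None):
        # Append one PrimitiveStep and return the data reference to its output.
        step = PrimitiveStep(primitive=index.get_primitive(python_path))
        step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=inputs_ref)
        if reference is not None:
            step.add_argument(name='reference', argument_type=ArgumentType.CONTAINER, data_reference=reference)
        if hyperparams:
            for name, value in hyperparams.items():
                step.add_hyperparameter(name=name, argument_type=ArgumentType.VALUE, data=value)
        step.add_output('produce')
        pipeline.add_step(step)
        return 'steps.{}.produce'.format(len(pipeline.steps) - 1)

    df = add_step('d3m.primitives.data_transformation.dataset_to_dataframe.Common', 'inputs.0')
    parsed = add_step('d3m.primitives.data_transformation.column_parser.Common', df)
    attributes = add_step(
        'd3m.primitives.data_transformation.extract_columns_by_semantic_types.Common', parsed,
        hyperparams={'semantic_types': ['https://metadata.datadrivendiscovery.org/types/Attribute']})
    add_step(  # targets, extracted for parity with the scripts above
        'd3m.primitives.data_transformation.extract_columns_by_semantic_types.Common', df,
        hyperparams={'semantic_types': ['https://metadata.datadrivendiscovery.org/types/TrueTarget']})
    scaled = add_step('d3m.primitives.tods.timeseries_processing.transformation.axiswise_scaler', attributes)
    detected = add_step(step_5_primitive, scaled)
    predictions = add_step('d3m.primitives.data_transformation.construct_predictions.Common', detected,
                           reference=parsed)
    pipeline.add_output(name='output predictions', data_reference=predictions)
    return pipeline

# Usage:
# pipeline_description = build_pipeline('d3m.primitives.tods.detection_algorithm.matrix_profile')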
@@ -0,0 +1,134 @@
import unittest

from datetime import datetime

from d3m import container, utils
from d3m.metadata import base as metadata_base
from feature_analysis import AutoCorrelation
#import utils as test_utils

import numpy as np
import pandas as pd


class AutoCorrelationTestCase(unittest.TestCase):
    def test_basic(self):
        self.maxDiff = None

        main = container.DataFrame({'d3mIndex': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
                                    'timestamp': [1472918400, 1472918700, 1472919000, 1472919300,
                                                  1472919600, 1472919900, 1472920200, 1472920500,
                                                  1472920800, 1472921100],
                                    'value': [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0],
                                    'ground_truth': [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]},
                                   columns=['d3mIndex', 'timestamp', 'value', 'ground_truth'],
                                   generate_metadata=True)
        """
        main.metadata = main.metadata.update_column(0, {'name': 'd3mIndex_'})
        main.metadata = main.metadata.update_column(1, {'name': 'timestamp_'})
        main.metadata = main.metadata.update_column(2, {'name': 'value_'})
        main.metadata = main.metadata.update_column(3, {'name': 'ground_truth_'})
        """
        #print(main)

        self.assertEqual(utils.to_json_structure(main.metadata.to_internal_simple_structure()), [{
            'selector': [],
            'metadata': {
                # 'top_level': 'main',
                'schema': metadata_base.CONTAINER_SCHEMA_VERSION,
                'structural_type': 'd3m.container.pandas.DataFrame',
                'semantic_types': ['https://metadata.datadrivendiscovery.org/types/Table'],
                'dimension': {
                    'name': 'rows',
                    'semantic_types': ['https://metadata.datadrivendiscovery.org/types/TabularRow'],
                    'length': 10,
                },
            },
        }, {
            'selector': ['__ALL_ELEMENTS__'],
            'metadata': {
                'dimension': {
                    'name': 'columns',
                    'semantic_types': ['https://metadata.datadrivendiscovery.org/types/TabularColumn'],
                    'length': 4,
                },
            },
        }, {
            'selector': ['__ALL_ELEMENTS__', 0],
            'metadata': {'structural_type': 'numpy.int64', 'name': 'd3mIndex'},
        }, {
            'selector': ['__ALL_ELEMENTS__', 1],
            'metadata': {'structural_type': 'numpy.int64', 'name': 'timestamp'},
        }, {
            'selector': ['__ALL_ELEMENTS__', 2],
            'metadata': {'structural_type': 'numpy.float64', 'name': 'value'},
        }, {
            'selector': ['__ALL_ELEMENTS__', 3],
            'metadata': {'structural_type': 'numpy.int64', 'name': 'ground_truth'},
        }])

        # get_hyperparams() returns the Hyperparams class; defaults() instantiates it.
        hyperparams = AutoCorrelation.AutoCorrelation.metadata.get_hyperparams().defaults()
        hyperparams = hyperparams.replace({'nlags': 2})
        #hyperparams = hyperparams.replace({'use_semantic_types': True})

        primitive = AutoCorrelation.AutoCorrelation(hyperparams=hyperparams)
        new_main = primitive.produce(inputs=main).value
        #print(new_main)

        # Compare the produced ACF column against the expected sample autocorrelation values.
        new_main_drop = new_main['value_acf'].reset_index(drop=True)
        expected_result = pd.DataFrame({'acf': [1.000000, 0.700000, 0.412121, 0.148485, -0.078788,
                                                -0.257576, -0.375758, -0.421212, -0.381818, -0.245455]})
        np.testing.assert_array_almost_equal(new_main_drop.values, expected_result['acf'].values)

        #print(main.metadata.to_internal_simple_structure())
        #print(new_main.metadata.to_internal_simple_structure())

        # The input DataFrame's metadata should be unchanged by produce().
        self.assertEqual(utils.to_json_structure(main.metadata.to_internal_simple_structure()), [{
            'selector': [],
            'metadata': {
                # 'top_level': 'main',
                'schema': metadata_base.CONTAINER_SCHEMA_VERSION,
                'structural_type': 'd3m.container.pandas.DataFrame',
                'semantic_types': ['https://metadata.datadrivendiscovery.org/types/Table'],
                'dimension': {
                    'name': 'rows',
                    'semantic_types': ['https://metadata.datadrivendiscovery.org/types/TabularRow'],
                    'length': 10,
                },
            },
        }, {
            'selector': ['__ALL_ELEMENTS__'],
            'metadata': {
                'dimension': {
                    'name': 'columns',
                    'semantic_types': ['https://metadata.datadrivendiscovery.org/types/TabularColumn'],
                    'length': 4,
                },
            },
        }, {
            'selector': ['__ALL_ELEMENTS__', 0],
            'metadata': {'structural_type': 'numpy.int64', 'name': 'd3mIndex'},
        }, {
            'selector': ['__ALL_ELEMENTS__', 1],
            'metadata': {'structural_type': 'numpy.int64', 'name': 'timestamp'},
        }, {
            'selector': ['__ALL_ELEMENTS__', 2],
            'metadata': {'structural_type': 'numpy.float64', 'name': 'value'},
        }, {
            'selector': ['__ALL_ELEMENTS__', 3],
            'metadata': {'structural_type': 'numpy.int64', 'name': 'ground_truth'},
        }])

        params = primitive.get_params()
        primitive.set_params(params=params)


if __name__ == '__main__':
    unittest.main()
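
The expected_result values in this test are simply the standard sample autocorrelation of the fixture's value column (1.0 through 10.0). A small, self-contained check with plain numpy, independent of the primitive under test:

import numpy as np

# Sample ACF: normalized lagged self-covariance of the demeaned series.
x = np.arange(1.0, 11.0)
xc = x - x.mean()
denom = (xc ** 2).sum()
acf = np.array([(xc[:x.size - k] * xc[k:]).sum() / denom for k in range(x.size)])
print(np.round(acf, 6))
# [ 1.        0.7       0.412121  0.148485 -0.078788 -0.257576 -0.375758
#  -0.421212 -0.381818 -0.245455]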
@@ -0,0 +1,121 @@
import unittest

from datetime import datetime

from d3m import container, utils
from d3m.metadata import base as metadata_base
from data_processing import TimeIntervalTransform
#import utils as test_utils

import numpy as np


class TimeIntervalTransformTestCase(unittest.TestCase):
    def test_basic(self):
        self.maxDiff = None

        main = container.DataFrame({'d3mIndex': [0, 1, 2, 3, 4, 5, 6, 7],
                                    'timestamp': [1472918400, 1472918700, 1472919000, 1472919300,
                                                  1472919600, 1472919900, 1472920200, 1472920500],
                                    'value': [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
                                    'ground_truth': [0, 1, 0, 1, 0, 1, 0, 1]},
                                   columns=['d3mIndex', 'timestamp', 'value', 'ground_truth'],
                                   generate_metadata=True)
        """
        main.metadata = main.metadata.update_column(0, {'name': 'd3mIndex_'})
        main.metadata = main.metadata.update_column(1, {'name': 'timestamp_'})
        main.metadata = main.metadata.update_column(2, {'name': 'value_'})
        main.metadata = main.metadata.update_column(3, {'name': 'ground_truth_'})
        """
        #print(main)

        self.assertEqual(utils.to_json_structure(main.metadata.to_internal_simple_structure()), [{
            'selector': [],
            'metadata': {
                # 'top_level': 'main',
                'schema': metadata_base.CONTAINER_SCHEMA_VERSION,
                'structural_type': 'd3m.container.pandas.DataFrame',
                'semantic_types': ['https://metadata.datadrivendiscovery.org/types/Table'],
                'dimension': {
                    'name': 'rows',
                    'semantic_types': ['https://metadata.datadrivendiscovery.org/types/TabularRow'],
                    'length': 8,
                },
            },
        }, {
            'selector': ['__ALL_ELEMENTS__'],
            'metadata': {
                'dimension': {
                    'name': 'columns',
                    'semantic_types': ['https://metadata.datadrivendiscovery.org/types/TabularColumn'],
                    'length': 4,
                },
            },
        }, {
            'selector': ['__ALL_ELEMENTS__', 0],
            'metadata': {'structural_type': 'numpy.int64', 'name': 'd3mIndex'},
        }, {
            'selector': ['__ALL_ELEMENTS__', 1],
            'metadata': {'structural_type': 'numpy.int64', 'name': 'timestamp'},
        }, {
            'selector': ['__ALL_ELEMENTS__', 2],
            'metadata': {'structural_type': 'numpy.float64', 'name': 'value'},
        }, {
            'selector': ['__ALL_ELEMENTS__', 3],
            'metadata': {'structural_type': 'numpy.int64', 'name': 'ground_truth'},
        }])

        hyperparams_class = TimeIntervalTransform.TimeIntervalTransform.metadata.get_hyperparams()
        primitive = TimeIntervalTransform.TimeIntervalTransform(hyperparams=hyperparams_class.defaults())
        new_main = primitive.produce(inputs=main).value
        new_rows = len(new_main.index)
        self.assertEqual(new_rows, 8)

        #print(main.metadata.to_internal_simple_structure())
        #print(new_main.metadata.to_internal_simple_structure())

        self.assertEqual(utils.to_json_structure(main.metadata.to_internal_simple_structure()), [{
            'selector': [],
            'metadata': {
                # 'top_level': 'main',
                'schema': metadata_base.CONTAINER_SCHEMA_VERSION,
                'structural_type': 'd3m.container.pandas.DataFrame',
                'semantic_types': ['https://metadata.datadrivendiscovery.org/types/Table'],
                'dimension': {
                    'name': 'rows',
                    'semantic_types': ['https://metadata.datadrivendiscovery.org/types/TabularRow'],
                    'length': 8,
                },
            },
        }, {
            'selector': ['__ALL_ELEMENTS__'],
            'metadata': {
                'dimension': {
                    'name': 'columns',
                    'semantic_types': ['https://metadata.datadrivendiscovery.org/types/TabularColumn'],
                    'length': 4,
                },
            },
        }, {
            'selector': ['__ALL_ELEMENTS__', 0],
            'metadata': {'structural_type': 'numpy.int64', 'name': 'd3mIndex'},
        }, {
            'selector': ['__ALL_ELEMENTS__', 1],
            'metadata': {'structural_type': 'numpy.int64', 'name': 'timestamp'},
        }, {
            'selector': ['__ALL_ELEMENTS__', 2],
            'metadata': {'structural_type': 'numpy.float64', 'name': 'value'},
        }, {
            'selector': ['__ALL_ELEMENTS__', 3],
            'metadata': {'structural_type': 'numpy.int64', 'name': 'ground_truth'},
        }])

        params = primitive.get_params()
        primitive.set_params(params=params)


if __name__ == '__main__':
    unittest.main()
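
Both test cases assert the same metadata structure verbatim, parameterized only by row count (10 vs. 8). A hedged sketch of a shared helper the two files could import; expected_fixture_metadata is our name, not an existing utility:

from d3m.metadata import base as metadata_base

def expected_fixture_metadata(n_rows):
    # The JSON structure both tests compare against, parameterized by row count.
    column_entries = [
        {'selector': ['__ALL_ELEMENTS__', 0],
         'metadata': {'structural_type': 'numpy.int64', 'name': 'd3mIndex'}},
        {'selector': ['__ALL_ELEMENTS__', 1],
         'metadata': {'structural_type': 'numpy.int64', 'name': 'timestamp'}},
        {'selector': ['__ALL_ELEMENTS__', 2],
         'metadata': {'structural_type': 'numpy.float64', 'name': 'value'}},
        {'selector': ['__ALL_ELEMENTS__', 3],
         'metadata': {'structural_type': 'numpy.int64', 'name': 'ground_truth'}},
    ]
    return [{
        'selector': [],
        'metadata': {
            'schema': metadata_base.CONTAINER_SCHEMA_VERSION,
            'structural_type': 'd3m.container.pandas.DataFrame',
            'semantic_types': ['https://metadata.datadrivendiscovery.org/types/Table'],
            'dimension': {
                'name': 'rows',
                'semantic_types': ['https://metadata.datadrivendiscovery.org/types/TabularRow'],
                'length': n_rows,
            },
        },
    }, {
        'selector': ['__ALL_ELEMENTS__'],
        'metadata': {
            'dimension': {
                'name': 'columns',
                'semantic_types': ['https://metadata.datadrivendiscovery.org/types/TabularColumn'],
                'length': 4,
            },
        },
    }] + column_entries

# Usage inside either test:
#   self.assertEqual(utils.to_json_structure(main.metadata.to_internal_simple_structure()),
#                    expected_fixture_metadata(8))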