
transform.py 6.1 kB

first commit · 4 years ago
# TODO: Wrap this as a class and connect it to the GUI
# A script to transform raw anomaly data into d3m format
import pandas as pd
import numpy as np
import os
import json

##############################
# Information about the dataset to be transformed
# Designed for time series data
name = 'kpi'
src_path = './raw_data/kpi.csv'
label_name = 'label'
timestamp_name = 'timestamp'
value_names = ['value']
ratio = 0.8  # Ratio of training data; the rest is used for testing
##############################

dst_root = './' + name
dirs = ['./', 'SCORE', 'TEST', 'TRAIN']
maps = {'./': None, 'SCORE': 'TEST', 'TEST': 'TEST', 'TRAIN': 'TRAIN'}
# Create the corresponding directories
for d in dirs:
    if maps[d] is not None:
        dataset_name = 'dataset_' + maps[d]
        problem_name = 'problem_' + maps[d]
    else:
        dataset_name = name + '_dataset'
        problem_name = name + '_problem'
    tables_dir = os.path.join(dst_root, d, dataset_name, 'tables')
    if not os.path.exists(tables_dir):
        os.makedirs(tables_dir)
    problem_dir = os.path.join(dst_root, d, problem_name)
    if not os.path.exists(problem_dir):
        os.makedirs(problem_dir)
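
# The loop above produces the following layout (for name = 'kpi'):
#   kpi/kpi_dataset/tables/          kpi/kpi_problem/
#   kpi/TRAIN/dataset_TRAIN/tables/  kpi/TRAIN/problem_TRAIN/
#   kpi/TEST/dataset_TEST/tables/    kpi/TEST/problem_TEST/
#   kpi/SCORE/dataset_TEST/tables/   kpi/SCORE/problem_TEST/
# (SCORE reuses the TEST names because maps['SCORE'] == 'TEST'.)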
# Process data
_df = pd.DataFrame()
df = pd.read_csv(src_path)
_df['d3mIndex'] = df.index
_df['timestamp'] = df[timestamp_name]
for value_name in value_names:
    _df[value_name] = df[value_name]
_df['ground_truth'] = df[label_name]
df = _df
cols = df.columns.tolist()

# Save all the data
df.to_csv(os.path.join(dst_root, name + '_dataset', 'tables', 'learningData.csv'), index=False)

# Save training and testing data
train_df, test_df = df[:int(df.shape[0] * ratio)], df[int(df.shape[0] * ratio):]
train_df.to_csv(os.path.join(dst_root, 'TRAIN', 'dataset_TRAIN', 'tables', 'learningData.csv'), index=False)
test_df.to_csv(os.path.join(dst_root, 'TEST', 'dataset_TEST', 'tables', 'learningData.csv'), index=False)
test_df.to_csv(os.path.join(dst_root, 'SCORE', 'dataset_TEST', 'tables', 'learningData.csv'), index=False)
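
# Note: the SCORE directory receives the same rows as TEST; the script does
# not carve out a separate scoring split.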
# Data splits
row_0 = train_df.shape[0]
row_1 = test_df.shape[0]  # was train_df.shape[0], which double-counted the training rows
row = row_0 + row_1
df = pd.DataFrame(
    np.array([
        list(range(row)),                      # d3mIndex
        ['TRAIN'] * row_0 + ['TEST'] * row_1,  # type
        [0] * row,                             # repeat
        [0] * row,                             # fold
    ]).transpose(),
    columns=['d3mIndex', 'type', 'repeat', 'fold'])

# Save data splits for all data
df.to_csv(os.path.join(dst_root, name + '_problem', 'dataSplits.csv'), index=False)  # was train_df, which wrote raw data instead of the split table

# Save training and testing splits
train_df, test_df = df[:row_0], df[row_0:]
train_df.to_csv(os.path.join(dst_root, 'TRAIN', 'problem_TRAIN', 'dataSplits.csv'), index=False)
test_df.to_csv(os.path.join(dst_root, 'TEST', 'problem_TEST', 'dataSplits.csv'), index=False)
test_df.to_csv(os.path.join(dst_root, 'SCORE', 'problem_TEST', 'dataSplits.csv'), index=False)
# Dataset JSON files
# Load the template and describe each column
with open('template/datasetDoc.json') as json_file:
    data = json.load(json_file)
columns = []
for i in range(len(cols)):
    c = {}
    c['colIndex'] = i
    c['colName'] = cols[i]
    if i == 0:                  # d3mIndex
        c['colType'] = 'integer'
        c['role'] = ['index']
    elif i == 1:                # timestamp
        c['colType'] = 'integer'
        c['role'] = ['attribute']
    elif i == len(cols) - 1:    # ground_truth
        c['colType'] = 'integer'
        c['role'] = ['suggestedTarget']
    else:                       # value columns
        c['colType'] = 'real'
        c['role'] = ['attribute']
    columns.append(c)
data['dataResources'][0]['columns'] = columns
data['dataResources'][0]['columnsCount'] = len(cols)
data['about']['datasetID'] = name + '_dataset'
data['about']['datasetName'] = name
with open(os.path.join(dst_root, name + '_dataset', 'datasetDoc.json'), 'w') as outfile:
    json.dump(data, outfile, indent=4)
data['about']['datasetID'] = name + '_dataset_TRAIN'
data['about']['datasetName'] = 'NULL'
with open(os.path.join(dst_root, 'TRAIN', 'dataset_TRAIN', 'datasetDoc.json'), 'w') as outfile:
    json.dump(data, outfile, indent=4)
data['about']['datasetID'] = name + '_dataset_TEST'
data['about']['datasetName'] = 'NULL'
with open(os.path.join(dst_root, 'TEST', 'dataset_TEST', 'datasetDoc.json'), 'w') as outfile:
    json.dump(data, outfile, indent=4)
data['about']['datasetID'] = name + '_dataset_SCORE'  # was '_dataset_TEST'; renamed to match the 'score' entry in datasetViewMaps below
data['about']['datasetName'] = 'NULL'
with open(os.path.join(dst_root, 'SCORE', 'dataset_TEST', 'datasetDoc.json'), 'w') as outfile:
    json.dump(data, outfile, indent=4)
# Problem JSON files
# Load template
with open('template/problemDoc.json') as json_file:
    data = json.load(json_file)
data['about']['problemID'] = name + '_problem'
data['about']['problemName'] = name + '_problem'
data['about']['problemDescription'] = 'Anomaly detection'
data['about']['taskKeywords'] = ['classification', 'binary', 'tabular']
data['inputs']['data'][0]['datasetID'] = name + '_dataset'
data['inputs']['data'][0]['targets'][0]['colIndex'] = len(cols) - 1
data['inputs']['data'][0]['targets'][0]['colName'] = cols[-1]
data['inputs']['dataSplits']['datasetViewMaps']['train'][0]['from'] = name + '_dataset'
data['inputs']['dataSplits']['datasetViewMaps']['test'][0]['from'] = name + '_dataset'
data['inputs']['dataSplits']['datasetViewMaps']['score'][0]['from'] = name + '_dataset'
data['inputs']['dataSplits']['datasetViewMaps']['train'][0]['to'] = name + '_dataset_TRAIN'
data['inputs']['dataSplits']['datasetViewMaps']['test'][0]['to'] = name + '_dataset_TEST'
data['inputs']['dataSplits']['datasetViewMaps']['score'][0]['to'] = name + '_dataset_SCORE'
with open(os.path.join(dst_root, name + '_problem', 'problemDoc.json'), 'w') as outfile:
    json.dump(data, outfile, indent=4)
with open(os.path.join(dst_root, 'TRAIN', 'problem_TRAIN', 'problemDoc.json'), 'w') as outfile:
    json.dump(data, outfile, indent=4)
with open(os.path.join(dst_root, 'TEST', 'problem_TEST', 'problemDoc.json'), 'w') as outfile:
    json.dump(data, outfile, indent=4)
with open(os.path.join(dst_root, 'SCORE', 'problem_TEST', 'problemDoc.json'), 'w') as outfile:
    json.dump(data, outfile, indent=4)

# Make an empty targets.csv
with open(os.path.join(dst_root, 'SCORE', 'targets.csv'), 'w') as outfile:
    outfile.write('')
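
# Usage (assumed layout): place the raw CSV at ./raw_data/kpi.csv and the two
# template files at ./template/datasetDoc.json and ./template/problemDoc.json,
# then run:
#   python transform.py
# The d3m-formatted dataset is written under ./kpi/.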

TODS is a full-stack automated machine learning system, focused on anomaly detection for multivariate time-series data. TODS provides comprehensive modules for building machine-learning-based anomaly detection systems, including: data processing, time series processing, feature analysis, detection algorithms, and a reinforcement module. The functionality these modules provide includes common data preprocessing, smoothing and transformation of time series, feature extraction from the time and frequency domains, and a wide variety of detection algorithms.
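
As a rough illustration of where transform.py fits into this workflow, the sketch below loads the generated learningData.csv and scores the value column with a generic anomaly detector. scikit-learn's IsolationForest is used here only as a stand-in for one of TODS's own detection algorithms, and the file path assumes transform.py was already run with name = 'kpi'; both are assumptions, not part of the original script.

# Minimal sketch: run a generic detector over the d3m-formatted output.
import pandas as pd
from sklearn.ensemble import IsolationForest

# Path assumes transform.py was run with name = 'kpi'
df = pd.read_csv('./kpi/TRAIN/dataset_TRAIN/tables/learningData.csv')

detector = IsolationForest(contamination=0.05, random_state=0)  # contamination is a guess
pred = detector.fit_predict(df[['value']])  # -1 = anomaly, 1 = normal

df['predicted'] = (pred == -1).astype(int)
print((df['predicted'] == df['ground_truth']).mean())  # crude agreement with the labels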