# util.py
  1. import pandas as pd
  2. import numpy as np
  3. from datetime import datetime,timedelta
  4. import tsfresh.feature_extraction.feature_calculators as tsf
  5. #找波峰、波風個數
  6. #用每個時間點去看前後拉一個n長度的區間 ex: 時間是2020-05-01,n=2,表示拉前後2周的資料做檢查是否為波峰
  7. #波峰邏輯在code內
  8. def find_local_max(x,n,th=0.5):
  9. max_count = 0
  10. max_list = []
  11. max_values_list = []
  12. for i in range(len(x)):
  13. before_list = x[:i][-n:]
  14. num = x[i]
  15. after_list = x[i+1:][:n]
  16. other_list = list(before_list) + list(after_list)
  17. min_other = min(other_list)
  18. min_other = min_other if min_other!=0 else 1
  19. max_other = max(other_list)
  20. max_other = max_other if max_other!=0 else 1
  21. mean_x = np.mean(x)
  22. std_x = np.std(x)
  23. if num!=0:
  24. #波峰邏輯必須同時滿足以下四點
  25. #1. (前後區間內最小值/波峰)需大於門檻值th,目前設定th=0.5
  26. #2. 前區間所有值都需要小於波峰
  27. #3. 後區間所有值都需要小於波峰
  28. #4. 波峰需大於群不平均值+兩倍標準差
  29. if (min_other/float(num)<1-th) and all(before_list<num) and all(after_list<num) and (num>mean_x+2*std_x):
  30. max_count += 1
  31. max_list += [i]
  32. max_values_list += [num]
  33. return max_count,max_list,max_values_list
  34. #生成特徵
  35. #最後兩波峰斜率(slope) : 如果只有一個波峰就會去尋找n長度的前區間照出最小值,以前區間最小值與波峰價算斜率
  36. #現在與最後峰斜率(now_slope) : 現在時間點與最後一個波峰的斜率
  37. #波峰距離現在的時間長度(gap_peak) : 最後一個時間點與最後一個波峰的距離
  38. #0的比例(rate_0) : 計算整段x中為0的比例
  39. #近期0的比例(rate_0_now) : 計算x中最後n個數為0的比例
  40. def gen_feature(x,max_list,n):
  41. #計算斜率
  42. def get_slope(value,idx):
  43. if idx[1]-idx[0]==0:
  44. return None
  45. else:
  46. return (value[1]-value[0])/(idx[1]-idx[0])
  47. if len(max_list)==0:
  48. return None,None,None,None,None
  49. elif len(max_list)==1:
  50. i = max_list[0]
  51. before_list = x[:i][-n:]
  52. if len(before_list)!=0:
  53. min_before_idx = np.argmin(before_list)
  54. slope_value = [before_list[min_before_idx],x[i]]
  55. slope_idx = [min_before_idx,i]
  56. slope = get_slope(slope_value,slope_idx)
  57. else:
  58. slope = 0
  59. gap_peak = len(x) - i - 1
  60. else:
  61. slope_value = [x[max_list[-2]],x[max_list[-1]]]
  62. slope_idx = [max_list[-2],max_list[-1]]
  63. slope = get_slope(slope_value,slope_idx)
  64. gap_peak = len(x) - max_list[-1] - 1
  65. rate_0 = sum(x==0)/len(x)
  66. rate_0_now = sum(x[-n:]==0)/len(x[-n:])
  67. if max_list[-1]==len(x)-1:
  68. now_slope = get_slope([x[-2],x[-1]],[len(x)-1,len(x)])
  69. else:
  70. now_slope = get_slope([x[max_list[-1]],x[-1]],[max_list[-1],len(x)])
  71. return slope, now_slope, gap_peak, rate_0, rate_0_now
  72. #生成特徵資料
  73. #最小時間點("min_{}".format(date_nm)) : 計算 key_word_nm 中最小時間點
  74. #最大時間點("max_{}".format(date_nm)) : 計算 key_word_nm 中最大時間點
  75. #最後的值("last_{}".format(value_nm)) : 取的最後一天的值
  76. def gen_feature_df(data,key_word_nm,date_nm,value_nm,n,th):
  77. feature_df = pd.DataFrame()
  78. for key, analysis_df in data.groupby(key_word_nm):
  79. if len(analysis_df)>n+1:
  80. analysis_df[date_nm] = [i[:10] for i in analysis_df[date_nm]]
  81. max_date = max(analysis_df[date_nm])
  82. min_date = min(analysis_df[date_nm])
  83. count_analysis_df = len(analysis_df)
  84. x = analysis_df[value_nm].values
  85. max_count,max_list,max_values_list = find_local_max(x,n,th)
  86. slope, now_slope, gap_peak, rate_0, rate_0_now = gen_feature(x,max_list,n)
  87. feature_df = feature_df.append({
  88. key_word_nm:key,
  89. "min_{}".format(date_nm):min_date,
  90. "max_{}".format(date_nm):max_date,
  91. "count_":count_analysis_df,
  92. "slope":slope,
  93. "max_count":max_count,
  94. "now_slope":now_slope,
  95. "gap_peak":gap_peak,
  96. "rate_0":rate_0,
  97. "rate_0_now":rate_0_now,
  98. "last_{}".format(value_nm):x[-1]},ignore_index=True)
  99. return feature_df
  100. #生成corrcoef
  101. def gen_corr_set(data,key_word_nm,value_nm,corr_threshold):
  102. corr_list = []
  103. key_list = []
  104. for key,_ in data.groupby(key_word_nm):
  105. key_list += [key]
  106. corr_list += [data.loc[data[key_word_nm]==key,value_nm].values]
  107. x,y = np.where(np.corrcoef(corr_list)>0.7)
  108. similar_set = gen_similar_set(x,y)
  109. rule_list = list_to_set(similar_set)
  110. simulator_nm_list = []
  111. for i in rule_list:
  112. simulator_nm_list += [[key_list[j] for j in i]]
  113. return simulator_nm_list
  114. def list_to_set(similar_set):
  115. rule_list = []
  116. for rule in similar_set:
  117. len_rule = len(rule)
  118. break_list = []
  119. for i in rule_list:
  120. if len(set(i+rule))!=len(i)+len_rule:
  121. break_list += [True]
  122. else:
  123. break_list += [False]
  124. if np.sum(break_list)>=1:
  125. combine_rule = []
  126. re_list = []
  127. for j in np.where(break_list)[0]:
  128. combine_rule += rule_list[j]
  129. re_list += [rule_list[j]]
  130. for re_ in re_list:
  131. rule_list.remove(re_)
  132. combine_rule += rule
  133. rule_list += [sorted(list(set(combine_rule)))]
  134. else:
  135. rule_list += [sorted(rule)]
  136. return rule_list
  137. def gen_similar_set(x,y):
  138. similar_set = []
  139. for i in range(len(x)):
  140. if x[i]!=y[i]:
  141. similar_set += [[x[i],y[i]]]
  142. return similar_set