# Opening the Black Box of Stock Quant Trading (Build Your Own Money Printer), Chapter 7

### Author: 阿布🐶

### Reproduction without the author's permission is prohibited

____

## Unbalanced win/loss payoffs inevitably require an unbalanced win/loss ratio; the factors' own ability solves part of that goal, and pattern recognition improves the critical remaining part

___

The previous chapter constructed three main umpires and one auxiliary umpire; this chapter builds the edge umpire and then selects the optimal parameters for the umpires.

```python
fn = ZEnv.g_project_root + '/data/cache/orders_pd_ump_hit_predict_abu'
key = 'orders_pd_ump_hit_predict_abu'
orders_pd_ump = ZCommonUtil.load_hdf5(fn, key)
orders_pd_ump.shape
# out
(47374, 39)
```

**UmpEdge, the edge umpire**

```python
import UmpEdge
ump_edge = UmpEdge.UmpEdgeClass(orders_pd_ump)
```

**The edge umpire uses profit and profit_cg as the GMM clustering data to generate the ss cluster sequence. It then ranks profit_cg to produce p_rk_cg, marks the top win N and top loss N (N is currently set to 25% and is not exposed externally) with 1 and -1 respectively, and everything else with 0, producing the rk column. The numpy matrix of all atr, deg and wave data is saved; incoming data is standardized and compared by distance against it to find the best-matching rk label.**


### In short: bring in the post-trade parameter, profit, and extract the features of the top 25% and bottom 25% of orders by profit. Since profit does not exist at the moment the trade is actually made, it cannot be predicted directly; so the feature matrix x is standardized beforehand, and pairwise_distances computes the similarity between the input and the stored x values. The most similar stored x then maps to whether that order fell in the top 25% or bottom 25%. Clear? If not, read the source code; a snippet is also pasted below.
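Before the real implementation, the lookup at the heart of `predict` can be shown in isolation. The sketch below is a minimal, self-contained illustration on made-up feature values: only the StandardScaler plus pairwise_distances pattern mirrors the actual code, and it collapses the mapping straight to an rk label, whereas the real code goes through the ss cluster first.

```python
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.metrics.pairwise import pairwise_distances

# stored training features (rows) and their rk labels: 1 = top 25%, -1 = bottom 25%, 0 = middle
stored_x = np.array([[0.42, 3.1, 0.08],
                     [0.11, -2.4, 0.31],
                     [0.77, 1.9, 0.15]])
stored_rk = np.array([1, -1, 0])

# the order we want to judge at trade time (its profit is unknown here)
new_x = np.array([[0.40, 2.8, 0.10]])

# standardize the new sample together with the stored samples so they share one scale
con_x = np.concatenate((new_x, stored_x), axis=0)
con_x = StandardScaler().fit_transform(con_x)

# the nearest stored sample by euclidean distance supplies the rk label
nearest = pairwise_distances(con_x[0].reshape(1, -1), con_x[1:],
                             metric='euclidean').argmin()
print(stored_rk[nearest])
```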

The core code is shown below; or consult the source file UmpEdge.py directly.

```python
def dump_clf(self):
    dump_clf = {'top_loss_ss': self.top_loss_ss, 'top_win_ss': self.top_win_ss,
                'fiter_df': self.fiter.df, 'fiter_x': self.fiter.x}

    ZCommonUtil.dump_pickle(dump_clf, self.dump_file_fn())

def predict(self, **kwargs):
    dump_clf = UmpEdgeClass.dump_clf_manager.get_ump(self)

    x = np.array([kwargs[col] for col in dump_clf['fiter_df'].columns[2:-3]])
    x = x.reshape(1, -1)
    con_x = np.concatenate((x, dump_clf['fiter_x']), axis=0)

    x_scale_param = self.scaler.fit(con_x)
    con_x = self.scaler.fit_transform(con_x, x_scale_param)

    distance_min_ind = pairwise_distances(con_x[0].reshape(1, -1), con_x[1:],
                                          metric='euclidean').argmin()
    # map the nearest stored sample back to its ss cluster, which serves as the classification result
    ss = dump_clf['fiter_df'].iloc[distance_min_ind]['ss']
    if ss in dump_clf['top_loss_ss']:
        return -1
    elif ss in dump_clf['top_win_ss']:
        return 1
    return 0

def gmm_component_filter(self, nc=20, threshold=0.72, show=True):
    clf = GMM(nc, n_iter=500, random_state=3).fit(self.fiter.y)
    ss = clf.predict(self.fiter.y)

    self.fiter.df['p_rk_cg'] = self.fiter.df['profit_cg'].rank()
    self.fiter.df['ss'] = ss

    win_top = len(self.fiter.df['profit_cg']) - len(self.fiter.df['profit_cg']) * 0.25
    loss_top = len(self.fiter.df['profit_cg']) * 0.25
    self.fiter.df['rk'] = 0
    self.fiter.df['rk'] = np.where(self.fiter.df['p_rk_cg'] > win_top, 1, self.fiter.df['rk'])
    self.fiter.df['rk'] = np.where(self.fiter.df['p_rk_cg'] < loss_top, -1, self.fiter.df['rk'])

    xt = pd.crosstab(self.fiter.df['ss'], self.fiter.df['rk'])
    xt_pct = xt.div(xt.sum(1).astype(float), axis=0)

    if show:
        xt_pct.plot(
            figsize=(16, 8),
            kind='bar',
            stacked=True,
            title=str('ss') + ' -> ' + str('result'))
        plt.xlabel(str('ss'))
        plt.ylabel(str('result'))

    ZLog.info(xt_pct[xt_pct[-1] > threshold])
    ZLog.info(xt_pct[xt_pct[1] > threshold])

    self.top_loss_ss = xt_pct[xt_pct[-1] > threshold].index
    self.top_win_ss = xt_pct[xt_pct[1] > threshold].index
    return xt, xt_pct
```
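To see the rank-to-label step from gmm_component_filter in isolation, here is a tiny self-contained sketch on made-up profit_cg values; the column names follow the excerpt above, the data itself is purely illustrative.

```python
import numpy as np
import pandas as pd

# made-up profit_cg values for eight hypothetical orders
df = pd.DataFrame({'profit_cg': [0.12, -0.30, 0.05, 0.40, -0.08, 0.22, -0.15, 0.01]})

df['p_rk_cg'] = df['profit_cg'].rank()
win_top = len(df) - len(df) * 0.25   # ranks above this line are the top 25% winners
loss_top = len(df) * 0.25            # ranks below this line are the bottom 25% losers

df['rk'] = 0
df['rk'] = np.where(df['p_rk_cg'] > win_top, 1, df['rk'])
df['rk'] = np.where(df['p_rk_cg'] < loss_top, -1, df['rk'])
print(df.sort_values('profit_cg'))
```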

___

```python
ump_edge.fiter.df.head()
```




![Snip20161021_46.png](https://upload-images.jianshu.io/upload_images/3136804-05f7f7a7a0e9cc3e.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240)


### The visualization below should make it clearer: the top 25% are labelled 1, the bottom 25% are labelled -1, and everything else 0. After GMM clustering, profit is ranked, and the clusters exceeding the threshold are kept; those clusters are listed below.

```python
xt, xt_pct = ump_edge.gmm_component_filter(nc=20, threshold=0.72, show=True)
# out
rk    -1         0         1
ss
0    1.0       0.0       0.0
6    1.0       0.0       0.0
7    1.0       0.0       0.0
9    1.0       0.0       0.0
19   1.0       0.0       0.0
rk    -1         0         1
ss
2    0.0  0.000000  1.000000
4    0.0  0.000000  1.000000
5    0.0  0.000000  1.000000
10   0.0  0.000000  1.000000
11   0.0  0.000000  1.000000
12   0.0  0.028401  0.971599
13   0.0  0.000000  1.000000
16   0.0  0.000000  1.000000
18   0.0  0.000000  1.000000
```


![output_13_1.png](https://upload-images.jianshu.io/upload_images/3136804-9606d18890498912.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240)

### pd.crosstab builds the cross-tabulation of the clusters produced by GMM (ss) against rk

```python
xt
```

| ss \ rk | -1 | 0 | 1 |
| --- | --- | --- | --- |
| 0 | 2465 | 0 | 0 |
| 1 | 0 | 4326 | 241 |
| 2 | 0 | 0 | 4 |
| 3 | 1073 | 4038 | 0 |
| 4 | 0 | 0 | 3053 |
| 5 | 0 | 0 | 33 |
| 6 | 47 | 0 | 0 |
| 7 | 3633 | 0 | 0 |
| 8 | 103 | 4799 | 0 |
| 9 | 886 | 0 | 0 |
| 10 | 0 | 0 | 20 |
| 11 | 0 | 0 | 978 |
| 12 | 0 | 114 | 3900 |
| 13 | 0 | 0 | 2 |
| 14 | 0 | 3249 | 1036 |
| 15 | 0 | 4346 | 1 |
| 16 | 0 | 0 | 213 |
| 17 | 2892 | 1536 | 0 |
| 18 | 0 | 0 | 1723 |
| 19 | 104 | 0 | 0 |

```python
xt_pct
```


| ss \ rk | -1 | 0 | 1 |
| --- | --- | --- | --- |
| 0 | 1.000000 | 0.000000 | 0.000000 |
| 1 | 0.000000 | 0.947230 | 0.052770 |
| 2 | 0.000000 | 0.000000 | 1.000000 |
| 3 | 0.209939 | 0.790061 | 0.000000 |
| 4 | 0.000000 | 0.000000 | 1.000000 |
| 5 | 0.000000 | 0.000000 | 1.000000 |
| 6 | 1.000000 | 0.000000 | 0.000000 |
| 7 | 1.000000 | 0.000000 | 0.000000 |
| 8 | 0.021012 | 0.978988 | 0.000000 |
| 9 | 1.000000 | 0.000000 | 0.000000 |
| 10 | 0.000000 | 0.000000 | 1.000000 |
| 11 | 0.000000 | 0.000000 | 1.000000 |
| 12 | 0.000000 | 0.028401 | 0.971599 |
| 13 | 0.000000 | 0.000000 | 1.000000 |
| 14 | 0.000000 | 0.758226 | 0.241774 |
| 15 | 0.000000 | 0.999770 | 0.000230 |
| 16 | 0.000000 | 0.000000 | 1.000000 |
| 17 | 0.653117 | 0.346883 | 0.000000 |
| 18 | 0.000000 | 0.000000 | 1.000000 |
| 19 | 1.000000 | 0.000000 | 0.000000 |
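The same crosstab-and-normalize step can be reproduced on synthetic data. The sketch below is self-contained and uses made-up ss/rk values; only the pd.crosstab plus row-normalization pattern mirrors the code above.

```python
import pandas as pd

# made-up cluster labels (ss) and rank labels (rk) for ten hypothetical orders
df = pd.DataFrame({'ss': [0, 0, 1, 1, 1, 2, 2, 2, 2, 0],
                   'rk': [-1, -1, 0, 0, 1, 1, 1, 1, 0, -1]})

xt = pd.crosstab(df['ss'], df['rk'])              # raw counts per (cluster, label) pair
xt_pct = xt.div(xt.sum(1).astype(float), axis=0)  # normalize each row to proportions

threshold = 0.72
print(xt_pct[xt_pct[-1] > threshold])   # clusters dominated by the bottom-25% label
print(xt_pct[xt_pct[1] > threshold])    # clusters dominated by the top-25% label
```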



### The last routine task is to serialize the umpire locally

```python
ump_edge.dump_clf()
```

_____

## The next topic is finding the umpires' optimal parameters

### Re-read the orders_pd_ump data. The new data contains all the statistics from letting these umpires make rulings without actually intercepting anything, in the table form shown below. The interception rules are then organized from these statistics: for example, how many main umpires cooperating with how many hits triggers a direct interception, how many hits mean waiting for the edge umpire's ruling, and what weight of authority the auxiliary umpire is granted. Once these parameters are set, they make up a practical trade-interception system!

```python
orders_pd_ump.filter(regex='result|ump_main_mlfiter*|ind_key').tail(2)
```



![Snip20161021_48.png](https://upload-images.jianshu.io/upload_images/3136804-a3fbcdd985324493.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240)


# First, find parameters for the three main umpires; the main umpires have equal priority, at least as implemented here

```python
from MlFiter import MlFiterClass

orders_pd_ump['ind_key'] = np.arange(0, len(orders_pd_ump))
orders_pd_tmp = orders_pd_ump.filter(regex='result|ump_main_mlfiter*|ind_key')
order_has_ret = orders_pd_tmp[orders_pd_tmp['result'] != 0]
order_has_ret['result'] = np.where(order_has_ret['result'] == -1, 0, 1)

order_has_ret['ump_main_mlfitermainpdclass_predict'] = np.where(
    order_has_ret['ump_main_mlfitermainpdclass_predict'] == True, 1, 0)
order_has_ret['ump_main_mlfiterdegpdclass_predict'] = np.where(
    order_has_ret['ump_main_mlfiterdegpdclass_predict'] == True, 1, 0)
order_has_ret['ump_main_mlfiterwavepdclass_predict'] = np.where(
    order_has_ret['ump_main_mlfiterwavepdclass_predict'] == True, 1, 0)

order_has_ret = order_has_ret[(order_has_ret['ump_main_mlfitermainpdclass_predict'] == 0) |
                              (order_has_ret['ump_main_mlfiterdegpdclass_predict'] == 0) |
                              (order_has_ret['ump_main_mlfiterwavepdclass_predict'] == 0)]

# total interception votes from the three main umpires
order_has_ret['predict_sum'] = order_has_ret['ump_main_mlfitermainpdclass_predict'] + \
                               order_has_ret['ump_main_mlfiterdegpdclass_predict'] + \
                               order_has_ret['ump_main_mlfiterwavepdclass_predict']

# total hit counts from the three main umpires (main, deg, wave)
order_has_ret['hit_sum'] = order_has_ret['ump_main_mlfitermainpdclass_hit'] + \
                           order_has_ret['ump_main_mlfiterdegpdclass_hit'] + \
                           order_has_ret['ump_main_mlfiterwavepdclass_hit']

matrix = order_has_ret.as_matrix()
y = matrix[:, 0]
x = matrix[:, 1:]
fiter = MlFiterClass(x, y, order_has_ret)
fiter.df.head()
order_has_ret.head()
```




![Snip20161021_48.png](https://upload-images.jianshu.io/upload_images/3136804-441c95531d1755e1.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240)


**Since jianshu does not support HTML tables, please refer to the full ipython notebook version on git**

Here, to make it readable, I transpose the table (.T) and take another screenshot; if you can, refer to the notebook version instead.

```python
order_has_ret.T
```


![Snip20161021_49.png](https://upload-images.jianshu.io/upload_images/3136804-222d61326b10643a.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240)


Find the parameters through visualization, weighing sample counts, probabilities and other factors together.

**As the figure below shows, above 1 hit the loss rate already exceeds 0.65, and at 20 hits it is above 0.70; with more than 20 hits the main umpires can rule directly, while below that the decision waits for the auxiliary umpire and the edge umpire**

```python
from sklearn import metrics
import matplotlib.pyplot as plt

hd_range = np.arange(0, 50)
hd_result = []
hd_sum = []
for hd in hd_range:
    # among orders with more than hd hits, what fraction ended as a loss (result == 0)?
    xt = order_has_ret[(order_has_ret['hit_sum'] > hd)]['result'].value_counts()
    hs = xt.sum()
    hd_sum.append(hs)
    hd_result.append(float(xt[0]) / hs)

cmap = plt.get_cmap('jet', 20)
cmap.set_under('gray')
fig, ax = plt.subplots()
ax.plot(hd_range, hd_result)
cax = ax.scatter(hd_range, hd_result, c=hd_sum, cmap=cmap, vmin=np.min(hd_sum),
                 vmax=np.max(hd_sum))
ax.grid(True)
fig.colorbar(cax, label='hd_sum', extend='min')
```



![output_24_1.png](https://upload-images.jianshu.io/upload_images/3136804-99be5e035557680e.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240)
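As a hedged illustration of turning this eyeballed choice into code, the snippet below picks the smallest hit threshold whose loss rate and sample count clear some minimum levels. It reuses the hd_range, hd_result and hd_sum lists from the loop above, and the 0.70 and 200 cut-offs are made up for the example, not values fixed by the project.

```python
# hd_range, hd_result (loss rate) and hd_sum (sample count) come from the loop above
min_loss_rate = 0.70   # illustrative: require at least a 70% loss rate among blocked orders
min_samples = 200      # illustrative: require enough orders for the estimate to be trusted

candidates = [hd for hd, rate, cnt in zip(hd_range, hd_result, hd_sum)
              if rate >= min_loss_rate and cnt >= min_samples]
main_hit_threshold = candidates[0] if candidates else None
print(main_hit_threshold)
```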


**The gap (jump) auxiliary umpire has little data to begin with, so it should rule whenever it can; the jump ump can use 5 hits as its threshold: with more than 5 hits it rules directly, otherwise the decision waits for the edge umpire**


```python
orders_pd_tmp = orders_pd_ump.filter(regex='result|ump_jump_mlfiter|ind_key*')
order_has_ret = orders_pd_tmp[orders_pd_tmp['result'] != 0]
order_has_ret['result'] = np.where(order_has_ret['result'] == -1, 0, 1)
order_has_ret['ump_jump_mlfiterjumppdclass_predict'] = np.where(
    order_has_ret['ump_jump_mlfiterjumppdclass_predict'] == True, 1, 0)

order_has_ret = order_has_ret[(order_has_ret['ump_jump_mlfiterjumppdclass_predict'] == 0)]

hd_range = np.unique(order_has_ret['ump_jump_mlfiterjumppdclass_hit'])[:-1]
# subtract 1 so the threshold range starts from 0
hd_range = np.array(hd_range) - 1
print(hd_range)
hd_result = []
hd_sum = []
for hd in hd_range:
    xt = order_has_ret[order_has_ret['ump_jump_mlfiterjumppdclass_hit'] > hd]['result'].value_counts()
    hs = xt.sum()
    hd_sum.append(hs)
    hd_result.append(float(xt[0]) / hs)

cmap = plt.get_cmap('jet', 20)
cmap.set_under('gray')
fig, ax = plt.subplots()
ax.plot(hd_range, hd_result)
cax = ax.scatter(hd_range, hd_result, c=hd_sum, cmap=cmap, vmin=np.min(hd_sum),
                 vmax=np.max(hd_sum))
ax.grid(True)
fig.colorbar(cax, label='hd_sum', extend='min')
```



![output_26_2.png](https://upload-images.jianshu.io/upload_images/3136804-b27af9e57982586d.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240)

Under the current rules the edge umpire does not need a parameter search.
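Pulling the chapter's choices together, here is a hedged sketch of what one combined interception rule could look like. The should_block helper is hypothetical: the 20-hit and 5-hit thresholds come from the discussion above, while the function name, signature and the way the edge vote is used are purely illustrative, not the project's actual API.

```python
def should_block(main_hit_sum, jump_predict, jump_hit, edge_vote,
                 main_threshold=20, jump_threshold=5):
    """Illustrative combination of the umpires discussed in this chapter.

    main_hit_sum : total hits from the three main umpires
    jump_predict : 1 if the jump (gap) auxiliary umpire votes to intercept, else 0
    jump_hit     : hit count behind the jump umpire's vote
    edge_vote    : UmpEdge.predict() result, -1 (top loss), 1 (top win) or 0
    """
    # the main umpires rule directly when their combined hits are high enough
    if main_hit_sum > main_threshold:
        return True
    # the jump auxiliary umpire rules directly above its own hit threshold
    if jump_predict == 1 and jump_hit > jump_threshold:
        return True
    # otherwise defer to the edge umpire: block only orders that resemble the top-loss group
    return edge_vote == -1
```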

Alright. Once all of this is organized, the next chapter will look at whether this system can become that important link on the road to a money printer. The next chapter is also the final one. If you have really read this far and roughly understand what I am saying, I am already gratified; if that is the case, let's talk more.

## Thank you 🙏 for having the patience to read this far
## If you have any questions you can add 阿布 on WeChat
## WeChat ID: aaaabbbuu


![mmexport1475383814280.jpg](https://upload-images.jianshu.io/upload_images/3136804-1973199290094fb7.jpg?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240)