
Commit e9c8f08 (tag: v0.8.2)
Refactor DNN Layer
浅梦 authored Oct 11, 2020
1 parent f9b07e4
Showing 39 changed files with 71 additions and 97 deletions.
4 changes: 2 additions & 2 deletions .github/ISSUE_TEMPLATE/bug_report.md
@@ -19,8 +19,8 @@ Steps to reproduce the behavior:

**Operating environment(运行环境):**
- python version [e.g. 3.5, 3.7]
-- tensorflow version [e.g. 1.4.0, 1.15.0, 2.2.0]
-- deepctr version [e.g. 0.8.0,]
+- tensorflow version [e.g. 1.4.0, 1.15.0, 2.3.0]
+- deepctr version [e.g. 0.8.2,]

**Additional context**
Add any other context about the problem here.
4 changes: 2 additions & 2 deletions .github/ISSUE_TEMPLATE/question.md
@@ -16,5 +16,5 @@ Add any other context about the problem here.

**Operating environment(运行环境):**
- python version [e.g. 3.6]
-- tensorflow version [e.g. 1.4.0, 1.5.0, 2.2.0]
-- deepctr version [e.g. 0.8.0,]
+- tensorflow version [e.g. 1.4.0, 1.5.0, 2.3.0]
+- deepctr version [e.g. 0.8.2,]
9 changes: 4 additions & 5 deletions README.md
@@ -57,16 +57,16 @@ Let's [**Get Started!**](https://deepctr-doc.readthedocs.io/en/latest/Quick-Star

## Citation

-- Weichen Shen. (2018). DeepCTR: Easy-to-use,Modular and Extendible package of deep-learning based CTR models. https://github.com/shenweichen/deepctr.
+- Weichen Shen. (2017). DeepCTR: Easy-to-use,Modular and Extendible package of deep-learning based CTR models. https://github.com/shenweichen/deepctr.


If you find this code useful in your research, please cite it using the following BibTeX:

```bibtex
-@misc{shen2018deepctr,
+@misc{shen2017deepctr,
author = {Weichen Shen},
title = {DeepCTR: Easy-to-use,Modular and Extendible package of deep-learning based CTR models},
-year = {2018},
+year = {2017},
publisher = {GitHub},
journal = {GitHub Repository},
howpublished = {\url{https://github.com/shenweichen/deepctr}},
@@ -86,7 +86,6 @@ For more information about the recommendation system, such as **feature engineer

更多关于推荐系统的内容,如**特征工程,用户画像,召回,排序和多目标优化,在线学习与实时计算以及更多前沿技术和实战项目**等可参考:

-
- [推荐系统实战](https://www.julyedu.com/course/getDetail/181?ccode=5ee751d37278c)
- [推荐系统就业小班](https://www.julyedu.com/course/getDetail/321?ccode=5ee751d37278c)
- [互联网计算广告实战](https://www.julyedu.com/course/getDetail/158?ccode=5ee751d37278c)

2 changes: 1 addition & 1 deletion deepctr/__init__.py
@@ -1,4 +1,4 @@
from .utils import check_version

-__version__ = '0.8.1'
+__version__ = '0.8.2'
check_version(__version__)
6 changes: 2 additions & 4 deletions deepctr/estimator/models/autoint.py
@@ -73,14 +73,12 @@ def _model_fn(features, labels, mode, config):
dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)

if len(dnn_hidden_units) > 0 and att_layer_num > 0: # Deep & Interacting Layer
-deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
-dnn_use_bn, seed)(dnn_input, training=train_flag)
+deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed=seed)(dnn_input, training=train_flag)
stack_out = tf.keras.layers.Concatenate()([att_output, deep_out])
final_logit = tf.keras.layers.Dense(
1, use_bias=False, kernel_initializer=tf.keras.initializers.glorot_normal(seed))(stack_out)
elif len(dnn_hidden_units) > 0: # Only Deep
-deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
-dnn_use_bn, seed)(dnn_input, training=train_flag)
+deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed=seed)(dnn_input, training=train_flag)
final_logit = tf.keras.layers.Dense(
1, use_bias=False, kernel_initializer=tf.keras.initializers.glorot_normal(seed))(deep_out)
elif att_layer_num > 0: # Only Interacting Layer
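Why these call sites change from positional `seed` to keyword `seed=seed`: the refactored `DNN.__init__` (see `deepctr/layers/core.py` below) inserts `output_activation` ahead of `seed`, so a sixth positional argument would now bind to `output_activation` instead. A minimal sketch of the pitfall, assuming deepctr >= 0.8.2 (layer sizes are illustrative):

```python
from deepctr.layers import DNN

# Old signature: DNN(hidden_units, activation, l2_reg, dropout_rate, use_bn, seed=1024)
# New signature: DNN(hidden_units, activation, l2_reg, dropout_rate, use_bn,
#                    output_activation=None, seed=1024)
dnn_wrong = DNN((128, 64), 'relu', 0, 0, False, 1024)       # 1024 silently binds to output_activation
dnn_right = DNN((128, 64), 'relu', 0, 0, False, seed=1024)  # seed must now be passed by keyword
```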
3 changes: 1 addition & 2 deletions deepctr/estimator/models/ccpm.py
@@ -80,8 +80,7 @@ def _model_fn(features, labels, mode, config):
k=min(k, int(conv_result.shape[1])), axis=1)(conv_result)

flatten_result = tf.keras.layers.Flatten()(pooling_result)
-dnn_out = DNN(dnn_hidden_units, l2_reg=l2_reg_dnn,
-dropout_rate=dnn_dropout, seed=seed)(flatten_result, training=train_flag)
+dnn_out = DNN(dnn_hidden_units, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout, seed=seed)(flatten_result, training=train_flag)
dnn_logit = tf.keras.layers.Dense(1, use_bias=False, kernel_initializer=tf.keras.initializers.glorot_normal(seed))(dnn_out)

logits = linear_logits + dnn_logit
6 changes: 2 additions & 4 deletions deepctr/estimator/models/dcn.py
@@ -63,15 +63,13 @@ def _model_fn(features, labels, mode, config):
dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)

if len(dnn_hidden_units) > 0 and cross_num > 0: # Deep & Cross
-deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
-dnn_use_bn, seed)(dnn_input, training=train_flag)
+deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed=seed)(dnn_input, training=train_flag)
cross_out = CrossNet(cross_num, l2_reg=l2_reg_cross)(dnn_input)
stack_out = tf.keras.layers.Concatenate()([cross_out, deep_out])
final_logit = tf.keras.layers.Dense(
1, use_bias=False, kernel_initializer=tf.keras.initializers.glorot_normal(seed))(stack_out)
elif len(dnn_hidden_units) > 0: # Only Deep
-deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
-dnn_use_bn, seed)(dnn_input, training=train_flag)
+deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed=seed)(dnn_input, training=train_flag)
final_logit = tf.keras.layers.Dense(
1, use_bias=False, kernel_initializer=tf.keras.initializers.glorot_normal(seed))(deep_out)
elif cross_num > 0: # Only Cross
3 changes: 1 addition & 2 deletions deepctr/estimator/models/deepfm.py
@@ -63,8 +63,7 @@ def _model_fn(features, labels, mode, config):

fm_logit = FM()(concat_func(sparse_embedding_list, axis=1))

-dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
-dnn_use_bn, seed)(dnn_input, training=train_flag)
+dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed=seed)(dnn_input, training=train_flag)
dnn_logit = tf.keras.layers.Dense(
1, use_bias=False, kernel_initializer=tf.keras.initializers.glorot_normal(seed=seed))(dnn_output)

3 changes: 1 addition & 2 deletions deepctr/estimator/models/fibinet.py
@@ -68,8 +68,7 @@ def _model_fn(features, labels, mode, config):

dnn_input = combined_dnn_input(
[Flatten()(concat_func([senet_bilinear_out, bilinear_out]))], dense_value_list)
-dnn_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
-False, seed)(dnn_input, training=train_flag)
+dnn_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, False, seed=seed)(dnn_input, training=train_flag)
dnn_logit = Dense(
1, use_bias=False, kernel_initializer=tf.keras.initializers.glorot_normal(seed))(dnn_out)

3 changes: 1 addition & 2 deletions deepctr/estimator/models/fnn.py
@@ -53,8 +53,7 @@ def _model_fn(features, labels, mode, config):
sparse_embedding_list, dense_value_list = input_from_feature_columns(features, dnn_feature_columns,
l2_reg_embedding=l2_reg_embedding)
dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
-deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn,
-dnn_dropout, False, seed)(dnn_input, training=train_flag)
+deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, False, seed=seed)(dnn_input, training=train_flag)
dnn_logit = tf.keras.layers.Dense(
1, use_bias=False, kernel_initializer=tf.keras.initializers.glorot_normal(seed))(deep_out)

3 changes: 1 addition & 2 deletions deepctr/estimator/models/fwfm.py
@@ -71,8 +71,7 @@ def _model_fn(features, labels, mode, config):
if dnn_hidden_units:
dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)

-dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
-dnn_use_bn, seed)(dnn_input, training=train_flag)
+dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed=seed)(dnn_input, training=train_flag)
dnn_logit = tf.keras.layers.Dense(
1, use_bias=False, kernel_initializer=tf.keras.initializers.glorot_normal(seed))(dnn_output)
final_logit_components.append(dnn_logit)
3 changes: 1 addition & 2 deletions deepctr/estimator/models/nfm.py
@@ -61,8 +61,7 @@ def _model_fn(features, labels, mode, config):
if bi_dropout:
bi_out = tf.keras.layers.Dropout(bi_dropout)(bi_out, training=None)
dnn_input = combined_dnn_input([bi_out], dense_value_list)
-dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
-False, seed)(dnn_input, training=train_flag)
+dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, False, seed=seed)(dnn_input, training=train_flag)
dnn_logit = tf.keras.layers.Dense(
1, use_bias=False, kernel_initializer=tf.keras.initializers.glorot_normal(seed))(dnn_output)

3 changes: 1 addition & 2 deletions deepctr/estimator/models/pnn.py
@@ -81,8 +81,7 @@ def _model_fn(features, labels, mode, config):
deep_input = linear_signal

dnn_input = combined_dnn_input([deep_input], dense_value_list)
-dnn_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
-False, seed)(dnn_input, training=train_flag)
+dnn_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, False, seed=seed)(dnn_input, training=train_flag)
dnn_logit = tf.keras.layers.Dense(
1, use_bias=False, kernel_initializer=tf.keras.initializers.glorot_normal(seed))(dnn_out)

3 changes: 1 addition & 2 deletions deepctr/estimator/models/wdl.py
@@ -54,8 +54,7 @@ def _model_fn(features, labels, mode, config):
sparse_embedding_list, dense_value_list = input_from_feature_columns(features, dnn_feature_columns,
l2_reg_embedding=l2_reg_embedding)
dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
-dnn_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
-False, seed)(dnn_input, training=train_flag)
+dnn_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, False, seed=seed)(dnn_input, training=train_flag)
dnn_logits = Dense(
1, use_bias=False, kernel_initializer=tf.keras.initializers.glorot_normal(seed))(dnn_out)

3 changes: 1 addition & 2 deletions deepctr/estimator/models/xdeepfm.py
@@ -64,8 +64,7 @@ def _model_fn(features, labels, mode, config):
fm_input = concat_func(sparse_embedding_list, axis=1)

dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
-dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
-dnn_use_bn, seed)(dnn_input, training=train_flag)
+dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed=seed)(dnn_input, training=train_flag)
dnn_logit = tf.keras.layers.Dense(
1, use_bias=False, kernel_initializer=tf.keras.initializers.glorot_normal(seed))(dnn_output)

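All of the estimator call sites above continue to thread `training=train_flag` into the layer call; `DNN` applies dropout, and batch statistics for batch normalization, only when that flag is set. A minimal behavioral sketch, assuming deepctr >= 0.8.2 and TensorFlow 2.x (shapes and values are illustrative):

```python
import numpy as np
import tensorflow as tf
from deepctr.layers import DNN

x = tf.constant(np.random.rand(4, 8), dtype=tf.float32)
layer = DNN((16, 8), dropout_rate=0.5, use_bn=True, seed=1024)

train_out = layer(x, training=True)   # dropout active, BN uses batch statistics
eval_out = layer(x, training=False)   # dropout disabled, BN uses moving averages
```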
26 changes: 15 additions & 11 deletions deepctr/layers/core.py
@@ -76,8 +76,7 @@ def build(self, input_shape):
name="kernel")
self.bias = self.add_weight(
shape=(1,), initializer=Zeros(), name="bias")
-self.dnn = DNN(self.hidden_units, self.activation, self.l2_reg,
-self.dropout_rate, self.use_bn, seed=self.seed)
+self.dnn = DNN(self.hidden_units, self.activation, self.l2_reg, self.dropout_rate, self.use_bn, seed=self.seed)

self.dense = tf.keras.layers.Lambda(lambda x: tf.nn.bias_add(tf.tensordot(
x[0], x[1], axes=(-1, 0)), x[2]))
@@ -134,16 +133,21 @@ class DNN(Layer):
- **use_bn**: bool. Whether use BatchNormalization before activation or not.
+- **output_activation**: Activation function to use in the last layer.If ``None``,it will be same as ``activation``.
- **seed**: A Python integer to use as random seed.
"""

-def __init__(self, hidden_units, activation='relu', l2_reg=0, dropout_rate=0, use_bn=False, seed=1024, **kwargs):
+def __init__(self, hidden_units, activation='relu', l2_reg=0, dropout_rate=0, use_bn=False, output_activation=None,
+seed=1024, **kwargs):
self.hidden_units = hidden_units
self.activation = activation
-self.dropout_rate = dropout_rate
-self.seed = seed
self.l2_reg = l2_reg
+self.dropout_rate = dropout_rate
self.use_bn = use_bn
+self.output_activation = output_activation
+self.seed = seed

super(DNN, self).__init__(**kwargs)

def build(self, input_shape):
@@ -170,6 +174,9 @@ def build(self, input_shape):

self.activation_layers = [activation_layer(self.activation) for _ in range(len(self.hidden_units))]

+if self.output_activation:
+    self.activation_layers[-1] = activation_layer(self.output_activation)

super(DNN, self).build(input_shape) # Be sure to call this somewhere!

def call(self, inputs, training=None, **kwargs):
@@ -179,9 +186,7 @@ def call(self, inputs, training=None, **kwargs):
for i in range(len(self.hidden_units)):
fc = tf.nn.bias_add(tf.tensordot(
deep_input, self.kernels[i], axes=(-1, 0)), self.bias[i])
-# fc = Dense(self.hidden_size[i], activation=None, \
-# kernel_initializer=glorot_normal(seed=self.seed), \
-# kernel_regularizer=l2(self.l2_reg))(deep_input)

if self.use_bn:
fc = self.bn_layers[i](fc, training=training)

@@ -202,7 +207,8 @@ def compute_output_shape(self, input_shape):

def get_config(self, ):
config = {'activation': self.activation, 'hidden_units': self.hidden_units,
-'l2_reg': self.l2_reg, 'use_bn': self.use_bn, 'dropout_rate': self.dropout_rate, 'seed': self.seed}
+'l2_reg': self.l2_reg, 'use_bn': self.use_bn, 'dropout_rate': self.dropout_rate,
+'output_activation': self.output_activation, 'seed': self.seed}
base_config = super(DNN, self).get_config()
return dict(list(base_config.items()) + list(config.items()))

@@ -249,5 +255,3 @@ def get_config(self, ):
config = {'task': self.task, 'use_bias': self.use_bias}
base_config = super(PredictionLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
-
-
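The practical effect of the new `output_activation` argument: the final layer of the MLP can use a different activation from the hidden layers, and leaving it as `None` preserves the old behavior (`activation` everywhere). A hedged usage sketch, assuming deepctr >= 0.8.2 (input width and layer sizes are arbitrary):

```python
import tensorflow as tf
from deepctr.layers import DNN

inputs = tf.keras.Input(shape=(32,))
# 'relu' in the hidden layers, a linear final layer producing raw logits;
# with output_activation=None the last layer would also use 'relu'.
logits = DNN((64, 32, 1), activation='relu', output_activation='linear',
             seed=1024)(inputs)
model = tf.keras.Model(inputs, logits)
```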
6 changes: 2 additions & 4 deletions deepctr/models/autoint.py
@@ -64,14 +64,12 @@ def AutoInt(linear_feature_columns, dnn_feature_columns, att_layer_num=3, att_em
dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)

if len(dnn_hidden_units) > 0 and att_layer_num > 0: # Deep & Interacting Layer
-deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
-dnn_use_bn, seed)(dnn_input)
+deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed=seed)(dnn_input)
stack_out = tf.keras.layers.Concatenate()([att_output, deep_out])
final_logit = tf.keras.layers.Dense(
1, use_bias=False, kernel_initializer=tf.keras.initializers.glorot_normal(seed))(stack_out)
elif len(dnn_hidden_units) > 0: # Only Deep
-deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
-dnn_use_bn, seed)(dnn_input, )
+deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed=seed)(dnn_input, )
final_logit = tf.keras.layers.Dense(
1, use_bias=False, kernel_initializer=tf.keras.initializers.glorot_normal(seed))(deep_out)
elif att_layer_num > 0: # Only Interacting Layer
3 changes: 1 addition & 2 deletions deepctr/models/ccpm.py
@@ -68,8 +68,7 @@ def CCPM(linear_feature_columns, dnn_feature_columns, conv_kernel_width=(6, 5),
k=min(k, int(conv_result.shape[1])), axis=1)(conv_result)

flatten_result = tf.keras.layers.Flatten()(pooling_result)
-dnn_out = DNN(dnn_hidden_units, l2_reg=l2_reg_dnn,
-dropout_rate=dnn_dropout)(flatten_result)
+dnn_out = DNN(dnn_hidden_units, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout)(flatten_result)
dnn_logit = tf.keras.layers.Dense(1, use_bias=False, kernel_initializer=tf.keras.initializers.glorot_normal(seed))(dnn_out)

final_logit = add_func([dnn_logit, linear_logit])
6 changes: 2 additions & 4 deletions deepctr/models/dcn.py
@@ -50,15 +50,13 @@ def DCN(linear_feature_columns, dnn_feature_columns, cross_num=2, dnn_hidden_uni
dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)

if len(dnn_hidden_units) > 0 and cross_num > 0: # Deep & Cross
-deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
-dnn_use_bn, seed)(dnn_input)
+deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed=seed)(dnn_input)
cross_out = CrossNet(cross_num, l2_reg=l2_reg_cross)(dnn_input)
stack_out = tf.keras.layers.Concatenate()([cross_out, deep_out])
final_logit = tf.keras.layers.Dense(
1, use_bias=False, kernel_initializer=tf.keras.initializers.glorot_normal(seed))(stack_out)
elif len(dnn_hidden_units) > 0: # Only Deep
-deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
-dnn_use_bn, seed)(dnn_input)
+deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed=seed)(dnn_input)
final_logit = tf.keras.layers.Dense(
1, use_bias=False, kernel_initializer=tf.keras.initializers.glorot_normal(seed))(deep_out)
elif cross_num > 0: # Only Cross
3 changes: 1 addition & 2 deletions deepctr/models/deepfm.py
@@ -54,8 +54,7 @@ def DeepFM(linear_feature_columns, dnn_feature_columns, fm_group=[DEFAULT_GROUP_

dnn_input = combined_dnn_input(list(chain.from_iterable(
group_embedding_dict.values())), dense_value_list)
-dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
-dnn_use_bn, seed)(dnn_input)
+dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed=seed)(dnn_input)
dnn_logit = tf.keras.layers.Dense(
1, use_bias=False, kernel_initializer=tf.keras.initializers.glorot_normal(seed=seed))(dnn_output)

4 changes: 1 addition & 3 deletions deepctr/models/dien.py
@@ -177,7 +177,6 @@ def DIEN(dnn_feature_columns, history_feature_list,
sequence_embed_list = get_varlen_pooling_list(sequence_embed_dict, features, sparse_varlen_feature_columns,
to_list=True)
dnn_input_emb_list += sequence_embed_list
-
keys_emb = concat_func(keys_emb_list)
deep_input_emb = concat_func(dnn_input_emb_list)
query_emb = concat_func(query_emb_list)
@@ -202,8 +201,7 @@ def DIEN(dnn_feature_columns, history_feature_list,
deep_input_emb = tf.keras.layers.Flatten()(deep_input_emb)

dnn_input = combined_dnn_input([deep_input_emb], dense_value_list)
-output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn,
-dnn_dropout, use_bn, seed)(dnn_input)
+output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, use_bn, seed=seed)(dnn_input)
final_logit = Dense(1, use_bias=False, kernel_initializer=tf.keras.initializers.glorot_normal(seed))(output)
output = PredictionLayer(task)(final_logit)

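The model-level functions shown above keep their public signatures, so user code should upgrade cleanly. A minimal end-to-end sketch with hypothetical features and random data (the `user_id`/`item_id` fields are invented for illustration), assuming deepctr >= 0.8.2:

```python
import numpy as np
from deepctr.models import DeepFM
from deepctr.feature_column import SparseFeat, get_feature_names

feature_columns = [SparseFeat('user_id', vocabulary_size=100, embedding_dim=8),
                   SparseFeat('item_id', vocabulary_size=100, embedding_dim=8)]

model = DeepFM(feature_columns, feature_columns, dnn_hidden_units=(128, 128))
model.compile('adam', 'binary_crossentropy')

x = {name: np.random.randint(0, 100, size=256)
     for name in get_feature_names(feature_columns)}
y = np.random.randint(0, 2, size=256)
model.fit(x, y, batch_size=64, epochs=1)
```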