# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division

import tensorflow.contrib.slim as slim
import tensorflow as tf

from libs.networks.mobilenet import mobilenet_v2
from libs.networks.mobilenet.mobilenet import training_scope
from libs.networks.mobilenet.mobilenet_v2 import op
from libs.networks.mobilenet.mobilenet_v2 import ops

expand_input = ops.expand_input_by_factor
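
# MobileNetV2 is split into two conv-def tables: V2_BASE_DEF covers the layers up to
# expanded_conv_12 (the shared feature extractor built by mobilenetv2_base below), and
# V2_HEAD_DEF covers the remaining blocks through Conv_1 (applied to cropped features
# by mobilenetv2_head).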
V2_BASE_DEF = dict(
    defaults={
        # Note: these parameters of batch norm affect the architecture
        # that's why they are here and not in training_scope.
        (slim.batch_norm,): {'center': True, 'scale': True},
        (slim.conv2d, slim.fully_connected, slim.separable_conv2d): {
            'normalizer_fn': slim.batch_norm, 'activation_fn': tf.nn.relu6
        },
        (ops.expanded_conv,): {
            'expansion_size': expand_input(6),
            'normalizer_fn': slim.batch_norm,
        },
        (slim.conv2d, slim.separable_conv2d): {'padding': 'SAME'}
    },
    spec=[
        op(slim.conv2d, stride=2, num_outputs=32, kernel_size=[3, 3]),
        op(ops.expanded_conv,
           expansion_size=expand_input(1, divisible_by=1),
           num_outputs=16, scope='expanded_conv'),
        op(ops.expanded_conv, stride=2, num_outputs=24, scope='expanded_conv_1'),
        op(ops.expanded_conv, stride=1, num_outputs=24, scope='expanded_conv_2'),
        op(ops.expanded_conv, stride=2, num_outputs=32, scope='expanded_conv_3'),
        op(ops.expanded_conv, stride=1, num_outputs=32, scope='expanded_conv_4'),
        op(ops.expanded_conv, stride=1, num_outputs=32, scope='expanded_conv_5'),
        op(ops.expanded_conv, stride=2, num_outputs=64, scope='expanded_conv_6'),
        op(ops.expanded_conv, stride=1, num_outputs=64, scope='expanded_conv_7'),
        op(ops.expanded_conv, stride=1, num_outputs=64, scope='expanded_conv_8'),
        op(ops.expanded_conv, stride=1, num_outputs=64, scope='expanded_conv_9'),
        op(ops.expanded_conv, stride=1, num_outputs=96, scope='expanded_conv_10'),
        op(ops.expanded_conv, stride=1, num_outputs=96, scope='expanded_conv_11'),
        op(ops.expanded_conv, stride=1, num_outputs=96, scope='expanded_conv_12')
    ],
)

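# The head definition picks up where V2_BASE_DEF stops: the 160/320-channel expanded
# convolutions plus the final 1x1 Conv_1 that produces the 1280-channel feature map.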
V2_HEAD_DEF = dict(
    defaults={
        # Note: these parameters of batch norm affect the architecture
        # that's why they are here and not in training_scope.
        (slim.batch_norm,): {'center': True, 'scale': True},
        (slim.conv2d, slim.fully_connected, slim.separable_conv2d): {
            'normalizer_fn': slim.batch_norm, 'activation_fn': tf.nn.relu6
        },
        (ops.expanded_conv,): {
            'expansion_size': expand_input(6),
            'normalizer_fn': slim.batch_norm,
        },
        (slim.conv2d, slim.separable_conv2d): {'padding': 'SAME'}
    },
    spec=[
        op(ops.expanded_conv, stride=2, num_outputs=160, scope='expanded_conv_13'),
        op(ops.expanded_conv, stride=1, num_outputs=160, scope='expanded_conv_14'),
        op(ops.expanded_conv, stride=1, num_outputs=160, scope='expanded_conv_15'),
        op(ops.expanded_conv, stride=1, num_outputs=320, scope='expanded_conv_16'),
        op(slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=1280, scope='Conv_1')
    ],
)

def mobilenetv2_scope(is_training=True,
                      trainable=True,
                      weight_decay=0.00004,
                      dropout_keep_prob=0.8):
    """Defines the MobileNet training scope. By default we do not use BN:
    batch-norm layers stay frozen (fixed statistics, non-trainable parameters).
    """
    batch_norm_params = {'is_training': False, 'trainable': False}
    with slim.arg_scope(training_scope(is_training=is_training, weight_decay=weight_decay)):
        with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.separable_conv2d],
                            trainable=trainable):
            with slim.arg_scope([slim.batch_norm], **batch_norm_params) as sc:
                return sc

def mobilenetv2_base(img_batch, is_training=True):

    with slim.arg_scope(mobilenetv2_scope(is_training=is_training, trainable=True)):

        feature_to_crop, endpoints = mobilenet_v2.mobilenet_base(input_tensor=img_batch,
                                                                 depth_multiplier=1.0,
                                                                 conv_defs=V2_BASE_DEF,
                                                                 finegrain_classification_mode=False)

        # feature_to_crop = tf.Print(feature_to_crop, [tf.shape(feature_to_crop)], summarize=10, message='rpn_shape')
        return feature_to_crop

def mobilenetv2_head(inputs, is_training=True):
    with slim.arg_scope(mobilenetv2_scope(is_training=is_training, trainable=True)):
        net, _ = mobilenet_v2.mobilenet(input_tensor=inputs,
                                        num_classes=None,
                                        depth_multiplier=1.0,
                                        conv_defs=V2_HEAD_DEF,
                                        finegrain_classification_mode=False)

        # With num_classes=None, mobilenet() returns the globally pooled feature
        # map of shape [N, 1, 1, 1280]; squeeze it to [N, 1280].
        net = tf.squeeze(net, [1, 2])

        return net
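
# A minimal shape-check sketch, not part of the original module; input sizes below are
# illustrative assumptions. With the conv defs above, a 224x224 image yields a stride-16,
# 96-channel base feature map, and the head turns an ROI-sized crop into a 1280-d vector.
if __name__ == '__main__':
    img = tf.placeholder(tf.float32, [1, 224, 224, 3])
    base_feat = mobilenetv2_base(img, is_training=False)   # expected shape: [1, 14, 14, 96]

    rois = tf.placeholder(tf.float32, [8, 14, 14, 96])      # stand-in for ROI-cropped features
    head_feat = mobilenetv2_head(rois, is_training=False)   # expected shape: [8, 1280]

    print(base_feat.get_shape(), head_feat.get_shape())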