
Tutorial: Python layers.Add Code Examples

51自学网 2020-12-01 11:08:50 · Keras

This tutorial on Python layers.Add code examples is quite practical; we hope it helps you.

This article collects and summarizes typical usage examples of the keras.layers.Add method in Python. If you have been wondering how exactly to use layers.Add, what it does, or where to find examples of it, the curated code examples here may be just what you need. You can also explore further usage examples from its parent module, keras.layers.

Below are 22 code examples of the layers.Add method, sorted by popularity by default.
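Before the examples, here is a minimal sketch of what keras.layers.Add does (our illustration, not taken from the examples below): it accepts a list of tensors with identical shapes and returns their element-wise sum.

from keras.layers import Input, Dense, Add
from keras.models import Model

# two branches with matching output shapes
inp = Input(shape=(16,))
a = Dense(8)(inp)
b = Dense(8)(inp)
summed = Add()([a, b])  # element-wise sum, output shape (None, 8)
model = Model(inp, summed)
model.summary()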

Example 1: residual

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Add [as alias]
def residual(_x, out_dim, name, stride=1):
    shortcut = _x
    num_channels = K.int_shape(shortcut)[-1]
    _x = ZeroPadding2D(padding=1, name=name + '.pad1')(_x)
    _x = Conv2D(out_dim, 3, strides=stride, use_bias=False, name=name + '.conv1')(_x)
    _x = BatchNormalization(epsilon=1e-5, name=name + '.bn1')(_x)
    _x = Activation('relu', name=name + '.relu1')(_x)
    _x = Conv2D(out_dim, 3, padding='same', use_bias=False, name=name + '.conv2')(_x)
    _x = BatchNormalization(epsilon=1e-5, name=name + '.bn2')(_x)
    if num_channels != out_dim or stride != 1:
        shortcut = Conv2D(out_dim, 1, strides=stride, use_bias=False,
                          name=name + '.shortcut.0')(shortcut)
        shortcut = BatchNormalization(epsilon=1e-5, name=name + '.shortcut.1')(shortcut)
    _x = Add(name=name + '.add')([_x, shortcut])
    _x = Activation('relu', name=name + '.relu')(_x)
    return _x

Developer: see--, Project: keras-centernet, Lines of code: 21, Source file: hourglass.py
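A quick usage sketch (our addition, not part of keras-centernet): dropping the block into a small functional model shows that the 1x1 shortcut projection fires whenever the channel count or stride changes.

from keras import backend as K
from keras.layers import Input, ZeroPadding2D, Conv2D, BatchNormalization, Activation, Add
from keras.models import Model

# assumes the residual() function above is in scope
inp = Input(shape=(64, 64, 32))
out = residual(inp, out_dim=64, name='res1', stride=2)  # 32 -> 64 channels, stride 2: shortcut is projected
model = Model(inp, out)
print(model.output_shape)  # (None, 32, 32, 64)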


Example 2: expand_conv

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Add [as alias]
def expand_conv(init, base, k, strides=(1, 1)):
    x = Convolution2D(base * k, (3, 3), padding='same', strides=strides, kernel_initializer='he_normal',
                      use_bias=False)(init)
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    x = BatchNormalization(axis=channel_axis, momentum=0.1, epsilon=1e-5, gamma_initializer='uniform')(x)
    x = Activation('relu')(x)
    x = Convolution2D(base * k, (3, 3), padding='same', kernel_initializer='he_normal',
                      use_bias=False)(x)
    skip = Convolution2D(base * k, (1, 1), padding='same', strides=strides, kernel_initializer='he_normal',
                         use_bias=False)(init)
    m = Add()([x, skip])
    return m

Developer: cvjena, Project: semantic-embeddings, Lines of code: 20, Source file: wide_residual_network.py


Example 3: conv_block

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Add [as alias]
def conv_block(input, base, k=1, dropout=0.0):
    init = input
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    x = BatchNormalization(axis=channel_axis, momentum=0.1, epsilon=1e-5, gamma_initializer='uniform')(input)
    x = Activation('relu')(x)
    x = Convolution2D(base * k, (3, 3), padding='same', kernel_initializer='he_normal',
                      use_bias=False)(x)
    if dropout > 0.0:
        x = Dropout(dropout)(x)
    x = BatchNormalization(axis=channel_axis, momentum=0.1, epsilon=1e-5, gamma_initializer='uniform')(x)
    x = Activation('relu')(x)
    x = Convolution2D(base * k, (3, 3), padding='same', kernel_initializer='he_normal',
                      use_bias=False)(x)
    m = Add()([init, x])
    return m

Developer: cvjena, Project: semantic-embeddings, Lines of code: 21, Source file: wide_residual_network.py
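For context, a sketch (ours, not from the source repo) of how these two blocks typically compose into one group of a wide residual network: expand_conv changes the width and stride once, then conv_block repeats at constant width.

def wrn_group(x, base, k, n_blocks, strides=(1, 1), dropout=0.0):
    # hypothetical helper: widen/downsample once, then stack residual blocks
    x = expand_conv(x, base, k, strides=strides)
    for _ in range(n_blocks - 1):
        x = conv_block(x, base, k=k, dropout=dropout)
    return x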


Example 4: __call__

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Add [as alias]
def __call__(self, x, encoder_output, return_attention=False):
    x_embedded = self._embedding(x)
    pos_encoding = self._position_encoding(x)
    pos_encoding_embedded = self._position_embedding(pos_encoding)
    x = Add()([x_embedded, pos_encoding_embedded])

    self_atts = []
    enc_atts = []
    for layer in self._layers:
        x, self_att, enc_att = layer(x, encoder_output)
        if return_attention:
            self_atts.append(self_att)
            enc_atts.append(enc_att)

    if return_attention:
        return [x, self_atts, enc_atts]
    else:
        return x

Developer: zimmerrol, Project: attention-is-all-you-need-keras, Lines of code: 22, Source file: model.py


Example 5: _residual_block

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Add [as alias]
def _residual_block(self, units, inputs):
    out = Dense(units=units, kernel_regularizer=self.kernel_regularizer, activation=self.activation,
                kernel_initializer=self.kernel_initializer, kernel_constraint=self.kernel_constraint,
                use_bias=self.use_bias, bias_regularizer=self.bias_regularizer,
                bias_initializer=self.bias_initializer, bias_constraint=self.bias_constraint)(inputs)
    out = Dropout(self.dropout)(out)
    out = Dense(units=units, kernel_regularizer=self.kernel_regularizer, activation=self.activation,
                kernel_initializer=self.kernel_initializer, kernel_constraint=self.kernel_constraint,
                use_bias=self.use_bias, bias_regularizer=self.bias_regularizer,
                bias_initializer=self.bias_initializer, bias_constraint=self.bias_constraint)(out)
    out = BatchNormalization(trainable=True)(out)
    if K.int_shape(inputs)[-1] != K.int_shape(out)[-1]:
        inputs = Dense(units=units, kernel_regularizer=self.kernel_regularizer, activation=self.activation,
                       kernel_initializer=self.kernel_initializer, kernel_constraint=self.kernel_constraint,
                       use_bias=self.use_bias, bias_regularizer=self.bias_regularizer,
                       bias_initializer=self.bias_initializer, bias_constraint=self.bias_constraint)(inputs)
    out = Add()([inputs, out])
    return out

Developer: albertogaspar, Project: dts, Lines of code: 21, Source file: FFNN.py


Example 6: shortcut_pool

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Add [as alias]
def shortcut_pool(inputs, output, filters=256, pool_type='max', shortcut=True):
    """ResNet-style shortcut (also called skip or residual connection).
    Here a shortcut connection is used: an identity mapping, block + f(block),
    combined with downsampling.
    Reference: https://github.com/zonetrooper32/VDCNN/blob/keras_version/vdcnn.py
    :param inputs: tensor
    :param output: tensor
    :param filters: int
    :param pool_type: str, 'max', 'k-max', 'conv' or other
    :param shortcut: boolean
    :return: tensor
    """
    if shortcut:
        conv_2 = Conv1D(filters=filters, kernel_size=1, strides=2, padding='SAME')(inputs)
        conv_2 = BatchNormalization()(conv_2)
        output = downsampling(output, pool_type=pool_type)
        out = Add()([output, conv_2])
    else:
        out = ReLU()(inputs)  # fixed: ReLU is a layer class and must be instantiated before being called
        out = downsampling(out, pool_type=pool_type)
    if pool_type is not None:  # double the filters
        out = Conv1D(filters=filters * 2, kernel_size=1, strides=1, padding='SAME')(out)
        out = BatchNormalization()(out)
    return out

Developer: yongzhuo, Project: Keras-TextClassification, Lines of code: 27, Source file: graph.py
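Note that shortcut_pool calls a downsampling helper that is not included in the snippet. A rough sketch of what it can look like (our assumption, modeled on the VDCNN reference linked in the docstring; the real helper also supports 'k-max' pooling):

from keras import backend as K
from keras.layers import MaxPooling1D, Conv1D, BatchNormalization

def downsampling(inputs, pool_type='max'):
    # halve the sequence length with pooling or a strided convolution
    if pool_type == 'max':
        return MaxPooling1D(pool_size=3, strides=2, padding='same')(inputs)
    elif pool_type == 'conv':
        x = Conv1D(filters=K.int_shape(inputs)[-1], kernel_size=3,
                   strides=2, padding='same')(inputs)
        return BatchNormalization()(x)
    raise ValueError('unsupported pool_type: %s' % pool_type)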


Example 7: __init__

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Add [as alias]
def __init__(self, name: str, num_heads: int,
             residual_dropout: float = 0, attention_dropout: float = 0,
             activation: Optional[Union[str, Callable]] = 'gelu',
             compression_window_size: int = None,
             use_masking: bool = True,
             vanilla_wiring=False):
    self.attention_layer = MultiHeadSelfAttention(
        num_heads, use_masking=use_masking, dropout=attention_dropout,
        compression_window_size=compression_window_size,
        name=f'{name}_self_attention')
    self.norm1_layer = LayerNormalization(name=f'{name}_normalization1')
    self.dropout_layer = (
        Dropout(residual_dropout, name=f'{name}_dropout')
        if residual_dropout > 0
        else lambda x: x)
    self.norm2_layer = LayerNormalization(name=f'{name}_normalization2')
    self.transition_layer = TransformerTransition(
        name=f'{name}_transition', activation=activation)
    self.addition_layer = Add(name=f'{name}_add')
    self.vanilla_wiring = vanilla_wiring

Developer: kpot, Project: keras-transformer, Lines of code: 22, Source file: transformer.py


Example 8: expand_conv

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Add [as alias]
def expand_conv(init, base, k, strides=(1, 1)):
    x = Convolution2D(base * k, (3, 3), padding='same', strides=strides, kernel_initializer='he_normal',
                      use_bias=False)(init)
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    x = BatchRenormalization(axis=channel_axis, momentum=0.1, epsilon=1e-5, gamma_init='uniform')(x)
    x = Activation('relu')(x)
    x = Convolution2D(base * k, (3, 3), padding='same', kernel_initializer='he_normal',
                      use_bias=False)(x)
    skip = Convolution2D(base * k, (1, 1), padding='same', strides=strides, kernel_initializer='he_normal',
                         use_bias=False)(init)
    m = Add()([x, skip])
    return m

Developer: titu1994, Project: BatchRenormalization, Lines of code: 20, Source file: wrn_renorm.py


Example 9: build_generator

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Add [as alias]
def build_generator(self):
    """Resnet Generator"""

    def residual_block(layer_input):
        """Residual block described in paper"""
        d = Conv2D(64, kernel_size=3, strides=1, padding='same')(layer_input)
        d = BatchNormalization(momentum=0.8)(d)
        d = Activation('relu')(d)
        d = Conv2D(64, kernel_size=3, strides=1, padding='same')(d)
        d = BatchNormalization(momentum=0.8)(d)
        d = Add()([d, layer_input])
        return d

    # Image input
    img = Input(shape=self.img_shape)

    l1 = Conv2D(64, kernel_size=3, padding='same', activation='relu')(img)

    # Propagate signal through residual blocks
    r = residual_block(l1)
    for _ in range(self.residual_blocks - 1):
        r = residual_block(r)

    output_img = Conv2D(self.channels, kernel_size=3, padding='same', activation='tanh')(r)

    return Model(img, output_img)

Developer: eriklindernoren, Project: Keras-GAN, Lines of code: 28, Source file: pixelda.py


Example 10: identity_block

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Add [as alias]
def identity_block(input_tensor, kernel_size, filters, stage, block,
                   use_bias=True, train_bn=True):
    """The identity_block is the block that has no conv layer at shortcut
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        use_bias: Boolean. To use or not use a bias in conv layers.
        train_bn: Boolean. Train or freeze Batch Norm layers
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',
                  use_bias=use_bias)(input_tensor)
    x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
                  name=conv_name_base + '2b', use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',
                  use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)
    x = KL.Add()([x, input_tensor])
    x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
    return x

Developer: dataiku, Project: dataiku-contrib, Lines of code: 35, Source file: model.py
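BatchNorm here is not keras.layers.BatchNormalization itself but a thin wrapper defined elsewhere in the Mask R-CNN codebase; a plausible sketch (our assumption) is a subclass that lets the caller pin the training flag per call, so batch-norm statistics can be frozen:

import keras.layers as KL

class BatchNorm(KL.BatchNormalization):
    def call(self, inputs, training=None):
        # training=None: default behavior; False: freeze statistics; True: update statistics
        return super(BatchNorm, self).call(inputs, training=training)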


Example 11: conv_block

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Add [as alias]
def conv_block(input_tensor, kernel_size, filters, stage, block,
               strides=(2, 2), use_bias=True, train_bn=True):
    """conv_block is the block that has a conv layer at shortcut
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        use_bias: Boolean. To use or not use a bias in conv layers.
        train_bn: Boolean. Train or freeze Batch Norm layers
    Note that from stage 3, the first conv layer at main path is with subsample=(2,2)
    And the shortcut should have subsample=(2,2) as well
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,
                  name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)
    x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
                  name=conv_name_base + '2b', use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',
                  use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)
    shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,
                         name=conv_name_base + '1', use_bias=use_bias)(input_tensor)
    shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn)
    x = KL.Add()([x, shortcut])
    x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
    return x

Developer: dataiku, Project: dataiku-contrib, Lines of code: 41, Source file: model.py


Example 12: get_srresnet_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Add [as alias]
def get_srresnet_model(input_channel_num=3, feature_dim=64, resunit_num=16):
    def _residual_block(inputs):
        x = Conv2D(feature_dim, (3, 3), padding="same", kernel_initializer="he_normal")(inputs)
        x = BatchNormalization()(x)
        x = PReLU(shared_axes=[1, 2])(x)
        x = Conv2D(feature_dim, (3, 3), padding="same", kernel_initializer="he_normal")(x)
        x = BatchNormalization()(x)
        m = Add()([x, inputs])
        return m

    inputs = Input(shape=(None, None, input_channel_num))
    x = Conv2D(feature_dim, (3, 3), padding="same", kernel_initializer="he_normal")(inputs)
    x = PReLU(shared_axes=[1, 2])(x)
    x0 = x

    for i in range(resunit_num):
        x = _residual_block(x)

    x = Conv2D(feature_dim, (3, 3), padding="same", kernel_initializer="he_normal")(x)
    x = BatchNormalization()(x)
    x = Add()([x, x0])
    x = Conv2D(input_channel_num, (3, 3), padding="same", kernel_initializer="he_normal")(x)
    model = Model(inputs=inputs, outputs=x)

    return model

# UNet: code from https://github.com/pietz/unet-keras

Developer: zxq2233, Project: n2n-watermark-remove, Lines of code: 31, Source file: model.py


Example 13: resblock_body

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Add [as alias]
def resblock_body(x, num_filters, num_blocks):
    '''A series of resblocks starting with a downsampling Convolution2D'''
    # Darknet uses left and top padding instead of 'same' mode
    x = ZeroPadding2D(((1, 0), (1, 0)))(x)
    x = DarknetConv2D_BN_Leaky(num_filters, (3, 3), strides=(2, 2))(x)
    for i in range(num_blocks):
        y = compose(
                DarknetConv2D_BN_Leaky(num_filters // 2, (1, 1)),
                DarknetConv2D_BN_Leaky(num_filters, (3, 3)))(x)
        x = Add()([x, y])
    return x

Developer: bing0037, Project: keras-yolo3, Lines of code: 13, Source file: model.py
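compose and DarknetConv2D_BN_Leaky are defined elsewhere in keras-yolo3. Roughly (our sketch, omitting the repo's extra kwargs handling): compose chains layer callables left to right, and DarknetConv2D_BN_Leaky is a bias-free Conv2D followed by batch norm and a LeakyReLU.

from functools import reduce
from keras.layers import Conv2D, BatchNormalization, LeakyReLU
from keras.regularizers import l2

def compose(*funcs):
    # left-to-right composition: compose(f, g)(x) == g(f(x))
    return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)

def DarknetConv2D_BN_Leaky(filters, kernel_size, strides=(1, 1)):
    # Darknet downsampling convs use 'valid' padding after explicit ZeroPadding2D
    padding = 'valid' if strides == (2, 2) else 'same'
    return compose(
        Conv2D(filters, kernel_size, strides=strides, padding=padding,
               use_bias=False, kernel_regularizer=l2(5e-4)),
        BatchNormalization(),
        LeakyReLU(alpha=0.1))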


Example 14: _inverted_res_block

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Add [as alias]
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id, skip_connection, rate=1):
    in_channels = inputs.shape[-1].value  # inputs._keras_shape[-1]
    pointwise_conv_filters = int(filters * alpha)
    pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
    x = inputs
    prefix = 'expanded_conv_{}_'.format(block_id)
    if block_id:
        # Expand
        x = Conv2D(expansion * in_channels, kernel_size=1, padding='same',
                   use_bias=False, activation=None,
                   name=prefix + 'expand')(x)
        x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                               name=prefix + 'expand_BN')(x)
        x = Activation(relu6, name=prefix + 'expand_relu')(x)
    else:
        prefix = 'expanded_conv_'

    # Depthwise
    x = DepthwiseConv2D(kernel_size=3, strides=stride, activation=None,
                        use_bias=False, padding='same', dilation_rate=(rate, rate),
                        name=prefix + 'depthwise')(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                           name=prefix + 'depthwise_BN')(x)
    x = Activation(relu6, name=prefix + 'depthwise_relu')(x)

    # Project
    x = Conv2D(pointwise_filters,
               kernel_size=1, padding='same', use_bias=False, activation=None,
               name=prefix + 'project')(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                           name=prefix + 'project_BN')(x)

    if skip_connection:
        return Add(name=prefix + 'add')([inputs, x])

    # if in_channels == pointwise_filters and stride == 1:
    #    return Add(name='res_connect_' + str(block_id))([inputs, x])

    return x

Developer: bubbliiiing, Project: Semantic-Segmentation, Lines of code: 42, Source file: mobilenetV2.py
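relu6 and _make_divisible come from elsewhere in the MobileNetV2 code; they look approximately like this (our sketch):

from keras import backend as K

def relu6(x):
    # ReLU capped at 6, the activation used throughout MobileNet
    return K.relu(x, max_value=6)

def _make_divisible(v, divisor, min_value=None):
    # round a channel count to a multiple of `divisor`,
    # never dropping more than 10% below the requested value
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v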


Example 15: res_block

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Add [as alias]
def res_block(input_tensor, f):
    x = input_tensor
    x = Conv2D(f, kernel_size=3, kernel_initializer=conv_init, use_bias=False, padding="same")(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = Conv2D(f, kernel_size=3, kernel_initializer=conv_init, use_bias=False, padding="same")(x)
    x = Add()([x, input_tensor])
    x = LeakyReLU(alpha=0.2)(x)
    return x

Developer: dfaker, Project: df, Lines of code: 10, Source file: model.py


Example 16: DC_CNN_Block

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Add [as alias]
def DC_CNN_Block(nb_filter, filter_length, dilation, l2_layer_reg):
    def f(input_):
        residual = input_

        layer_out = Conv1D(filters=nb_filter, kernel_size=filter_length,
                           dilation_rate=dilation,
                           activation='linear', padding='causal', use_bias=False,
                           kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.05, seed=42),
                           kernel_regularizer=l2(l2_layer_reg))(input_)

        layer_out = Activation('selu')(layer_out)

        skip_out = Conv1D(1, 1, activation='linear', use_bias=False,
                          kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.05, seed=42),
                          kernel_regularizer=l2(l2_layer_reg))(layer_out)

        network_in = Conv1D(1, 1, activation='linear', use_bias=False,
                            kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.05, seed=42),
                            kernel_regularizer=l2(l2_layer_reg))(layer_out)

        network_out = Add()([residual, network_in])

        return network_out, skip_out

    return f

Developer: kristpapadopoulos, Project: seriesnet, Lines of code: 28, Source file: seriesnet.py


Example 17: DC_CNN_Model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Add [as alias]
def DC_CNN_Model(length):
    input = Input(shape=(length, 1))

    l1a, l1b = DC_CNN_Block(32, 2, 1, 0.001)(input)
    l2a, l2b = DC_CNN_Block(32, 2, 2, 0.001)(l1a)
    l3a, l3b = DC_CNN_Block(32, 2, 4, 0.001)(l2a)
    l4a, l4b = DC_CNN_Block(32, 2, 8, 0.001)(l3a)
    l5a, l5b = DC_CNN_Block(32, 2, 16, 0.001)(l4a)
    l6a, l6b = DC_CNN_Block(32, 2, 32, 0.001)(l5a)
    l6b = Dropout(0.8)(l6b)  # dropout used to limit influence of earlier data
    l7a, l7b = DC_CNN_Block(32, 2, 64, 0.001)(l6a)
    l7b = Dropout(0.8)(l7b)  # dropout used to limit influence of earlier data

    l8 = Add()([l1b, l2b, l3b, l4b, l5b, l6b, l7b])

    l9 = Activation('relu')(l8)

    l21 = Conv1D(1, 1, activation='linear', use_bias=False,
                 kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.05, seed=42),
                 kernel_regularizer=l2(0.001))(l9)

    model = Model(inputs=input, outputs=l21)  # fixed: Keras 2 expects inputs=/outputs=

    adam = optimizers.Adam(lr=0.00075, beta_1=0.9, beta_2=0.999, epsilon=None,
                           decay=0.0, amsgrad=False)

    model.compile(loss='mae', optimizer=adam, metrics=['mse'])

    return model

Developer: kristpapadopoulos, Project: seriesnet, Lines of code: 32, Source file: seriesnet.py
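A quick way to exercise the model (our sketch, with made-up dummy data, assuming the imports used by the snippet are in scope): the network maps a univariate series of fixed length to an output of the same shape, so it can be smoke-tested like this.

import numpy as np

model = DC_CNN_Model(length=128)
x_dummy = np.random.rand(8, 128, 1)  # batch of 8 univariate series
y_dummy = np.random.rand(8, 128, 1)  # same shape as the model output
model.fit(x_dummy, y_dummy, epochs=1, batch_size=4)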


Example 18: __init__

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Add [as alias]
def __init__(self, n_state: int, n_head: int, d_hid: int, residual_dropout: float, attention_dropout: float,
             use_attn_mask: bool, layer_id: int, **kwargs) -> None:
    self.attention = MultiHeadSelfAttention(n_state, n_head, attention_dropout, use_attn_mask, layer_id)
    self.drop1 = Dropout(residual_dropout, name='layer_{}/ln_1_drop'.format(layer_id))
    self.add1 = Add(name='layer_{}/ln_1_add'.format(layer_id))
    self.ln1 = LayerNormalization(name='layer_{}/ln_1'.format(layer_id))
    self.ffn = PositionWiseFF(n_state, d_hid, layer_id)
    self.drop2 = Dropout(residual_dropout, name='layer_{}/ln_2_drop'.format(layer_id))
    self.add2 = Add(name='layer_{}/ln_2_add'.format(layer_id))
    self.ln2 = LayerNormalization(name='layer_{}/ln_2'.format(layer_id))

Developer: yyht, Project: BERT, Lines of code: 12, Source file: model.py


Example 19: identity_block

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Add [as alias]
def identity_block(input_tensor, kernel_size, filters, stage, block,
                   use_bias=True):
    """The identity_block is the block that has no conv layer at shortcut
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',
                  use_bias=use_bias)(input_tensor)
    x = BatchNorm(axis=3, name=bn_name_base + '2a')(x)
    x = KL.Activation('relu')(x)
    x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
                  name=conv_name_base + '2b', use_bias=use_bias)(x)
    x = BatchNorm(axis=3, name=bn_name_base + '2b')(x)
    x = KL.Activation('relu')(x)
    x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',
                  use_bias=use_bias)(x)
    x = BatchNorm(axis=3, name=bn_name_base + '2c')(x)
    x = KL.Add()([x, input_tensor])
    x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
    return x

Developer: SunskyF, Project: EasyPR-python, Lines of code: 33, Source file: model.py


Example 20: conv_block

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Add [as alias]
def conv_block(input_tensor, kernel_size, filters, stage, block,
               strides=(2, 2), use_bias=True):
    """conv_block is the block that has a conv layer at shortcut
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    Note that from stage 3, the first conv layer at main path is with subsample=(2,2)
    And the shortcut should have subsample=(2,2) as well
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,
                  name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)
    x = BatchNorm(axis=3, name=bn_name_base + '2a')(x)
    x = KL.Activation('relu')(x)
    x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
                  name=conv_name_base + '2b', use_bias=use_bias)(x)
    x = BatchNorm(axis=3, name=bn_name_base + '2b')(x)
    x = KL.Activation('relu')(x)
    x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c', use_bias=use_bias)(x)
    x = BatchNorm(axis=3, name=bn_name_base + '2c')(x)
    shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,
                         name=conv_name_base + '1', use_bias=use_bias)(input_tensor)
    shortcut = BatchNorm(axis=3, name=bn_name_base + '1')(shortcut)
    x = KL.Add()([x, shortcut])
    x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
    return x

Developer: SunskyF, Project: EasyPR-python, Lines of code: 38, Source file: model.py


Example 21: compile

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Add [as alias]
def compile(self, learning_rate, momentum):
    """Gets the model ready for training. Adds losses, regularization, and
    metrics. Then calls the Keras compile() function.
    """
    # Optimizer object
    optimizer = keras.optimizers.SGD(lr=learning_rate, momentum=momentum,
                                     clipnorm=5.0)
    # Add Losses
    # First, clear previously set losses to avoid duplication
    self.keras_model._losses = []
    self.keras_model._per_input_losses = {}
    loss_names = ["rpn_class_loss", "rpn_bbox_loss",
                  "mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss"]
    for name in loss_names:
        layer = self.keras_model.get_layer(name)
        if layer.output in self.keras_model.losses:
            continue
        self.keras_model.add_loss(tf.reduce_mean(layer.output, keepdims=True))

    # Add L2 Regularization
    reg_losses = [keras.regularizers.l2(self.config.WEIGHT_DECAY)(w)
                  for w in self.keras_model.trainable_weights]
    self.keras_model.add_loss(tf.add_n(reg_losses))

    # Compile
    self.keras_model.compile(optimizer=optimizer, loss=[None] * len(self.keras_model.outputs))

    # Add metrics
    for name in loss_names:
        if name in self.keras_model.metrics_names:
            continue
        layer = self.keras_model.get_layer(name)
        self.keras_model.metrics_names.append(name)
        self.keras_model.metrics_tensors.append(tf.reduce_mean(layer.output,
                                                               keepdims=True))

Developer: SunskyF, Project: EasyPR-python, Lines of code: 37, Source file: model.py


Example 22: _inverted_res_block

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Add [as alias]
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id, skip_connection, rate=1):
    in_channels = inputs._keras_shape[-1]
    pointwise_conv_filters = int(filters * alpha)
    pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
    x = inputs
    prefix = 'expanded_conv_{}_'.format(block_id)
    if block_id:
        # Expand
        x = Conv2D(expansion * in_channels, kernel_size=1, padding='same',
                   use_bias=False, activation=None,
                   name=prefix + 'expand')(x)
        x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                               name=prefix + 'expand_BN')(x)
        x = Activation(relu6, name=prefix + 'expand_relu')(x)
    else:
        prefix = 'expanded_conv_'

    # Depthwise
    x = DepthwiseConv2D(kernel_size=3, strides=stride, activation=None,
                        use_bias=False, padding='same', dilation_rate=(rate, rate),
                        name=prefix + 'depthwise')(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                           name=prefix + 'depthwise_BN')(x)
    x = Activation(relu6, name=prefix + 'depthwise_relu')(x)

    # Project
    x = Conv2D(pointwise_filters,
               kernel_size=1, padding='same', use_bias=False, activation=None,
               name=prefix + 'project')(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                           name=prefix + 'project_BN')(x)

    if skip_connection:
        return Add(name=prefix + 'add')([inputs, x])

    # if in_channels == pointwise_filters and stride == 1:
    #    return Add(name='res_connect_' + str(block_id))([inputs, x])

    return x

Developer: andrewekhalel, Project: edafa, Lines of code: 42, Source file: model.py

