
Tutorial: Code examples for the Python layers.Cropping2D method

51自学网  2020-12-01 11:09:08
Keras
This tutorial on Python layers.Cropping2D code examples is quite practical; we hope it helps you.

This article collects typical usage examples of the Python keras.layers.Cropping2D method. If you have been wondering how exactly to use layers.Cropping2D, what it does in practice, or where to find usage examples, the hand-picked code samples below may help. You can also explore further usage examples from keras.layers, the module this method belongs to.

A total of 12 code examples of the layers.Cropping2D method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
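Before the project examples, a quick note on the cropping argument: for a channels_last input, Cropping2D(cropping=((top, bottom), (left, right))) removes that many rows from the top and bottom and that many columns from the left and right of each feature map. The following minimal sketch is not taken from any of the projects below; it assumes a standalone Keras install and reuses the same cropping values as Example 2.

    # Minimal sketch of Cropping2D semantics (illustration only, not from the projects below).
    import numpy as np
    from keras.models import Sequential
    from keras.layers import Cropping2D

    model = Sequential()
    # Remove 2 rows from the top, 5 from the bottom, 2 columns from the left
    # and 5 from the right of a 48x48x3 (channels_last) input.
    model.add(Cropping2D(cropping=((2, 5), (2, 5)), input_shape=(48, 48, 3)))

    x = np.zeros((1, 48, 48, 3))
    print(model.predict(x).shape)  # expected: (1, 41, 41, 3)

Running this prints (1, 41, 41, 3): 48 minus 2 minus 5 leaves 41 rows and 41 columns.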

Example 1: crop_to_fit

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Cropping2D [as alias]
def crop_to_fit(main, to_crop):
    from keras.layers import Cropping2D
    import keras.backend as K
    cropped_skip = to_crop
    skip_size = K.int_shape(cropped_skip)[1]
    out_size = K.int_shape(main)[1]
    if skip_size > out_size:
        size_diff = (skip_size - out_size) // 2
        size_diff_odd = ((skip_size - out_size) // 2) + ((skip_size - out_size) % 2)
        cropped_skip = Cropping2D(((size_diff, size_diff_odd),) * 2)(cropped_skip)
    return cropped_skip
Developer: basveeling | Project: keras-gcnn | Lines of code: 13 | Source file: densenetnew.py


Example 2: test_crop_simple

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Cropping2D [as alias]
def test_crop_simple(self):
    input_shape = (48, 48, 3)
    model = Sequential()
    model.add(Cropping2D(cropping=((2, 5), (2, 5)), input_shape=input_shape))
    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    # Get the coreml model
    self._test_model(model)
Developer: apple | Project: coremltools | Lines of code: 13 | Source file: test_keras2_numeric.py


Example 3: upsampling_block

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Cropping2D [as alias]
def upsampling_block(self, input_tensor, skip_tensor, filters, padding='valid',
                     batchnorm=False, dropout=0.0):
    x = Conv2DTranspose(filters, kernel_size=(2, 2), strides=(2, 2))(input_tensor)
    # compute amount of cropping needed for skip_tensor
    _, x_height, x_width, _ = K.int_shape(x)
    _, s_height, s_width, _ = K.int_shape(skip_tensor)
    h_crop = s_height - x_height
    w_crop = s_width - x_width
    assert h_crop >= 0
    assert w_crop >= 0
    if h_crop == 0 and w_crop == 0:
        y = skip_tensor
    else:
        cropping = ((h_crop // 2, h_crop - h_crop // 2), (w_crop // 2, w_crop - w_crop // 2))
        y = Cropping2D(cropping=cropping)(skip_tensor)
    x = Concatenate()([x, y])
    # no dilation in upsampling convolutions
    x = Conv2D(filters, kernel_size=(3, 3), padding=padding)(x)
    x = BatchNormalization()(x) if batchnorm else x
    x = Activation('relu')(x)
    x = Dropout(dropout)(x) if dropout > 0 else x
    x = Conv2D(filters, kernel_size=(3, 3), padding=padding)(x)
    x = BatchNormalization()(x) if batchnorm else x
    x = Activation('relu')(x)
    x = Dropout(dropout)(x) if dropout > 0 else x
    return x
Developer: jackkwok | Project: neural-road-inspector | Lines of code: 33 | Source file: unet.py


Example 4: upsampling_block

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Cropping2D [as alias]
def upsampling_block(input_tensor, skip_tensor, filters, padding='same', batchnorm=True, dropout=0.0):
    x = Conv2DTranspose(filters, kernel_size=(2, 2), strides=(2, 2))(input_tensor)
    # compute amount of cropping needed for skip_tensor
    _, x_height, x_width, _ = K.int_shape(x)
    _, s_height, s_width, _ = K.int_shape(skip_tensor)
    h_crop = s_height - x_height
    w_crop = s_width - x_width
    assert h_crop >= 0
    assert w_crop >= 0
    if h_crop == 0 and w_crop == 0:
        y = skip_tensor
    else:
        cropping = ((h_crop // 2, h_crop - h_crop // 2), (w_crop // 2, w_crop - w_crop // 2))
        y = Cropping2D(cropping=cropping)(skip_tensor)
    x = Concatenate()([x, y])
    x = Conv2D(filters, kernel_size=(3, 3), padding=padding)(x)
    x = BatchNormalization()(x) if batchnorm else x
    x = Activation('relu')(x)
    x = Dropout(dropout)(x) if dropout > 0 else x
    x = Conv2D(filters, kernel_size=(3, 3), padding=padding)(x)
    x = BatchNormalization()(x) if batchnorm else x
    x = Activation('relu')(x)
    x = Dropout(dropout)(x) if dropout > 0 else x
    return x
Developer: neuropoly | Project: spinalcordtoolbox | Lines of code: 31 | Source file: cnn_models.py


Example 5: get_unet

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Cropping2D [as alias]
def get_unet(img_shape=None):
    dim_ordering = 'tf'
    inputs = Input(shape=img_shape)
    concat_axis = -1
    ### the size of convolutional kernels is defined here
    conv1 = Convolution2D(64, 5, 5, activation='relu', border_mode='same', dim_ordering=dim_ordering, name='conv1_1')(inputs)
    conv1 = Convolution2D(64, 5, 5, activation='relu', border_mode='same', dim_ordering=dim_ordering)(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2), dim_ordering=dim_ordering)(conv1)
    conv2 = Convolution2D(96, 3, 3, activation='relu', border_mode='same', dim_ordering=dim_ordering)(pool1)
    conv2 = Convolution2D(96, 3, 3, activation='relu', border_mode='same', dim_ordering=dim_ordering)(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2), dim_ordering=dim_ordering)(conv2)
    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same', dim_ordering=dim_ordering)(pool2)
    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same', dim_ordering=dim_ordering)(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2), dim_ordering=dim_ordering)(conv3)
    conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same', dim_ordering=dim_ordering)(pool3)
    conv4 = Convolution2D(256, 4, 4, activation='relu', border_mode='same', dim_ordering=dim_ordering)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2), dim_ordering=dim_ordering)(conv4)
    conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same', dim_ordering=dim_ordering)(pool4)
    conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same', dim_ordering=dim_ordering)(conv5)
    up_conv5 = UpSampling2D(size=(2, 2), dim_ordering=dim_ordering)(conv5)
    ch, cw = get_crop_shape(conv4, up_conv5)
    crop_conv4 = Cropping2D(cropping=(ch, cw), dim_ordering=dim_ordering)(conv4)
    up6 = merge([up_conv5, crop_conv4], mode='concat', concat_axis=concat_axis)
    conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same', dim_ordering=dim_ordering)(up6)
    conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same', dim_ordering=dim_ordering)(conv6)
    up_conv6 = UpSampling2D(size=(2, 2), dim_ordering=dim_ordering)(conv6)
    ch, cw = get_crop_shape(conv3, up_conv6)
    crop_conv3 = Cropping2D(cropping=(ch, cw), dim_ordering=dim_ordering)(conv3)
    up7 = merge([up_conv6, crop_conv3], mode='concat', concat_axis=concat_axis)
    conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same', dim_ordering=dim_ordering)(up7)
    conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same', dim_ordering=dim_ordering)(conv7)
    up_conv7 = UpSampling2D(size=(2, 2), dim_ordering=dim_ordering)(conv7)
    ch, cw = get_crop_shape(conv2, up_conv7)
    crop_conv2 = Cropping2D(cropping=(ch, cw), dim_ordering=dim_ordering)(conv2)
    up8 = merge([up_conv7, crop_conv2], mode='concat', concat_axis=concat_axis)
    conv8 = Convolution2D(96, 3, 3, activation='relu', border_mode='same', dim_ordering=dim_ordering)(up8)
    conv8 = Convolution2D(96, 3, 3, activation='relu', border_mode='same', dim_ordering=dim_ordering)(conv8)
    up_conv8 = UpSampling2D(size=(2, 2), dim_ordering=dim_ordering)(conv8)
    ch, cw = get_crop_shape(conv1, up_conv8)
    crop_conv1 = Cropping2D(cropping=(ch, cw), dim_ordering=dim_ordering)(conv1)
    up9 = merge([up_conv8, crop_conv1], mode='concat', concat_axis=concat_axis)
    conv9 = Convolution2D(64, 3, 3, activation='relu', border_mode='same', dim_ordering=dim_ordering)(up9)
    conv9 = Convolution2D(64, 3, 3, activation='relu', border_mode='same', dim_ordering=dim_ordering)(conv9)
    ch, cw = get_crop_shape(inputs, conv9)
    conv9 = ZeroPadding2D(padding=(ch, cw), dim_ordering=dim_ordering)(conv9)
    conv10 = Convolution2D(1, 1, 1, activation='sigmoid', dim_ordering=dim_ordering)(conv9)
    model = Model(input=inputs, output=conv10)
    model.compile(optimizer=Adam(lr=(1e-4) * 2), loss=dice_coef_loss, metrics=[dice_coef_for_training])
    return model

### ----define preprocessing methods/tricks for different datasets------------------------
Developer: hongweilibran | Project: wmh_ibbmTum | Lines of code: 63 | Source file: submission_sysu_.py


Example 6: get_unet

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Cropping2D [as alias]
def get_unet(img_shape=None, first5=True):
    inputs = Input(shape=img_shape)
    concat_axis = -1
    if first5:
        filters = 5
    else:
        filters = 3
    conv1 = conv_bn_relu(64, filters, inputs)
    conv1 = conv_bn_relu(64, filters, conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = conv_bn_relu(96, 3, pool1)
    conv2 = conv_bn_relu(96, 3, conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = conv_bn_relu(128, 3, pool2)
    conv3 = conv_bn_relu(128, 3, conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = conv_bn_relu(256, 3, pool3)
    conv4 = conv_bn_relu(256, 4, conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    conv5 = conv_bn_relu(512, 3, pool4)
    conv5 = conv_bn_relu(512, 3, conv5)
    up_conv5 = UpSampling2D(size=(2, 2))(conv5)
    ch, cw = get_crop_shape(conv4, up_conv5)
    crop_conv4 = Cropping2D(cropping=(ch, cw))(conv4)
    up6 = concatenate([up_conv5, crop_conv4], axis=concat_axis)
    conv6 = conv_bn_relu(256, 3, up6)
    conv6 = conv_bn_relu(256, 3, conv6)
    up_conv6 = UpSampling2D(size=(2, 2))(conv6)
    ch, cw = get_crop_shape(conv3, up_conv6)
    crop_conv3 = Cropping2D(cropping=(ch, cw))(conv3)
    up7 = concatenate([up_conv6, crop_conv3], axis=concat_axis)
    conv7 = conv_bn_relu(128, 3, up7)
    conv7 = conv_bn_relu(128, 3, conv7)
    up_conv7 = UpSampling2D(size=(2, 2))(conv7)
    ch, cw = get_crop_shape(conv2, up_conv7)
    crop_conv2 = Cropping2D(cropping=(ch, cw))(conv2)
    up8 = concatenate([up_conv7, crop_conv2], axis=concat_axis)
    conv8 = conv_bn_relu(96, 3, up8)
    conv8 = conv_bn_relu(96, 3, conv8)
    up_conv8 = UpSampling2D(size=(2, 2))(conv8)
    ch, cw = get_crop_shape(conv1, up_conv8)
    crop_conv1 = Cropping2D(cropping=(ch, cw))(conv1)
    up9 = concatenate([up_conv8, crop_conv1], axis=concat_axis)
    conv9 = conv_bn_relu(64, 3, up9)
    conv9 = conv_bn_relu(64, 3, conv9)
    ch, cw = get_crop_shape(inputs, conv9)
    conv9 = ZeroPadding2D(padding=(ch, cw))(conv9)
    conv10 = Conv2D(1, 1, activation='sigmoid', padding='same')(conv9)  # , kernel_initializer='he_normal'
    model = Model(inputs=inputs, outputs=conv10)
    model.compile(optimizer=Adam(lr=(2e-4)), loss=dice_coef_loss)
    return model
Developer: hongweilibran | Project: wmh_ibbmTum | Lines of code: 61 | Source file: train_leave_one_out.py


Example 7: _adjust_block

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Cropping2D [as alias]
def _adjust_block(p, ip, filters, weight_decay=5e-5, id=None, weights=None):
    '''
    Adjusts the input `p` to match the shape of the `input`
    or situations where the output number of filters needs to
    be changed

    # Arguments:
        p: input tensor which needs to be modified
        ip: input tensor whose shape needs to be matched
        filters: number of output filters to be matched
        weight_decay: l2 regularization weight
        id: string id

    # Returns:
        an adjusted Keras tensor
    '''
    channel_dim = 1 if K.image_data_format() == 'channels_first' else -1
    img_dim = 2 if K.image_data_format() == 'channels_first' else -2

    with K.name_scope('adjust_block'):
        if p is None:
            p = ip

        elif p._keras_shape[img_dim] != ip._keras_shape[img_dim]:
            with K.name_scope('adjust_reduction_block_%s' % id):
                p = Activation('relu', name='adjust_relu_1_%s' % id)(p)

                p1 = AveragePooling2D((1, 1), strides=(2, 2), padding='valid', name='adjust_avg_pool_1_%s' % id)(p)
                p1 = Conv2D(filters // 2, (1, 1), padding='same', use_bias=False, kernel_regularizer=l2(weight_decay),
                            name='adjust_conv_1_%s' % id, kernel_initializer='he_normal',
                            weights=[weights['path1_conv']])(p1)

                p2 = ZeroPadding2D(padding=((0, 1), (0, 1)))(p)
                p2 = Cropping2D(cropping=((1, 0), (1, 0)))(p2)
                p2 = AveragePooling2D((1, 1), strides=(2, 2), padding='valid', name='adjust_avg_pool_2_%s' % id)(p2)
                p2 = Conv2D(filters // 2, (1, 1), padding='same', use_bias=False, kernel_regularizer=l2(weight_decay),
                            name='adjust_conv_2_%s' % id, kernel_initializer='he_normal',
                            weights=[weights['path2_conv']])(p2)

                p = concatenate([p1, p2], axis=channel_dim)
                p = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                                       name='adjust_bn_%s' % id,
                                       weights=weights['final_bn'])(p)

        elif p._keras_shape[channel_dim] != filters:
            with K.name_scope('adjust_projection_block_%s' % id):
                p = Activation('relu')(p)
                p = Conv2D(filters, (1, 1), strides=(1, 1), padding='same', name='adjust_conv_projection_%s' % id,
                           use_bias=False, kernel_regularizer=l2(weight_decay), kernel_initializer='he_normal',
                           weights=[weights['prev_conv']])(p)
                p = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                                       name='adjust_bn_%s' % id,
                                       weights=weights['prev_bn'])(p)
    return p
Developer: titu1994 | Project: Keras-NASNet | Lines of code: 56 | Source file: nasnet.py


Example 8: _adjust_block

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Cropping2D [as alias]
def _adjust_block(p, ip, filters, weight_decay=5e-5, id=None):
    '''
    Adjusts the input `p` to match the shape of the `input`
    or situations where the output number of filters needs to
    be changed

    # Arguments:
        p: input tensor which needs to be modified
        ip: input tensor whose shape needs to be matched
        filters: number of output filters to be matched
        weight_decay: l2 regularization weight
        id: string id

    # Returns:
        an adjusted Keras tensor
    '''
    channel_dim = 1 if K.image_data_format() == 'channels_first' else -1
    img_dim = 2 if K.image_data_format() == 'channels_first' else -2

    with K.name_scope('adjust_block'):
        if p is None:
            p = ip

        elif p._keras_shape[img_dim] != ip._keras_shape[img_dim]:
            with K.name_scope('adjust_reduction_block_%s' % id):
                p = Activation('relu', name='adjust_relu_1_%s' % id)(p)

                p1 = AveragePooling2D((1, 1), strides=(2, 2), padding='valid', name='adjust_avg_pool_1_%s' % id)(p)
                p1 = Conv2D(filters // 2, (1, 1), padding='same', use_bias=False, kernel_regularizer=l2(weight_decay),
                            name='adjust_conv_1_%s' % id, kernel_initializer='he_normal')(p1)

                p2 = ZeroPadding2D(padding=((0, 1), (0, 1)))(p)
                p2 = Cropping2D(cropping=((1, 0), (1, 0)))(p2)
                p2 = AveragePooling2D((1, 1), strides=(2, 2), padding='valid', name='adjust_avg_pool_2_%s' % id)(p2)
                p2 = Conv2D(filters // 2, (1, 1), padding='same', use_bias=False, kernel_regularizer=l2(weight_decay),
                            name='adjust_conv_2_%s' % id, kernel_initializer='he_normal')(p2)

                p = concatenate([p1, p2], axis=channel_dim)
                p = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                                       name='adjust_bn_%s' % id)(p)

        elif p._keras_shape[channel_dim] != filters:
            with K.name_scope('adjust_projection_block_%s' % id):
                p = Activation('relu')(p)
                p = Conv2D(filters, (1, 1), strides=(1, 1), padding='same', name='adjust_conv_projection_%s' % id,
                           use_bias=False, kernel_regularizer=l2(weight_decay), kernel_initializer='he_normal')(p)
                p = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                                       name='adjust_bn_%s' % id)(p)
    return p
Developer: titu1994 | Project: Keras-NASNet | Lines of code: 51 | Source file: nasnet.py


Example 9: _adjust_block

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Cropping2D [as alias]
def _adjust_block(p, ip, filters, weight_decay=5e-5, id=None):
    '''
    Adjusts the input `p` to match the shape of the `input`
    or situations where the output number of filters needs to
    be changed

    # Arguments:
        p: input tensor which needs to be modified
        ip: input tensor whose shape needs to be matched
        filters: number of output filters to be matched
        weight_decay: l2 regularization weight
        id: string id

    # Returns:
        an adjusted Keras tensor
    '''
    channel_dim = 1 if K.image_data_format() == 'channels_first' else -1
    img_dim = 2 if K.image_data_format() == 'channels_first' else -2

    with K.name_scope('adjust_block'):
        if p is None:
            p = ip

        elif p._keras_shape[img_dim] != ip._keras_shape[img_dim]:
            with K.name_scope('adjust_reduction_block_%s' % id):
                p = Activation('relu', name='adjust_relu_1_%s' % id)(p)

                p1 = AveragePooling2D((1, 1), strides=(2, 2), padding='valid',
                                      name='adjust_avg_pool_1_%s' % id)(p)
                p1 = Conv2D(filters // 2, (1, 1), padding='same', use_bias=False,
                            kernel_regularizer=l2(weight_decay),
                            name='adjust_conv_1_%s' % id,
                            kernel_initializer='he_normal')(p1)

                p2 = ZeroPadding2D(padding=((0, 1), (0, 1)))(p)
                p2 = Cropping2D(cropping=((1, 0), (1, 0)))(p2)
                p2 = AveragePooling2D((1, 1), strides=(2, 2), padding='valid',
                                      name='adjust_avg_pool_2_%s' % id)(p2)
                p2 = Conv2D(filters // 2, (1, 1), padding='same', use_bias=False,
                            kernel_regularizer=l2(weight_decay),
                            name='adjust_conv_2_%s' % id,
                            kernel_initializer='he_normal')(p2)

                p = concatenate([p1, p2], axis=channel_dim)
                p = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY,
                                       epsilon=_BN_EPSILON,
                                       name='adjust_bn_%s' % id)(p)

        elif p._keras_shape[channel_dim] != filters:
            with K.name_scope('adjust_projection_block_%s' % id):
                p = Activation('relu')(p)
                p = Conv2D(filters, (1, 1), strides=(1, 1), padding='same',
                           name='adjust_conv_projection_%s' % id, use_bias=False,
                           kernel_regularizer=l2(weight_decay),
                           kernel_initializer='he_normal')(p)
                p = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY,
                                       epsilon=_BN_EPSILON,
                                       name='adjust_bn_%s' % id)(p)
    return p
Developer: keras-team | Project: keras-contrib | Lines of code: 61 | Source file: nasnet.py


Example 10: generator

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Cropping2D [as alias]
def generator(self):

    if self.G:
        return self.G

    # Style FC, I only used 2 fully connected layers instead of 8 for faster training
    inp_s = Input(shape=[latent_size])
    sty = Dense(512, kernel_initializer='he_normal')(inp_s)
    sty = LeakyReLU(0.1)(sty)
    sty = Dense(512, kernel_initializer='he_normal')(sty)
    sty = LeakyReLU(0.1)(sty)

    # Get the noise image and crop for each size
    inp_n = Input(shape=[im_size, im_size, 1])
    noi = [Activation('linear')(inp_n)]
    curr_size = im_size
    while curr_size > 4:
        curr_size = int(curr_size / 2)
        noi.append(Cropping2D(int(curr_size / 2))(noi[-1]))

    # Here do the actual generation stuff
    inp = Input(shape=[1])
    x = Dense(4 * 4 * 512, kernel_initializer='he_normal')(inp)
    x = Reshape([4, 4, 512])(x)
    x = g_block(x, sty, noi[-1], 512, u=False)

    if(im_size >= 1024):
        x = g_block(x, sty, noi[7], 512)  # Size / 64
    if(im_size >= 512):
        x = g_block(x, sty, noi[6], 384)  # Size / 64
    if(im_size >= 256):
        x = g_block(x, sty, noi[5], 256)  # Size / 32
    if(im_size >= 128):
        x = g_block(x, sty, noi[4], 192)  # Size / 16
    if(im_size >= 64):
        x = g_block(x, sty, noi[3], 128)  # Size / 8

    x = g_block(x, sty, noi[2], 64)  # Size / 4
    x = g_block(x, sty, noi[1], 32)  # Size / 2
    x = g_block(x, sty, noi[0], 16)  # Size

    x = Conv2D(filters=3, kernel_size=1, padding='same', activation='sigmoid')(x)

    self.G = Model(inputs=[inp_s, inp_n, inp], outputs=x)

    return self.G
Developer: manicman1999 | Project: StyleGAN-Keras | Lines of code: 48 | Source file: stylegan.py


Example 11: generator

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Cropping2D [as alias]
def generator(self):

    if self.G:
        return self.G

    inp_s = []
    ss = im_size
    while ss >= 4:
        inp_s.append(Input(shape=[512]))
        ss = int(ss / 2)

    self.style_layers = len(inp_s)

    # Get the noise image and crop for each size
    inp_n = Input(shape=[im_size, im_size, 1])
    noi = [Activation('linear')(inp_n)]
    curr_size = im_size
    while curr_size > 4:
        curr_size = int(curr_size / 2)
        noi.append(Cropping2D(int(curr_size / 2))(noi[-1]))

    # Here do the actual generation stuff
    inp = Input(shape=[1])
    x = Dense(4 * 4 * im_size, kernel_initializer='ones', bias_initializer='zeros')(inp)
    x = Reshape([4, 4, im_size])(x)
    x = g_block(x, inp_s[0], noi[-1], im_size, u=False)

    if(im_size >= 1024):
        x = g_block(x, inp_s[-8], noi[7], 512)  # Size / 64
    if(im_size >= 512):
        x = g_block(x, inp_s[-7], noi[6], 384)  # Size / 64
    if(im_size >= 256):
        x = g_block(x, inp_s[-6], noi[5], 256)  # Size / 32
    if(im_size >= 128):
        x = g_block(x, inp_s[-5], noi[4], 192)  # Size / 16
    if(im_size >= 64):
        x = g_block(x, inp_s[-4], noi[3], 128)  # Size / 8

    x = g_block(x, inp_s[-3], noi[2], 64)  # Size / 4
    x = g_block(x, inp_s[-2], noi[1], 32)  # Size / 2
    x = g_block(x, inp_s[-1], noi[0], 16)  # Size

    x = Conv2D(filters=3, kernel_size=1, padding='same', activation='sigmoid', bias_initializer='zeros')(x)

    self.G = Model(inputs=inp_s + [inp_n, inp], outputs=x)

    return self.G
Developer: manicman1999 | Project: StyleGAN-Keras | Lines of code: 49 | Source file: mixed-stylegan.py


Example 12: build_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Cropping2D [as alias]
def build_model(img_rows, img_cols, activation='relu', kernel_initializer='he_normal'):
    inputs = Input((img_rows, img_cols, 1))
    conv1 = Conv2D(64, 3, activation=activation, padding='same', kernel_initializer=kernel_initializer)(inputs)
    conv1 = Conv2D(64, 3, activation=activation, padding='same', kernel_initializer=kernel_initializer)(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(128, 3, activation=activation, padding='same', kernel_initializer=kernel_initializer)(pool1)
    conv2 = Conv2D(128, 3, activation=activation, padding='same', kernel_initializer=kernel_initializer)(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(256, 3, activation=activation, padding='same', kernel_initializer=kernel_initializer)(pool2)
    conv3 = Conv2D(256, 3, activation=activation, padding='same', kernel_initializer=kernel_initializer)(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Conv2D(512, 3, activation=activation, padding='same', kernel_initializer=kernel_initializer)(pool3)
    conv4 = Conv2D(512, 3, activation=activation, padding='same', kernel_initializer=kernel_initializer)(conv4)
    drop4 = Dropout(0.5)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
    conv5 = Conv2D(1024, 3, activation=activation, padding='same', kernel_initializer=kernel_initializer)(pool4)
    conv5 = Conv2D(1024, 3, activation=activation, padding='same', kernel_initializer=kernel_initializer)(conv5)
    drop5 = Dropout(0.5)(conv5)
    up6 = Conv2D(512, 2, activation=activation, padding='same', kernel_initializer=kernel_initializer)(UpSampling2D(size=(2, 2))(drop5))
    ch, cw = get_crop_shape(drop4, up6)
    crop_drop4 = Cropping2D(cropping=(ch, cw))(drop4)
    merge6 = Concatenate(axis=3)([crop_drop4, up6])
    conv6 = Conv2D(512, 3, activation=activation, padding='same', kernel_initializer=kernel_initializer)(merge6)
    conv6 = Conv2D(512, 3, activation=activation, padding='same', kernel_initializer=kernel_initializer)(conv6)
    up7 = Conv2D(256, 2, activation=activation, padding='same', kernel_initializer=kernel_initializer)(UpSampling2D(size=(2, 2))(conv6))
    ch, cw = get_crop_shape(conv3, up7)
    crop_conv3 = Cropping2D(cropping=(ch, cw))(conv3)
    merge7 = Concatenate(axis=3)([crop_conv3, up7])
    conv7 = Conv2D(256, 3, activation=activation, padding='same', kernel_initializer=kernel_initializer)(merge7)
    conv7 = Conv2D(256, 3, activation=activation, padding='same', kernel_initializer=kernel_initializer)(conv7)
    up8 = Conv2D(128, 2, activation=activation, padding='same', kernel_initializer=kernel_initializer)(UpSampling2D(size=(2, 2))(conv7))
    ch, cw = get_crop_shape(conv2, up8)
    crop_conv2 = Cropping2D(cropping=(ch, cw))(conv2)
    merge8 = Concatenate(axis=3)([crop_conv2, up8])
    conv8 = Conv2D(128, 3, activation=activation, padding='same', kernel_initializer=kernel_initializer)(merge8)
    conv8 = Conv2D(128, 3, activation=activation, padding='same', kernel_initializer=kernel_initializer)(conv8)
    up9 = Conv2D(64, 2, activation=activation, padding='same', kernel_initializer=kernel_initializer)(UpSampling2D(size=(2, 2))(conv8))
    ch, cw = get_crop_shape(conv1, up9)
    crop_conv1 = Cropping2D(cropping=(ch, cw))(conv1)
    merge9 = Concatenate(axis=3)([crop_conv1, up9])
    conv9 = Conv2D(64, 3, activation=activation, padding='same', kernel_initializer=kernel_initializer)(merge9)
    conv9 = Conv2D(64, 3, activation=activation, padding='same', kernel_initializer=kernel_initializer)(conv9)
    conv9 = Conv2D(2, 3, activation=activation, padding='same', kernel_initializer=kernel_initializer)(conv9)
    ch, cw = get_crop_shape(inputs, conv9)
    conv9 = ZeroPadding2D(padding=(ch[0], cw[0]))(conv9)
    conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)
    model = Model(inputs, conv10)
    return model
Developer: ECP-CANDLE | Project: Benchmarks | Lines of code: 62 | Source file: unet.py

