
Converting a trained Keras image classification model to Core ML and integrating it into iOS 11


Using the tutorial found at https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html, I trained a Keras model to distinguish between cats and dogs.

'''
Directory structure:

data/
    train/
        dogs/
            dog001.jpg
            dog002.jpg
            ...
        cats/
            cat001.jpg
            cat002.jpg
            ...
    validation/
        dogs/
            dog001.jpg
            dog002.jpg
            ...
        cats/
            cat001.jpg
            cat002.jpg
            ...
'''    
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
from PIL import Image
import numpy as np

# dimensions of our images.
img_width, img_height = 150, 150

train_data_dir = 'data/train'
validation_data_dir = 'data/validation'
nb_train_samples = 2000
nb_validation_samples = 800
epochs = 50
batch_size = 16

if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)

model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)

# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')

model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size)

model.save('first_try.h5')
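
As a quick sanity check of the saved weights, the model can be run on a single validation image (a sketch reusing the PIL and numpy imports above; it assumes the channels-last layout of the TensorFlow backend):

# Sanity check: flow_from_directory assigns class indices alphabetically,
# so the single sigmoid output is near 0 for 'cats' and near 1 for 'dogs'.
img = Image.open('data/validation/cats/cat001.jpg').resize((img_width, img_height))
x = np.expand_dims(np.asarray(img, dtype='float32') / 255.0, axis=0)
print(model.predict(x))  # array of shape (1, 1) with one probability-like value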

Using the coremltools documentation as a guide, I attempted to convert my model to the Core ML format:

import coremltools
import h5py

coreml_model = coremltools.converters.keras.convert(
    'first_try.h5',
    input_names='image',
    output_names='class',
    image_input_names='image',
    class_labels=['cat', 'dog'],
    is_bgr=True)
coreml_model.save('cats_dogs.mlmodel')

When I import the model into Xcode and run it with the following code (which works with the resnet50 and inceptionv3 models from Apple's website), the snippet guard let prediction = try? model.prediction(image: pixelBuffer!) else { print("error!"); return } prints "error!" and never reaches the textView.text = "I think this is a \(String(describing: prediction.classLabel))." line.

import UIKit
import Vision
import CoreML

class ViewController: UIViewController, UINavigationControllerDelegate {

    // MARK: - Properties

    @IBOutlet weak var imageView: UIImageView!
    @IBOutlet weak var textView: UITextView!

    let imagePicker = UIImagePickerController()

    // Assumed: Xcode generates a cats_dogs class from cats_dogs.mlmodel;
    // the original post does not show this declaration.
    let model = cats_dogs()

    // MARK: - ViewController

    override func viewDidLoad() {
        super.viewDidLoad()
        self.imagePicker.delegate = self
    }

    @IBAction func openImagePicker(_ sender: Any) {
        imagePicker.allowsEditing = false
        imagePicker.sourceType = .photoLibrary
        present(imagePicker, animated: true, completion: nil)
    }

    @IBAction func camera(_ sender: Any) {

        if !UIImagePickerController.isSourceTypeAvailable(.camera) {
            return
        }

        let cameraPicker = UIImagePickerController()
        cameraPicker.delegate = self
        cameraPicker.sourceType = .camera
        cameraPicker.allowsEditing = false

        present(cameraPicker, animated: true)
    }

}

extension ViewController: UIImagePickerControllerDelegate {
    func imagePickerControllerDidCancel(_ picker: UIImagePickerController) {
        dismiss(animated: true, completion: nil)
    }

    func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [String : Any]) {
        picker.dismiss(animated: true)
        textView.text = "Analyzing Image..."
        guard let image = info["UIImagePickerControllerOriginalImage"] as? UIImage else {
            return
        }

        // Resize the picked image to the model's expected 150x150 input.
        UIGraphicsBeginImageContextWithOptions(CGSize(width: 150, height: 150), true, 2.0)
        image.draw(in: CGRect(x: 0, y: 0, width: 150, height: 150))
        let newImage = UIGraphicsGetImageFromCurrentImageContext()!
        UIGraphicsEndImageContext()

        // Render the resized image into a CVPixelBuffer for Core ML.
        let attrs = [kCVPixelBufferCGImageCompatibilityKey: kCFBooleanTrue, kCVPixelBufferCGBitmapContextCompatibilityKey: kCFBooleanTrue] as CFDictionary
        var pixelBuffer : CVPixelBuffer?
        let status = CVPixelBufferCreate(kCFAllocatorDefault, Int(newImage.size.width), Int(newImage.size.height), kCVPixelFormatType_32ARGB, attrs, &pixelBuffer)
        guard (status == kCVReturnSuccess) else {
            return
        }

        CVPixelBufferLockBaseAddress(pixelBuffer!, CVPixelBufferLockFlags(rawValue: 0))
        let pixelData = CVPixelBufferGetBaseAddress(pixelBuffer!)

        let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
        let context = CGContext(data: pixelData, width: Int(newImage.size.width), height: Int(newImage.size.height), bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(pixelBuffer!), space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.noneSkipFirst.rawValue)

        context?.translateBy(x: 0, y: newImage.size.height)
        context?.scaleBy(x: 1.0, y: -1.0)

        UIGraphicsPushContext(context!)
        newImage.draw(in: CGRect(x: 0, y: 0, width: newImage.size.width, height: newImage.size.height))
        UIGraphicsPopContext()
        CVPixelBufferUnlockBaseAddress(pixelBuffer!, CVPixelBufferLockFlags(rawValue: 0))
        imageView.image = newImage

        guard let prediction = try? model.prediction(image: pixelBuffer!) else {
            print("error!")
            return
        }
        textView.text = "I think this is a \(String(describing: prediction.classLabel))."
    }
}

[Screenshot: Xcode error implementing the custom Keras-Core ML model]
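
Replacing the try? with a do-catch makes the thrown Core ML error visible instead of silently hitting the guard's else branch (a sketch of the same call; model is the generated cats_dogs instance assumed above):

do {
    // Same prediction call, but the thrown error is logged instead of discarded.
    let prediction = try model.prediction(image: pixelBuffer!)
    textView.text = "I think this is a \(prediction.classLabel)."
} catch {
    print("Prediction failed: \(error)")
}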

I have searched Stack Overflow extensively for a solution to this problem. Any help resolving it would be greatly appreciated!

===========================================================================
Edit #1:

Using print(try! model.prediction(image: pixelBuffer!) as Any), I printed the following error:

2017-07-13 15:33:49.034967-0400 cats_dogs[60441:1198094] fatal error: 'try!' expression unexpectedly raised an error: Error Domain=com.apple.CoreML Code=0 "Dimensions of layer 'output' is not the same size as the number of class labels." UserInfo={NSLocalizedDescription=Dimensions of layer 'output' is not the same size as the number of class labels.}

I am not sure what "Dimensions of layer 'output' is not the same size as the number of class labels" refers to.
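
To see which dimension the converter baked into the model, the saved Keras network can be reloaded and its output shape printed (a quick check against first_try.h5 from above):

from keras.models import load_model

# The network ends in Dense(1) + sigmoid, so the final layer has size 1,
# while two class labels ('cat', 'dog') were passed to the converter.
model = load_model('first_try.h5')
print(model.output_shape)  # prints (None, 1)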

===========================================================================

Edit #2:

Here is the code I used to convert the model to the .mlmodel format:

import coremltools
import h5py

output_labels = ['cat', 'dog']
coreml_model = coremltools.converters.keras.convert(
    'first_try.h5',
    input_names='image',
    image_input_names='image',
    class_labels=output_labels,
    is_bgr=False)
coreml_model.author = ''
coreml_model.short_description = 'Model to classify images as either cats or dogs'
coreml_model.input_description['image'] = 'Image of a cat or dog'
print(coreml_model)
coreml_model.save('cats_dogs.mlmodel')

Here is the terminal output:

0 : conv2d_1_input, <keras.engine.topology.InputLayer object at 0x1194c6c50>
1 : conv2d_1, <keras.layers.convolutional.Conv2D object at 0x1194c6c90>
2 : activation_1, <keras.layers.core.Activation object at 0x119515b90>
3 : max_pooling2d_1, <keras.layers.pooling.MaxPooling2D object at 0x119501e50>
4 : conv2d_2, <keras.layers.convolutional.Conv2D object at 0x119520cd0>
5 : activation_2, <keras.layers.core.Activation object at 0x1194e8150>
6 : max_pooling2d_2, <keras.layers.pooling.MaxPooling2D object at 0x11955cc50>
7 : conv2d_3, <keras.layers.convolutional.Conv2D object at 0x11955ce50>
8 : activation_3, <keras.layers.core.Activation object at 0x11954d9d0>
9 : max_pooling2d_3, <keras.layers.pooling.MaxPooling2D object at 0x119594cd0>
10 : flatten_1, <keras.layers.core.Flatten object at 0x1195a08d0>
11 : dense_1, <keras.layers.core.Dense object at 0x119579f10>
12 : activation_4, <keras.layers.core.Activation object at 0x1195c94d0>
13 : dense_2, <keras.layers.core.Dense object at 0x1195ea450>
14 : activation_5, <keras.layers.core.Activation object at 0x119614b10>

input {
  name: "image"
  shortDescription: "Image of a cat or dog"
  type {
    imageType {
      width: 150
      height: 150
      colorSpace: RGB
    }
  }
}
output {
  name: "output1"
  type {
    dictionaryType {
      stringKeyType {
      }
    }
  }
}
output {
  name: "classLabel"
  type {
    stringType {
    }
  }
}
predictedFeatureName: "classLabel"
predictedProbabilitiesName: "output1"
metadata {
  shortDescription: "Model to classify images as either cats or dogs"
  author: ""
}

1 Answer


    Not sure what "Dimensions of layer 'output' is not the same size as the number of class labels" refers to.

It means that the last layer of your model is not the same size as your class labels (which I assume is dimension 2). I suggest removing this parameter:

class_labels = output_labels

from your model conversion and seeing whether that solves the problem.
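
Something like the following (a sketch of the conversion without the classifier arguments; the remaining parameters are unchanged from your Edit #2):

import coremltools

# Without class_labels the converter exposes the single sigmoid unit as a
# plain output instead of a classifier, so there is no label count to mismatch.
coreml_model = coremltools.converters.keras.convert(
    'first_try.h5',
    input_names='image',
    image_input_names='image',
    is_bgr=False)
coreml_model.save('cats_dogs.mlmodel')

Alternatively, if you want to keep class_labels, the network itself would have to end in Dense(2) with a softmax activation (trained with categorical_crossentropy and class_mode='categorical') so that the output size matches the two labels.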
