Approach 1: load the model through the Vision framework
lazy var classificationRequest: [VNRequest] = {
    do {
        // Wrap the Core ML model so Vision can drive it.
        let model = try VNCoreMLModel(for: self.model.model)
        let classificationRequest = VNCoreMLRequest(model: model, completionHandler: self.handleClassification)
        return [classificationRequest]
    } catch {
        fatalError("Can't load Vision ML model: \(error)")
    }
}()
func handleClassification(request: VNRequest, error: Error?) {
    // ... process the Vision results here ...
}
func runCoreML(buffer: CVImageBuffer) {
    // Scale the camera buffer to the model's expected input size first.
    let newbuffer = self.resize(pixelBuffer: buffer)
    let classifierRequestHandler = VNImageRequestHandler(cvPixelBuffer: newbuffer!, options: [:])
    do {
        try classifierRequestHandler.perform(self.classificationRequest)
    } catch {
        print(error)
    }
}
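handleClassification is where the Vision results come back. A minimal sketch of what the handler could look like, assuming the model returns an MLMultiArray (as net_output in the second approach does); for a plain classifier the results would be VNClassificationObservation instead:

import Vision
import CoreML

func handleClassification(request: VNRequest, error: Error?) {
    if let error = error {
        print("Vision request failed: \(error)")
        return
    }
    // A model that outputs an MLMultiArray comes back wrapped in
    // VNCoreMLFeatureValueObservation.
    guard let observation = request.results?.first as? VNCoreMLFeatureValueObservation,
          let multiArray = observation.featureValue.multiArrayValue else {
        return
    }
    // Shape and meaning of the array depend entirely on the model.
    print("output shape: \(multiArray.shape)")
}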
Approach 2: use the model directly
public func predict(buffer: CVImageBuffer) {
    let newbuffer = self.resize(pixelBuffer: buffer)
    // Feed the resized buffer to the hand-written MLFeatureProvider below.
    let _input = ModelInput(image: newbuffer!)
    if let output = try? model.prediction(from: _input) {
        let mlMultiArray = output.net_output
        // ... process the MLMultiArray output here ...
    } else {
        print("error")
    }
}
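The elided part above reads values out of mlMultiArray. A small sketch, assuming the elements can be read as Double (the actual shape and data type depend on the model):

import CoreML

func dumpOutput(_ mlMultiArray: MLMultiArray) {
    // Int subscripting walks the array in row-major (flattened) order.
    for i in 0..<min(mlMultiArray.count, 10) {
        print("output[\(i)] = \(mlMultiArray[i].doubleValue)")
    }
}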
class ModelInput: MLFeatureProvider {
    /// Input image of scene to be classified as color (kCVPixelFormatType_32BGRA) image buffer, 224 pixels wide by 224 pixels high
    var image: CVPixelBuffer

    var featureNames: Set<String> {
        return ["image"]
    }

    func featureValue(for featureName: String) -> MLFeatureValue? {
        if featureName == "image" {
            return MLFeatureValue(pixelBuffer: image)
        }
        return nil
    }

    init(image: CVPixelBuffer) {
        self.image = image
    }
}
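Both approaches call self.resize(pixelBuffer:), which is not shown above. A minimal Core Image sketch, assuming the model wants a 224x224 BGRA buffer as the ModelInput comment states:

import CoreImage
import CoreVideo

func resize(pixelBuffer: CVPixelBuffer, width: Int = 224, height: Int = 224) -> CVPixelBuffer? {
    // Scale the source image to the target size.
    let ciImage = CIImage(cvPixelBuffer: pixelBuffer)
    let scaleX = CGFloat(width) / CGFloat(CVPixelBufferGetWidth(pixelBuffer))
    let scaleY = CGFloat(height) / CGFloat(CVPixelBufferGetHeight(pixelBuffer))
    let scaled = ciImage.transformed(by: CGAffineTransform(scaleX: scaleX, y: scaleY))

    // Allocate a BGRA destination buffer and render into it.
    var output: CVPixelBuffer?
    CVPixelBufferCreate(kCFAllocatorDefault, width, height, kCVPixelFormatType_32BGRA, nil, &output)
    guard let outputBuffer = output else { return nil }
    CIContext().render(scaled, to: outputBuffer)
    return outputBuffer
}

Note that this stretches the frame to the target size; if the model is sensitive to aspect ratio, crop before scaling instead.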