Swift: reading each frame from the camera and running face detection


I was recently helping someone with a project whose main task is face detection through the camera.

GitHub repo: https://github.com/qugang/AVCaptureVideoTemplate

Using the iOS camera requires the AVFoundation framework; I won't go through everything it contains here.

The camera is driven by the AVCaptureSession class.

To receive every frame the camera delivers, implement the AVCaptureVideoDataOutputSampleBufferDelegate protocol, as sketched below.
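As a minimal sketch (the class and property names here are illustrative, not the project's), the view controller owns the session and declares conformance to the delegate protocol so it can receive frames:

import UIKit
import AVFoundation

// Minimal skeleton: the controller owns the session and receives frames
// through the sample-buffer delegate.
class CameraViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
  let captureSession = AVCaptureSession()
  var captureDevice: AVCaptureDevice?
  var previewLayer: AVCaptureVideoPreviewLayer?

  // Called for every frame once a video data output is attached to the session.
  func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
    // per-frame handling goes here
  }
}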

First, add code in viewDidLoad to find the camera device; once the front camera is found, start the session:

// A low-resolution preset is enough for face detection and cheaper to process.
captureSession.sessionPreset = AVCaptureSessionPresetLow
let devices = AVCaptureDevice.devices()
for device in devices {
  if (device.hasMediaType(AVMediaTypeVideo)) {
    // Pick the front-facing camera.
    if (device.position == AVCaptureDevicePosition.Front) {
      captureDevice = device as? AVCaptureDevice
      if captureDevice != nil {
        println("Capture Device found")
        beginSession()
      }
    }
  }
}
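One caveat the snippet does not cover: on iOS 8 and later the app also needs camera permission, otherwise the session will not deliver usable frames. A minimal sketch, assuming you request access before looking for the device:

// Sketch: explicitly request camera access (iOS 8+) before setting up the session.
// The completion handler may run on a background queue, so hop back to main.
AVCaptureDevice.requestAccessForMediaType(AVMediaTypeVideo) { granted in
  dispatch_async(dispatch_get_main_queue()) {
    if granted {
      // safe to search for the front camera and call beginSession() here
    } else {
      println("Camera access was denied")
    }
  }
}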

beginSession starts the capture session:

func beginSession() {
  var err : NSError? = nil
  // Attach the front camera as the session's input.
  captureSession.addInput(AVCaptureDeviceInput(device: captureDevice, error: &err))
  // Deliver 32BGRA frames to this controller on a background serial queue.
  let output = AVCaptureVideoDataOutput()
  let cameraQueue = dispatch_queue_create("cameraQueue", DISPATCH_QUEUE_SERIAL)
  output.setSampleBufferDelegate(self, queue: cameraQueue)
  output.videoSettings = [kCVPixelBufferPixelFormatTypeKey: kCVPixelFormatType_32BGRA]
  captureSession.addOutput(output)
  if err != nil {
    println("error: \(err?.localizedDescription)")
  }
  // Show the live camera feed behind the UI.
  previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
  previewLayer?.videoGravity = AVLayerVideoGravityResizeAspect
  previewLayer?.frame = self.view.bounds
  self.view.layer.addSublayer(previewLayer)
  captureSession.startRunning()
}

Once the session is running, implement the captureOutput delegate method:

func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
  if (self.isStart)
  {
    // Convert the sample buffer to a UIImage, then run face detection on it.
    let resultImage = sampleBufferToImage(sampleBuffer)
    let context = CIContext(options: [kCIContextUseSoftwareRenderer: true])
    let detector = CIDetector(ofType: CIDetectorTypeFace, context: context, options: [CIDetectorAccuracy: CIDetectorAccuracyHigh])
    let ciImage = CIImage(image: resultImage)
    // Orientation 6 tells the detector the frame is rotated (portrait capture).
    let results: NSArray = detector.featuresInImage(ciImage, options: [CIDetectorImageOrientation: 6])
    for r in results {
      let face: CIFaceFeature = r as! CIFaceFeature
      // Crop the detected face region out of the frame.
      let faceImage = UIImage(CGImage: context.createCGImage(ciImage, fromRect: face.bounds), scale: 1.0, orientation: .Right)
      NSLog("Face found at (%f,%f) of dimensions %fx%f", face.bounds.origin.x, face.bounds.origin.y, face.bounds.size.width, face.bounds.size.height)
      dispatch_async(dispatch_get_main_queue()) {
        if (self.isStart)
        {
          self.dismissViewControllerAnimated(true, completion: nil)
          self.didReceiveMemoryWarning()   // overridden in the full listing to stop the capture session
          self.callBack!(face: faceImage!)
        }
        self.isStart = false
      }
    }
  }
}

A CIDetector is run on each frame to pick out faces (the sampleBufferToImage helper that turns the CMSampleBuffer into a UIImage is included in the full listing at the end). CIDetector can also report blinking eyes and smiling faces; see Apple's official API documentation for the details.
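As an illustration (not part of this project), the same detector and ciImage from captureOutput above can be asked to classify smiles and eye blinks by passing two extra options; CIFaceFeature then exposes hasSmile, leftEyeClosed and rightEyeClosed:

// Sketch: enable smile and eye-blink classification on the existing detector.
let features = detector.featuresInImage(ciImage, options: [
  CIDetectorImageOrientation: 6,
  CIDetectorSmile: true,      // also classify smiles
  CIDetectorEyeBlink: true    // also classify closed eyes
])
for f in features {
  if let face = f as? CIFaceFeature {
    println("smile: \(face.hasSmile), eyes closed: \(face.leftEyeClosed)/\(face.rightEyeClosed)")
  }
}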

That is the key code. A 2-second delay is configured, and face detection only starts once those 2 seconds have elapsed.
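The delay itself is just a one-shot NSTimer (taken from the full listing below) that flips the isStart flag checked at the top of captureOutput:

// One-shot timer scheduled in viewDidLoad; after 2 seconds it sets the flag
// that captureOutput checks before running the face detector.
NSTimer.scheduledTimerWithTimeInterval(2, target: self, selector: "isStartTrue", userInfo: nil, repeats: false)

func isStartTrue() {
  self.isStart = true
}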

The full source:

//
//  ViewController.swift
//  AVSessionTest
//
//  Created by qugang on 15/7/8.
//  Copyright (c) 2015 qugang. All rights reserved.
//

import UIKit
import AVFoundation

class AVCaptireVideoPicController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
  var callBack: ((face: UIImage) -> ())?
  let captureSession = AVCaptureSession()
  var captureDevice: AVCaptureDevice?
  var previewLayer: AVCaptureVideoPreviewLayer?
  var pickUIImager: UIImageView = UIImageView(image: UIImage(named: "pick_bg"))
  var line: UIImageView = UIImageView(image: UIImage(named: "line"))
  var timer: NSTimer!
  var upOrdown = true
  var isStart = false

  override func viewDidLoad() {
    super.viewDidLoad()
    captureSession.sessionPreset = AVCaptureSessionPresetLow
    // Find the front-facing camera and start the session.
    let devices = AVCaptureDevice.devices()
    for device in devices {
      if (device.hasMediaType(AVMediaTypeVideo)) {
        if (device.position == AVCaptureDevicePosition.Front) {
          captureDevice = device as? AVCaptureDevice
          if captureDevice != nil {
            println("Capture Device found")
            beginSession()
          }
        }
      }
    }
    // Overlay: a frame image with a scanning line animated across it.
    pickUIImager.frame = CGRect(x: self.view.bounds.width / 2 - 100, y: self.view.bounds.height / 2 - 100, width: 200, height: 200)
    line.frame = CGRect(x: self.view.bounds.width / 2 - 100, y: self.view.bounds.height / 2 - 100, width: 200, height: 2)
    self.view.addSubview(pickUIImager)
    self.view.addSubview(line)
    timer = NSTimer.scheduledTimerWithTimeInterval(0.01, target: self, selector: "animationSate", userInfo: nil, repeats: true)
    // Wait 2 seconds before allowing face detection to run.
    NSTimer.scheduledTimerWithTimeInterval(2, target: self, selector: "isStartTrue", userInfo: nil, repeats: false)
  }

  func isStartTrue() {
    self.isStart = true
  }

  override func didReceiveMemoryWarning() {
    super.didReceiveMemoryWarning()
    captureSession.stopRunning()
  }

  // Moves the scanning line up and down inside the overlay frame.
  func animationSate() {
    if upOrdown {
      if (line.frame.origin.y >= pickUIImager.frame.origin.y + 200) {
        upOrdown = false
      } else {
        line.frame.origin.y += 2
      }
    } else {
      if (line.frame.origin.y <= pickUIImager.frame.origin.y) {
        upOrdown = true
      } else {
        line.frame.origin.y -= 2
      }
    }
  }

  func beginSession() {
    var err: NSError? = nil
    captureSession.addInput(AVCaptureDeviceInput(device: captureDevice, error: &err))
    // Deliver 32BGRA frames to this controller on a background serial queue.
    let output = AVCaptureVideoDataOutput()
    let cameraQueue = dispatch_queue_create("cameraQueue", DISPATCH_QUEUE_SERIAL)
    output.setSampleBufferDelegate(self, queue: cameraQueue)
    output.videoSettings = [kCVPixelBufferPixelFormatTypeKey: kCVPixelFormatType_32BGRA]
    captureSession.addOutput(output)
    if err != nil {
      println("error: \(err?.localizedDescription)")
    }
    previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    previewLayer?.videoGravity = AVLayerVideoGravityResizeAspect
    previewLayer?.frame = self.view.bounds
    self.view.layer.addSublayer(previewLayer)
    captureSession.startRunning()
  }

  func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
    if (self.isStart) {
      let resultImage = sampleBufferToImage(sampleBuffer)
      let context = CIContext(options: [kCIContextUseSoftwareRenderer: true])
      let detector = CIDetector(ofType: CIDetectorTypeFace, context: context, options: [CIDetectorAccuracy: CIDetectorAccuracyHigh])
      let ciImage = CIImage(image: resultImage)
      let results: NSArray = detector.featuresInImage(ciImage, options: [CIDetectorImageOrientation: 6])
      for r in results {
        let face: CIFaceFeature = r as! CIFaceFeature
        // Crop the detected face region out of the frame.
        let faceImage = UIImage(CGImage: context.createCGImage(ciImage, fromRect: face.bounds), scale: 1.0, orientation: .Right)
        NSLog("Face found at (%f,%f) of dimensions %fx%f", face.bounds.origin.x, face.bounds.origin.y, face.bounds.size.width, face.bounds.size.height)
        dispatch_async(dispatch_get_main_queue()) {
          if (self.isStart) {
            self.dismissViewControllerAnimated(true, completion: nil)
            self.didReceiveMemoryWarning()   // stops the capture session (see override above)
            self.callBack!(face: faceImage!)
          }
          self.isStart = false
        }
      }
    }
  }

  // Converts a BGRA sample buffer into a UIImage.
  private func sampleBufferToImage(sampleBuffer: CMSampleBuffer!) -> UIImage {
    let imageBuffer: CVImageBufferRef = CMSampleBufferGetImageBuffer(sampleBuffer)
    CVPixelBufferLockBaseAddress(imageBuffer, 0)
    let baseAddress = CVPixelBufferGetBaseAddress(imageBuffer)
    let bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer)
    let width = CVPixelBufferGetWidth(imageBuffer)
    let height = CVPixelBufferGetHeight(imageBuffer)
    let colorSpace: CGColorSpaceRef = CGColorSpaceCreateDeviceRGB()
    let bitsPerComponent = 8
    var bitmapInfo = CGBitmapInfo((CGBitmapInfo.ByteOrder32Little.rawValue | CGImageAlphaInfo.PremultipliedFirst.rawValue) as UInt32)
    let newContext = CGBitmapContextCreate(baseAddress, width, height, bitsPerComponent, bytesPerRow, colorSpace, bitmapInfo) as CGContextRef
    let imageRef: CGImageRef = CGBitmapContextCreateImage(newContext)
    CVPixelBufferUnlockBaseAddress(imageBuffer, 0)
    let resultImage = UIImage(CGImage: imageRef, scale: 1.0, orientation: UIImageOrientation.Right)!
    return resultImage
  }

  func imageResize(imageObj: UIImage, sizeChange: CGSize) -> UIImage {
    let hasAlpha = false
    let scale: CGFloat = 0.0 // 0 means use the main screen's scale factor
    UIGraphicsBeginImageContextWithOptions(sizeChange, !hasAlpha, scale)
    imageObj.drawInRect(CGRect(origin: CGPointZero, size: sizeChange))
    let scaledImage = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return scaledImage
  }
}
