I have a simple SwiftUI app using the SwiftUI life cycle, and I'm trying to take a photo automatically with AVFoundation. I'll eventually do this based on a condition or a timer - but for this example I just want to take a photo at launch and display it (not a preview layer). No user action should be required.
I clearly don't understand the correct setup and capture process.
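(For reference: the target's Info.plist has the NSCameraUsageDescription entry and camera access is granted on my test device, so as far as I can tell permissions aren't the issue. A minimal version of the permission check I assume should run before anything below - AVCaptureDevice.requestAccess(for:) being the standard call:)

import AVFoundation

// Assumed permission helper - not part of the project code below, just
// the check I believe must succeed before the session is configured.
// NSCameraUsageDescription must be in Info.plist or the app crashes on access.
func ensureCameraAccess(then onGranted: @escaping () -> Void) {
    switch AVCaptureDevice.authorizationStatus(for: .video) {
    case .authorized:
        onGranted()
    case .notDetermined:
        AVCaptureDevice.requestAccess(for: .video) { granted in
            if granted { DispatchQueue.main.async { onGranted() } }
        }
    default:
        print("Camera access denied or restricted")
    }
}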
import SwiftUI
import AVFoundation

struct ContentView: View {
    let dataStore = DataStore.shared
    @State private var captureSession = AVCaptureSession()
    @State private var backCamera: AVCaptureDevice?
    @State private var frontCamera: AVCaptureDevice?
    @State private var currentCamera: AVCaptureDevice?
    @State private var photoOutput: AVCapturePhotoOutput?
    @State private var capturedImage: UIImage?

    var body: some View {
        VStack {
            Text("Take a Photo Automatically")
                .padding()
            ZStack {
                RoundedRectangle(cornerRadius: 0)
                    .stroke(Color.blue, lineWidth: 4)
                    .frame(width: 320, height: 240, alignment: .center)
                Image(uiImage: dataStore.capturedImage)
            }
            Spacer()
        }
        .onAppear {
            if UIImagePickerController.isSourceTypeAvailable(.camera) {
                self.setupCaptureSession()
                self.setupDevices()
                self.setupInputOutput()
                self.startRunningCaptureSession()
            } else {
                print("No Camera is Available")
            }
        }
    }

    func setupCaptureSession() {
        captureSession.sessionPreset = AVCaptureSession.Preset.photo
    }//setupCaptureSession

    func setupDevices() {
        let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: .video, position: .unspecified)
        let devices = deviceDiscoverySession.devices
        for device in devices {
            if device.position == AVCaptureDevice.Position.back {
                backCamera = device
            } else if device.position == AVCaptureDevice.Position.front {
                frontCamera = device
            }//if else
        }//for in
        currentCamera = frontCamera
    }//setupDevices

    func setupInputOutput() {
        do {
            //you only get here if there is a camera ( ! ok )
            let captureDeviceInput = try AVCaptureDeviceInput(device: currentCamera!)
            captureSession.addInput(captureDeviceInput)
            photoOutput = AVCapturePhotoOutput()
            photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: { (success, error) in
            })
            captureSession.addOutput(photoOutput!)
            captureSession.commitConfiguration()
        } catch {
            print("Error creating AVCaptureDeviceInput:", error)
        }
    }//setupInputOutput

    func startRunningCaptureSession() {
        let settings = AVCapturePhotoSettings()
        captureSession.startRunning()
        photoOutput?.capturePhoto(with: settings, delegate: PhotoDelegate())
    }//startRunningCaptureSession
}//struct

class PhotoDelegate: NSObject, AVCapturePhotoCaptureDelegate {
    let dataStore = DataStore.shared
    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        guard let data = photo.fileDataRepresentation(),
              let image = UIImage(data: data) else {
            return
        }
        dataStore.capturedImage = image
    }
}//photo delegate

class DataStore {
    static let shared = DataStore()
    private init() {}
    @Published var capturedImage: UIImage = UIImage()
}//dataStore
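One thing I'm unsure about in the code above: I assume the view never refreshes because DataStore isn't an ObservableObject and the view doesn't observe it - @Published does nothing outside an ObservableObject. A sketch of what I think the observable version would look like (not verified):

import SwiftUI
import Combine

// Sketch: DataStore as an ObservableObject so @Published actually emits,
// plus the view observing it.
class DataStore: ObservableObject {
    static let shared = DataStore()
    private init() {}
    @Published var capturedImage: UIImage = UIImage()
}

struct ContentView: View {
    // @ObservedObject (or @StateObject on iOS 14+) so SwiftUI redraws
    // Image(uiImage: dataStore.capturedImage) when the delegate sets it.
    @ObservedObject var dataStore = DataStore.shared
    // ...rest of the view unchanged
    var body: some View {
        Image(uiImage: dataStore.capturedImage)
    }
}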
Any guidance would be appreciated. Xcode 12.5.1, iOS 14.5.
Second attempt at adding a sample:
Later edit: after correcting my approach with the changes Philip Dukhov provided, I still had a problem - the image was still so dark that objects in it were unrecognizable. After a lot of trial and error, it appears the camera needs some very short amount of time to set itself up before taking the picture. Although this doesn't seem like a sound programming plan, I added a small delay before the session capture. As little as 0.1 seconds seems to be enough. The capture now starts with:
func startRunningCaptureSession() {
    let settings = AVCapturePhotoSettings()
    captureSession.startRunning()
    //don't know why this is needed - but it works. Low number of tests at 0.1 all work
    DispatchQueue.main.asyncAfter(deadline: .now() + 0.2) {
        self.photoOutput?.capturePhoto(with: settings, delegate: self)
    }
}//start Running Capture Session
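A related thing I noticed about the function above: Apple's documentation describes startRunning() as a blocking call that shouldn't run on the main thread. A sketch of the same function hosted in a class, with the session started on a serial background queue (CameraService and sessionQueue are assumed names of mine, not from the project above):

import AVFoundation

// Sketch: startRunning() blocks whatever thread it runs on, so it is
// dispatched to a dedicated serial queue here.
final class CameraService: NSObject, AVCapturePhotoCaptureDelegate {
    let captureSession = AVCaptureSession()
    var photoOutput: AVCapturePhotoOutput?
    private let sessionQueue = DispatchQueue(label: "camera.sessionQueue")

    func startRunningCaptureSession() {
        let settings = AVCapturePhotoSettings()
        sessionQueue.async {
            self.captureSession.startRunning()   // blocks only this queue
            DispatchQueue.main.asyncAfter(deadline: .now() + 0.2) {
                self.photoOutput?.capturePhoto(with: settings, delegate: self)
            }
        }
    }

    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        // handle the photo as in PhotoDelegate above
    }
}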
If you know of a better way, please let me know.
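One idea I've considered instead of the fixed delay, but haven't fully tested: isAdjustingExposure on AVCaptureDevice is documented as key-value observable, so the capture could be triggered once auto-exposure settles rather than after an arbitrary wait. A rough sketch (ExposureWaiter is an assumed name of mine):

import AVFoundation

// Untested sketch: wait for auto-exposure to settle instead of a fixed delay.
// isAdjustingExposure is documented as KVO-observable on AVCaptureDevice.
final class ExposureWaiter {
    private var observation: NSKeyValueObservation?

    func capture(from device: AVCaptureDevice,
                 using output: AVCapturePhotoOutput,
                 delegate: AVCapturePhotoCaptureDelegate) {
        observation = device.observe(\.isAdjustingExposure, options: [.initial, .new]) { [weak self] dev, _ in
            guard !dev.isAdjustingExposure else { return }
            self?.observation = nil   // fire once
            DispatchQueue.main.async {
                // the caller must still keep the delegate alive elsewhere
                output.capturePhoto(with: AVCapturePhotoSettings(), delegate: delegate)
            }
        }
    }
}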