Files
voicediary/VoiceDiary/ViewModels/RecordingViewModel.swift
Felix Förtsch dca03214b0 initial VoiceDiary iOS app setup
SwiftUI + SwiftData + iCloud, Apple Speech transcription (German),
audio recording, summarization service protocol (LLM-ready),
localization scaffolding (EN/DE/ES/FR), basic tests.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-15 22:57:41 +01:00

90 lines
2.2 KiB
Swift

import AVFoundation
import Foundation
import SwiftData
/// Drives the voice-recording UI: starts/stops the audio recorder, files the
/// finished memo under today's diary entry, and kicks off speech transcription
/// in the background. Main-actor isolated because it mutates UI-observed state.
@Observable
@MainActor
final class RecordingViewModel {
    /// Mirrors the recorder's active state for the UI.
    var isRecording = false
    /// NOTE(review): never written anywhere in this class — the duration
    /// display goes through `formattedDuration`, which reads the recorder
    /// directly. Kept for interface compatibility; confirm whether any view
    /// still binds to it before removing.
    var recordingDuration: TimeInterval = 0
    /// Most recent error from recording or transcription, surfaced to the UI.
    var error: Error?

    private let recorder = AudioRecorderService()
    private let transcriptionService = TranscriptionService()
    private var currentRecordingURL: URL?

    /// Live recording duration rendered as "m:ss", read from the recorder.
    var formattedDuration: String {
        // Clamp to zero so a transiently negative duration can never render
        // as e.g. "-1:-05".
        let totalSeconds = max(0, Int(recorder.recordingDuration))
        return String(format: "%d:%02d", totalSeconds / 60, totalSeconds % 60)
    }

    /// Begins a new audio recording; stores any thrown error in `error`.
    func startRecording() {
        do {
            currentRecordingURL = try recorder.startRecording()
            isRecording = true
            error = nil
        } catch {
            self.error = error
        }
    }

    /// Stops the recorder, attaches the resulting memo to today's diary
    /// entry (creating the entry if needed), and starts transcription
    /// asynchronously.
    /// - Parameter context: SwiftData context used to fetch/insert the entry.
    func stopRecording(context: ModelContext) {
        // Clear the flag unconditionally: the recorder has been stopped even
        // when it yields no result. The previous early-return-before-reset
        // left the UI stuck in the "recording" state on a nil result.
        isRecording = false
        guard let result = recorder.stopRecording() else { return }
        let today = Calendar.current.startOfDay(for: .now)
        let entry = fetchOrCreateEntry(for: today, context: context)
        let memo = VoiceMemo(
            audioFileName: result.url.lastPathComponent,
            duration: result.duration
        )
        entry.memos.append(memo)
        entry.updatedAt = .now
        Task {
            await transcribeMemo(memo, context: context)
        }
    }

    /// Returns the diary entry whose `date` falls on the given calendar day,
    /// creating and inserting a fresh one when none exists yet.
    private func fetchOrCreateEntry(for date: Date, context: ModelContext) -> DiaryEntry {
        let calendar = Calendar.current  // hoisted: used for both bounds
        let startOfDay = calendar.startOfDay(for: date)
        let endOfDay = calendar.date(byAdding: .day, value: 1, to: startOfDay)!
        var descriptor = FetchDescriptor<DiaryEntry>(
            predicate: #Predicate { $0.date >= startOfDay && $0.date < endOfDay }
        )
        descriptor.fetchLimit = 1
        // NOTE(review): `try?` hides fetch failures — a failed fetch falls
        // through and creates a duplicate entry for the day. Acceptable as a
        // best-effort fallback, but consider logging the error.
        if let existing = try? context.fetch(descriptor).first {
            return existing
        }
        let entry = DiaryEntry(date: date)
        context.insert(entry)
        return entry
    }

    /// Transcribes the memo's audio, requesting speech authorization first if
    /// it has not been determined yet. Writes the transcript onto the memo
    /// and surfaces failures via `error`.
    private func transcribeMemo(_ memo: VoiceMemo, context: ModelContext) async {
        if transcriptionService.authorizationStatus == .notDetermined {
            await transcriptionService.requestAuthorization()
        }
        guard transcriptionService.authorizationStatus == .authorized else {
            // Silently skip when unauthorized: the memo keeps its audio but
            // never gets a transcript. Consider surfacing this state to the UI.
            return
        }
        memo.isTranscribing = true
        defer { memo.isTranscribing = false }  // clears even when transcribe throws
        do {
            let transcript = try await transcriptionService.transcribe(audioURL: memo.audioURL)
            memo.transcript = transcript
            memo.entry?.updatedAt = .now
        } catch {
            self.error = error
        }
    }
}