diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 78b1811..3b3ec26 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -1,32 +1,55 @@ -name: Run Tests +name: Tests on: push: pull_request: jobs: - test: + test-ios: + name: Tests (iOS) runs-on: macos-26 - + steps: - name: Checkout code uses: actions/checkout@v4 - + - name: List available simulators run: xcrun simctl list devices available - - - name: Run tests + + - name: Run tests on iOS Simulator run: | xcodebuild test \ -scheme AudioSnapshotTesting \ -destination 'platform=iOS Simulator,name=iPad Pro 13-inch (M5),OS=latest' \ - -resultBundlePath TestResults.xcresult - continue-on-error: true + -resultBundlePath TestResults-iOS.xcresult + + - name: Upload test results + if: always() + uses: actions/upload-artifact@v4 + with: + name: test-results-iOS + path: TestResults-iOS.xcresult + retention-days: 30 + + test-macos: + name: Tests (macOS) + runs-on: macos-26 + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Run tests on macOS + run: | + xcodebuild test \ + -scheme AudioSnapshotTesting \ + -destination 'platform=macOS' \ + -resultBundlePath TestResults-macOS.xcresult - name: Upload test results if: always() uses: actions/upload-artifact@v4 with: - name: test-results - path: TestResults.xcresult + name: test-results-macOS + path: TestResults-macOS.xcresult retention-days: 30 diff --git a/.gitignore b/.gitignore index 0023a53..1e54057 100644 --- a/.gitignore +++ b/.gitignore @@ -6,3 +6,4 @@ DerivedData/ .swiftpm/configuration/registries.json .swiftpm/xcode/package.xcworkspace/contents.xcworkspacedata .netrc +.claude/settings.local.json diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/Package.resolved b/Package.resolved deleted file mode 100644 index 010f091..0000000 --- a/Package.resolved +++ /dev/null @@ -1,24 +0,0 @@ -{ - "originHash" : "7f45e81cfb99a5981a5c389727fb68a24ce2a2a08b558a3939346e3a27b16c7e", - "pins" : [ - { - "identity" : "swift-snapshot-testing", - "kind" : "remoteSourceControl", - "location" : "https://github.com/pointfreeco/swift-snapshot-testing", - "state" : { - "revision" : "42a086182681cf661f5c47c9b7dc3931de18c6d7", - "version" : "1.17.6" - } - }, - { - "identity" : "swift-syntax", - "kind" : "remoteSourceControl", - "location" : "https://github.com/swiftlang/swift-syntax", - "state" : { - "revision" : "0687f71944021d616d34d922343dcef086855920", - "version" : "600.0.1" - } - } - ], - "version" : 3 -} diff --git a/Package.swift b/Package.swift index faeddc2..1111a5c 100644 --- a/Package.swift +++ b/Package.swift @@ -1,4 +1,4 @@ -// swift-tools-version: 6.0 +// swift-tools-version: 6.2 // The swift-tools-version declares the minimum version of Swift required to build this package. import PackageDescription @@ -12,15 +12,9 @@ let package = Package( targets: ["AudioSnapshotTesting"] ) ], - dependencies: [ - .package(url: "https://github.com/pointfreeco/swift-snapshot-testing", from: "1.17.6") - ], targets: [ .target( - name: "AudioSnapshotTesting", - dependencies: [ - .product(name: "SnapshotTesting", package: "swift-snapshot-testing") - ] + name: "AudioSnapshotTesting" ), .testTarget( name: "AudioSnapshotTestingTests", diff --git a/README.md b/README.md index 66a22d4..c988c31 100644 --- a/README.md +++ b/README.md @@ -1,20 +1,18 @@ # AudioSnapshotTesting -A Swift package for [SnapshotTesting](https://github.com/pointfreeco/swift-snapshot-testing) audio buffers in your iOS/macOS apps. +A lightweight Swift package for snapshot testing audio buffers in your iOS/macOS apps with visual representations. 
- +[![Tests](https://github.com/infinum/AudioSnapshotTesting/actions/workflows/tests.yml/badge.svg)](https://github.com/infinum/AudioSnapshotTesting/actions/workflows/tests.yml) ## Description -AudioSnapshotTesting provides snapshot strategies for testing audio-related functionality through visual snapshots. This makes it easier to verify audio processing and manipulation in a visual, deterministic way. +AudioSnapshotTesting is a standalone snapshot testing library (no external dependencies) that works with Swift Testing framework. It provides strategies for waveform, spectrum, and spectrogram visualization, making it easy to verify audio processing and manipulation through deterministic snapshot comparisons. Snapshots are stored as lossless ALAC-encoded audio files with optional PNG visualizations for failure analysis. ## Table of contents * [Getting started](#getting-started) * [Usage](#usage) +* [Audio framework integration](#audio-framework-integration) * [Contributing](#contributing) * [License](#license) * [Credits](#credits) @@ -36,37 +34,95 @@ dependencies: [ ```swift import AudioSnapshotTesting -import XCTest - -class MyAudioTests: XCTestCase { - func testAudioProcessing() { - let buffer = // your AVAudioPCMBuffer - assertSnapshot( - of: buffer, - as: .waveform(width: 3000, height: 800) - ) - } +import Testing + +@Test +func testAudioProcessing() async { + let buffer = // your AVAudioPCMBuffer + await assertAudioSnapshot(of: buffer, named: "my-audio") +} +``` + +### With Visualization Strategy + +```swift +@Test(.audioSnapshot(strategy: .waveform(width: 3000, height: 800))) +func testWithVisualization() async { + let buffer = // your AVAudioPCMBuffer + await assertAudioSnapshot(of: buffer, named: "waveform") +} +``` + +### Recording Mode + +```swift +@Test(.audioSnapshot(record: true)) +func testRecording() async { + let buffer = // your AVAudioPCMBuffer + await assertAudioSnapshot(of: buffer, named: "recording") +} +``` + +### Multiple Buffers (Overlay) + 
+```swift +@Test(.audioSnapshot(strategy: .waveform(width: 1000, height: 300))) +func testOverlay() async { + let buffer1 = // first AVAudioPCMBuffer + let buffer2 = // second AVAudioPCMBuffer + await assertAudioSnapshot(of: (buffer1, buffer2), named: "overlay") } ``` -Snapshot audio tests are snapshot tested itself. Please find many examples in: [AudioSnapshotTestingTests.swift](Tests/AudioSnapshotTestingTests/AudioSnapshotTestingTests.swift) +More examples can be found in: [AudioSnapshotTestingTests.swift](Tests/AudioSnapshotTestingTests/AudioSnapshotTestingTests.swift) + +### Requirements + +- iOS 16+ / macOS 13+ (for full visualization support) +- Swift 6.2+ (for Swift Testing `Attachment` API) +- No external dependencies + +### Snapshot Format + +Snapshots are stored in `__AudioSnapshots__/` directory (adjacent to test files) as ALAC-encoded CAF files: +- Single buffer: `__AudioSnapshots__/{TestFile}/snapshot-name.caf` +- Multiple buffers: `__AudioSnapshots__/{TestFile}/snapshot-name.1.caf`, `snapshot-name.2.caf`, etc. 
### Features -- [x] `AVAudioPCMBuffer` waveform snapshots -- [x] `AVAudioPCMBuffer` overlayed waveform snapshots -- [x] Spectrogram -- [x] Spectra -- [x] Different waveform rendering strategies -- [x] Test against other reference implementations and with known audio files -- [ ] Documentation -- [ ] Mention JUCE -- [ ] Link blog post and talk -- [ ] review stashes -- [x] Add a link to swift snapshot testing -- [ ] Multi level comparison (first hash, then data, then image) -- [ ] Use accelerate in downsampling -- [x] Add file strategy +- [x] **Swift Testing integration** - Works with Swift Testing framework via `AudioSnapshotTrait` +- [x] **Waveform snapshots** - Visualize `AVAudioPCMBuffer` as waveforms +- [x] **Multiple buffer overlays** - Compare and overlay multiple buffers with different colors +- [x] **Spectrogram visualization** - Time-frequency representation of audio +- [x] **Spectrum visualization** - Frequency domain analysis +- [x] **ALAC compression** - Snapshots stored as lossless ALAC-encoded CAF files +- [x] **Configurable bit depth** - 16-bit (default) or 32-bit ALAC encoding +- [x] **Auto-recording** - Missing snapshots are automatically recorded (test still fails to prevent CI passing) +- [x] **Visualization on failure** - Optional PNG visualizations generated on comparison failures +- [x] **macOS auto-open** - Optional automatic opening of visualizations in Preview + +## Audio Framework Integration + +AudioSnapshotTesting can be used with any testing framework or audio library that can output `AVAudioPCMBuffer`. 
+### AudioKit (Offline Rendering)
+
+```swift
+import AudioSnapshotTesting
+import AudioKit
+import Testing
+
+@Test(.audioSnapshot(strategy: .waveform(width: 3000, height: 800)))
+func testAudioKitOfflineRendering() async throws {
+    var oscillator = Oscillator(frequency: 440)
+    var engine = AudioEngine()
+    engine.output = oscillator
+
+    var data = try engine.startTest(totalDuration: 5)
+    data.append(engine.render(duration: 5))
+
+    await assertAudioSnapshot(of: data, named: "audiokit-sine-440hz")
+}
+```
 
 ## Contributing
diff --git a/Sources/AudioSnapshotTesting/AudioSnapshotTesting.swift b/Sources/AudioSnapshotTesting/AudioSnapshotTesting.swift
deleted file mode 100644
index 4502dcb..0000000
--- a/Sources/AudioSnapshotTesting/AudioSnapshotTesting.swift
+++ /dev/null
@@ -1,205 +0,0 @@
-import AVFAudio
-import SwiftUI
-import Accelerate
-
-@_exported import SnapshotTesting
-
-/// Specifies the amplitude scaling method for spectrograms.
-public enum AmplitudeScale {
-    /// Linear amplitude scale (raw FFT magnitudes normalized to 0...1)
-    case linear
-    /// Logarithmic amplitude scale in decibels with specified dynamic range
-    /// - Parameter range: The dB range from minimum to maximum (e.g., 120 means -120dB to 0dB)
-    case logarithmic(range: Float)
-}
-
-#if os(macOS)
-public typealias PlatformImage = NSImage
-typealias PlatformView = NSView
-typealias PlatformColor = NSColor
-typealias PlatformHostingView = NSHostingView
-#elseif os(iOS)
-public typealias PlatformImage = UIImage
-typealias PlatformView = UIView
-typealias PlatformColor = UIColor
-typealias PlatformHostingView = _UIHostingView
-#endif
-
-/// Generates a audio file snapshot of the given `AVAudioPCMBuffer.
-extension Snapshotting where Value == AVAudioPCMBuffer, Format == Data {
-    public static func audio() -> Snapshotting {
-        return Snapshotting(
-            pathExtension: "wav",
-            diffing: Diffing(
-                toData: { $0 },
-                fromData: { $0 },
-                diff: { old, new in
-                    old == new ? 
nil : ("Audio buffers differ", []) - } - ), - asyncSnapshot: { buffer in - Async { callback in - let tempURL = URL(fileURLWithPath: NSTemporaryDirectory()) - .appendingPathComponent(UUID().uuidString) - .appendingPathExtension("wav") - - do { - let file = try AVAudioFile(forWriting: tempURL, settings: buffer.format.settings) - try file.write(from: buffer) - if #available(iOS 18.0, macOS 15, *) { - file.close() - } - defer { - try? FileManager.default.removeItem(at: tempURL) - } - - let data = try Data(contentsOf: tempURL) - callback(data) - } catch { - fatalError("Failed to create audio snapshot: \(error)") - } - } - } - ) - } -} - -@MainActor -public extension Snapshotting where Format == PlatformImage, Value == (AVAudioPCMBuffer, AVAudioPCMBuffer) { - /// Generates a overlayed waveform snapshot of the given tuple of `AVAudioPCMBuffer`s. - /// - Parameters: - /// - width: The width of the resulting image. - /// - height: The height of the resulting image. - /// - strategy: The strategy to use when generating the waveform. Defaults to `.joinedLines`. - /// - mono: A boolean indicating whether to mix down to a mono signal before generating the waveform. Defaults to `true`. 
- static func waveform(width: Int, height: Int, strategy: WaveformStrategy = .joinedLines, mono: Bool = true) -> Snapshotting { - Snapshotting.image(size: .init(width: width, height: height)) - .pullback { buffer1, buffer2 in - let verticalPadding: CGFloat = 4 - let waveformHeight = CGFloat(height) - (verticalPadding * 2) - let waveform1 = MultiChannelWaveformView( - data: buffer1.reduce(bucketCount: width, mono: mono), - height: waveformHeight, - color: .red, - strategy: strategy - ) - let waveform2 = MultiChannelWaveformView( - data: buffer2.reduce(bucketCount: width, mono: mono), - height: waveformHeight, - color: .green, - strategy: strategy - ) - let waveform = ZStack { - waveform1 - waveform2 - } - .padding(.vertical, verticalPadding) - .background(Color.black) - return PlatformHostingView(rootView: waveform.environment(\.colorScheme, .light)) - } - } -} - -@MainActor -public extension Snapshotting where Format == PlatformImage, Value == AVAudioPCMBuffer { - /// Generates a waveform snapshot of the given `AVAudioPCMBuffer`. - /// - Parameters: - /// - width: The width of the resulting image. - /// - height: The height of the resulting image. - /// - strategy: The strategy to use when generating the waveform. Defaults to `.joinedLines`. - /// - mono: A boolean indicating whether to mix down to a mono signal before generating the waveform. Defaults to `true`. 
- static func waveform(width: Int, height: Int, strategy: WaveformStrategy = .joinedLines, mono: Bool = true) -> Snapshotting { - Snapshotting.image(size: .init(width: width, height: height)) - .pullback { buffer in - let verticalPadding: CGFloat = 4 - let waveformHeight = CGFloat(height) - (verticalPadding * 2) - let waveform = MultiChannelWaveformView( - data: buffer.reduce(bucketCount: width, mono: mono), - height: waveformHeight, - color: .red, - strategy: strategy - ) - .padding(.vertical, verticalPadding) - .background(Color.black) - return PlatformHostingView(rootView: waveform.environment(\.colorScheme, .light)) - } - } - - /// Generates a frequency spectrum of the given `AVAudioPCMBuffer`. - /// - Parameters: - /// - width: The width of the resulting image. - /// - height: The height of the resulting image. - /// - window: An optional array of floats representing the window function to apply before computing the FFT. If not provided, a Hann window will be used by default. - /// - threshold: A float value between 0 and 1 that determines the minimum amplitude required for a frequency bin to be included in the resulting image. - @available(iOS 16, macOS 13, *) - static func spectrum( - width: Int, - height: Int, - window: [Float]? = nil, - threshold: Float = 0.005 - ) -> Snapshotting { - Snapshotting.image(size: .init(width: width, height: height)) - .pullback { buffer in - let effectiveWindow = window ?? createHannWindow(size: Int(buffer.frameLength)) - let data = buffer - .spectrum(window: effectiveWindow) - .filter { $0.amplitude > threshold } - let spectrum = SpectrumView(data: data, height: CGFloat(height)) - return PlatformHostingView(rootView: spectrum.environment(\.colorScheme, .light)) - } - } - - /// Generates a spectrogram of the given `AVAudioPCMBuffer`. - /// - Parameters: - /// - hopSize: The number of audio frames between successive spectral frames. - /// - frequencyCount: The number of frequency bins to include in each spectral frame. 
- /// - window: An optional array of floats representing the window function to apply before computing the FFT. If not provided, a Hann window will be used by default. - /// - amplitudeScale: The amplitude scaling method. Defaults to `.logarithmic(range: 120)` for standard 120 dB dynamic range. Use `.linear` for raw FFT magnitudes. - /// - imageWidth: The width of the resulting snapshot image in pixels. Defaults to 1000. - @available(iOS 16, macOS 13, *) - static func spectrogram( - hopSize: Int, - frequencyCount: Int, - window: [Float]? = nil, - amplitudeScale: AmplitudeScale = .logarithmic(range: 120), - imageWidth: Int = 1000 - ) -> Snapshotting { - let height = frequencyCount - return Snapshotting.image(size: .init(width: imageWidth, height: height)) - .pullback { buffer in - let fftSize = frequencyCount * 2 - let lastBucketStart = Int(buffer.frameLength) - fftSize - - // Calculate number of FFT windows that fit in the buffer - // We need at least fftSize samples for each window, starting at position 0 - // The last window starts at (frameLength - fftSize), and we hop by hopSize - // +1 accounts for the initial window at position 0 - let width = 1 + (lastBucketStart / hopSize) - - let effectiveWindow = window ?? 
createHannWindow(size: fftSize) - let data = buffer.spectrogram(fftSize: fftSize, hopSize: hopSize, width: width, window: effectiveWindow, amplitudeScale: amplitudeScale) - - // Calculate bin size and max frequency based on sample rate and frequency count - let binSize = buffer.format.sampleRate / Double(fftSize) - let maxFrequency = Int(binSize * Double(frequencyCount)) - - // Calculate duration from buffer length and sample rate - let duration = Double(buffer.frameLength) / buffer.format.sampleRate - - let spectrogram = SpectrogramView(data: data, width: width, height: height, maxFrequency: maxFrequency, duration: duration) - return PlatformHostingView(rootView: spectrogram.environment(\.colorScheme, .light)) - } - } -} - -/// Creates a normalized Hann window of the specified size -/// - Parameter size: The number of samples in the window -/// - Returns: An array of Float values representing the Hann window function -private func createHannWindow(size: Int) -> [Float] { - vDSP.window( - ofType: Float.self, - usingSequence: .hanningNormalized, - count: size, - isHalfWindow: false - ) -} diff --git a/Sources/AudioSnapshotTesting/Core/AmplitudeScale.swift b/Sources/AudioSnapshotTesting/Core/AmplitudeScale.swift new file mode 100644 index 0000000..26c67fb --- /dev/null +++ b/Sources/AudioSnapshotTesting/Core/AmplitudeScale.swift @@ -0,0 +1,8 @@ +/// Specifies the amplitude scaling method for spectrograms. 
+public enum AmplitudeScale: Sendable { + /// Linear amplitude scale (raw FFT magnitudes normalized to 0...1) + case linear + /// Logarithmic amplitude scale in decibels with specified dynamic range + /// - Parameter range: The dB range from minimum to maximum (e.g., 120 means -120dB to 0dB) + case logarithmic(range: Float) +} diff --git a/Sources/AudioSnapshotTesting/Core/AudioSnapshotTesting.swift b/Sources/AudioSnapshotTesting/Core/AudioSnapshotTesting.swift new file mode 100644 index 0000000..b8dda5a --- /dev/null +++ b/Sources/AudioSnapshotTesting/Core/AudioSnapshotTesting.swift @@ -0,0 +1,227 @@ +import AVFAudio +import Foundation +import SwiftUI +import Testing + +#if os(macOS) +import AppKit +#endif + +/// Asserts that an audio buffer matches a recorded snapshot. +/// +/// - Parameters: +/// - buffer: The audio buffer to snapshot. +/// - name: The name for the snapshot file (without extension). +/// - sourceLocation: The source location (automatically captured). +public func assertAudioSnapshot( + of buffer: AVAudioPCMBuffer, + named name: String, + sourceLocation: SourceLocation = #_sourceLocation +) async { + await assertAudioSnapshot( + of: [buffer], + named: name, + sourceLocation: sourceLocation + ) +} + +/// Asserts that multiple audio buffers match recorded snapshots. +/// +/// - Parameters: +/// - buffers: The audio buffers to snapshot. +/// - name: The name for the snapshot file (without extension). +/// - sourceLocation: The source location (automatically captured). +public func assertAudioSnapshot( + of buffers: [AVAudioPCMBuffer], + named name: String, + sourceLocation: SourceLocation = #_sourceLocation +) async { + let trait = AudioSnapshotConfiguration.current ?? 
AudioSnapshotTrait() + + do { + try await performSnapshot( + buffers: buffers, + trait: trait, + name: name, + sourceLocation: sourceLocation + ) + } catch { + Issue.record("Snapshot error: \(error.localizedDescription)", sourceLocation: sourceLocation) + } +} + +private struct SnapshotContext { + let directory: URL + let sourceLocation: SourceLocation + let name: String + let trait: AudioSnapshotTrait + + init( + name: String, + trait: AudioSnapshotTrait, + sourceLocation: SourceLocation + ) { + self.directory = SnapshotFileManager.snapshotDirectory(filePath: sourceLocation._filePath) + self.sourceLocation = sourceLocation + self.name = name + self.trait = trait + } + + func snapshotPath(index: Int, count: Int) -> URL { + let suffix = count > 1 ? ".\(index + 1)" : "" + let fileName = "\(name)\(suffix).caf" + return SnapshotFileManager.snapshotPath(directory: directory, fileName: fileName) + } + + func visualizationPath() -> URL { + SnapshotFileManager.temporaryFilePath(fileName: "\(name).png") + } +} + +private func performSnapshot( + buffers: [AVAudioPCMBuffer], + trait: AudioSnapshotTrait, + name: String, + sourceLocation: SourceLocation +) async throws { + guard !buffers.isEmpty else { return } + + let context = SnapshotContext( + name: name, + trait: trait, + sourceLocation: sourceLocation + ) + + let missingSnapshots = findMissingSnapshots(buffers: buffers, context: context) + let shouldRecord = trait.record || !missingSnapshots.isEmpty + + if shouldRecord { + try await recordSnapshots( + buffers: buffers, + context: context, + explicit: trait.record, + missingIndices: missingSnapshots + ) + } else { + try await verifySnapshots(buffers: buffers, context: context) + } +} + +private func findMissingSnapshots(buffers: [AVAudioPCMBuffer], context: SnapshotContext) -> [Int] { + var missing: [Int] = [] + for (index, _) in buffers.enumerated() { + let path = context.snapshotPath(index: index, count: buffers.count) + if !SnapshotFileManager.fileExists(at: path) { + 
missing.append(index) + } + } + return missing +} + +private func recordSnapshots( + buffers: [AVAudioPCMBuffer], + context: SnapshotContext, + explicit: Bool, + missingIndices: [Int] +) async throws { + try SnapshotFileManager.createDirectoryIfNeeded(context.directory) + + let bufferCount = buffers.count + let indicesToRecord = explicit ? Array(buffers.indices) : missingIndices + + for index in indicesToRecord { + let path = context.snapshotPath(index: index, count: bufferCount) + try AudioFileWriter.write(buffer: buffers[index], to: path, bitDepth: context.trait.bitDepth) + } + + var message: String + if explicit { + message = "Recorded snapshot(s). Re-run with record: false to verify." + } else { + message = bufferCount > 1 + ? "Recorded missing snapshot(s) for buffer(s): \(missingIndices.map { $0 + 1 }.map(String.init).joined(separator: ", ")). Re-run to verify." + : "No reference snapshot found. Recorded new snapshot. Re-run to verify." + } + + if let strategy = context.trait.strategy { + let visualizationMessage = try await generateVisualization( + buffers: buffers, + strategy: strategy, + context: context + ) + message += visualizationMessage + } + Issue.record(Comment(rawValue: message), sourceLocation: context.sourceLocation) +} + +private func verifySnapshots(buffers: [AVAudioPCMBuffer], context: SnapshotContext) async throws { + let diffs = try compareSnapshots(buffers: buffers, context: context) + guard !diffs.isEmpty else { return } + + let message = try await buildFailureMessage( + buffers: buffers, + diffs: diffs, + context: context + ) + Issue.record(Comment(rawValue: message), sourceLocation: context.sourceLocation) +} + +private func compareSnapshots(buffers: [AVAudioPCMBuffer], context: SnapshotContext) throws -> [(index: Int, message: String)] { + var diffs: [(index: Int, message: String)] = [] + + for (index, buffer) in buffers.enumerated() { + let path = context.snapshotPath(index: index, count: buffers.count) + if let diffMessage = try 
AudioDataComparator.compare(expectedURL: path, actual: buffer, bitDepth: context.trait.bitDepth) { + diffs.append((index, diffMessage)) + } + } + + return diffs +} + +private func buildFailureMessage( + buffers: [AVAudioPCMBuffer], + diffs: [(index: Int, message: String)], + context: SnapshotContext +) async throws -> String { + let bufferCount = buffers.count + var message = bufferCount > 1 ? "Audio snapshots differ." : diffs[0].message + + if bufferCount > 1 { + for diff in diffs { + message += "\nBuffer \(diff.index + 1): \(diff.message)" + } + } + + if let strategy = context.trait.strategy { + let visualizationMessage = try await generateVisualization( + buffers: buffers, + strategy: strategy, + context: context + ) + message += "\nFailure visualization:" + visualizationMessage + } + + return message +} + +private func generateVisualization( + buffers: [AVAudioPCMBuffer], + strategy: VisualisationStrategy, + context: SnapshotContext +) async throws -> String { + let visualData = try await VisualizationGenerator.generateImage(for: buffers, strategy: strategy) + let tempPath = context.visualizationPath() + try SnapshotFileManager.writeFile(visualData, to: tempPath) + Attachment.record(visualData, named: "\(context.name).png", sourceLocation: context.sourceLocation) + + #if os(macOS) + // During developemnt, it is useful to auto open + // generated file for easy inspection + if context.trait.autoOpen { + NSWorkspace.shared.open(tempPath) + } + #endif + + return " file://\(tempPath.path)" +} diff --git a/Sources/AudioSnapshotTesting/Core/AudioSnapshotTrait.swift b/Sources/AudioSnapshotTesting/Core/AudioSnapshotTrait.swift new file mode 100644 index 0000000..c00eaa7 --- /dev/null +++ b/Sources/AudioSnapshotTesting/Core/AudioSnapshotTrait.swift @@ -0,0 +1,65 @@ +import Testing + +/// Bit depth for ALAC audio encoding. 
+public enum AudioBitDepth: Int, Sendable { + case bits16 = 16 + case bits32 = 32 +} + +/// A test trait that configures audio snapshot testing behavior. +public struct AudioSnapshotTrait: TestTrait, SuiteTrait, TestScoping { + /// Whether to record new snapshots instead of comparing. + public let record: Bool + + /// The snapshot strategy to use for failure visualization. + public let strategy: VisualisationStrategy? + + /// Whether to automatically open generated visualizations (macOS only). + public let autoOpen: Bool + + /// The bit depth for ALAC encoding. Defaults to 16-bit. + public let bitDepth: AudioBitDepth + + /// Creates a new audio snapshot trait. + /// - Parameters: + /// - record: Whether to record new snapshots. Defaults to `false`. + /// - strategy: The snapshot strategy for failure visualization. Defaults to `nil`. + /// - autoOpen: Whether to automatically open visualizations. Defaults to `false`. + /// - bitDepth: The bit depth for ALAC encoding. Defaults to `.bits16`. + public init(record: Bool = false, strategy: VisualisationStrategy? = nil, autoOpen: Bool = false, bitDepth: AudioBitDepth = .bits16) { + self.record = record + self.strategy = strategy + self.autoOpen = autoOpen + self.bitDepth = bitDepth + } + + /// Called by Swift Testing to set up the test scope. + public func provideScope(for test: Test, testCase: Test.Case?, performing function: @Sendable () async throws -> Void) async throws { + try await AudioSnapshotConfiguration.$current.withValue(self) { + try await function() + } + } +} + +extension Trait where Self == AudioSnapshotTrait { + /// Configures audio snapshot testing for a test. + /// - Parameters: + /// - record: Whether to record new snapshots. Defaults to `false`. + /// - strategy: The snapshot strategy for failure visualization. Defaults to `nil`. + /// - autoOpen: Whether to automatically open visualizations. Defaults to `false`. + /// - bitDepth: The bit depth for ALAC encoding. Defaults to `.bits16`. 
+ /// - Returns: An `AudioSnapshotTrait` configured with the specified options. + public static func audioSnapshot( + record: Bool = false, + strategy: VisualisationStrategy? = nil, + autoOpen: Bool = false, + bitDepth: AudioBitDepth = .bits16 + ) -> AudioSnapshotTrait { + AudioSnapshotTrait(record: record, strategy: strategy, autoOpen: autoOpen, bitDepth: bitDepth) + } +} + +/// Task-local storage for audio snapshot configuration. +public enum AudioSnapshotConfiguration { + @TaskLocal public static var current: AudioSnapshotTrait? +} diff --git a/Sources/AudioSnapshotTesting/Core/VisualisationStrategy.swift b/Sources/AudioSnapshotTesting/Core/VisualisationStrategy.swift new file mode 100644 index 0000000..5319eb5 --- /dev/null +++ b/Sources/AudioSnapshotTesting/Core/VisualisationStrategy.swift @@ -0,0 +1,29 @@ +/// Defines the visualization strategy for audio snapshot failures. +public enum VisualisationStrategy: Sendable { + /// Waveform visualization. Supports both single and multiple buffers (overlaid with different colors). + /// - Parameters: + /// - width: The width of the resulting image. + /// - height: The height of the resulting image. + /// - strategy: The strategy to use when generating the waveform. Defaults to `.joinedLines`. + /// - mono: Whether to mix down to a mono signal. Defaults to `true`. + case waveform(width: Int, height: Int, strategy: WaveformStrategy = .joinedLines, mono: Bool = true) + + /// Frequency spectrum visualization. + /// - Parameters: + /// - width: The width of the resulting image. + /// - height: The height of the resulting image. + /// - window: Optional window function for FFT. Uses Hann window if not provided. + /// - threshold: Minimum amplitude threshold for frequency bins. Defaults to `0.005`. + /// - Note: Requires iOS 16+ or macOS 13+. + case spectrum(width: Int, height: Int, window: [Float]? = nil, threshold: Float = 0.005) + + /// Spectrogram visualization. 
+ /// - Parameters: + /// - hopSize: The number of audio frames between successive spectral frames. + /// - frequencyCount: The number of frequency bins to include in each spectral frame. + /// - window: Optional window function for FFT. Uses Hann window if not provided. + /// - amplitudeScale: The amplitude scaling method. Defaults to `.logarithmic(range: 120)`. + /// - imageWidth: The width of the resulting image. Defaults to `1000`. + /// - Note: Requires iOS 16+ or macOS 13+. + case spectrogram(hopSize: Int, frequencyCount: Int, window: [Float]? = nil, amplitudeScale: AmplitudeScale = .logarithmic(range: 120), imageWidth: Int = 1000) +} diff --git a/Sources/AudioSnapshotTesting/Errors.swift b/Sources/AudioSnapshotTesting/Errors.swift new file mode 100644 index 0000000..5a4e8ff --- /dev/null +++ b/Sources/AudioSnapshotTesting/Errors.swift @@ -0,0 +1,47 @@ +import Foundation + +/// Error type for audio data comparison operations. +enum AudioComparisonError: Error { + case bufferCreationFailed + case formatCreationFailed + case converterCreationFailed +} + +extension AudioComparisonError: LocalizedError { + var errorDescription: String? { + switch self { + case .bufferCreationFailed: + return "Failed to create audio buffer" + case .formatCreationFailed: + return "Failed to create audio format" + case .converterCreationFailed: + return "Failed to create audio converter" + } + } +} + +/// Error type for visualization generation operations. +enum VisualizationError: Error { + case renderingFailed + case noBuffers + case invalidDimensions(width: Int, height: Int) + case dimensionsTooLarge(width: Int, height: Int, max: Int) + case rendererFailed(reason: String) +} + +extension VisualizationError: LocalizedError { + var errorDescription: String? 
{ + switch self { + case .renderingFailed: + return "Failed to render visualization" + case .noBuffers: + return "No audio buffers provided for visualization" + case .invalidDimensions(let width, let height): + return "Invalid visualization dimensions: \(width)×\(height) (must be greater than 0)" + case .dimensionsTooLarge(let width, let height, let max): + return "Visualization dimensions too large: \(width)×\(height) (maximum: \(max)×\(max))" + case .rendererFailed(let reason): + return "Visualization rendering failed: \(reason)" + } + } +} diff --git a/Sources/AudioSnapshotTesting/AVAudioPCMBufferExtensions.swift b/Sources/AudioSnapshotTesting/Internal/AVAudioPCMBufferExtensions.swift similarity index 100% rename from Sources/AudioSnapshotTesting/AVAudioPCMBufferExtensions.swift rename to Sources/AudioSnapshotTesting/Internal/AVAudioPCMBufferExtensions.swift diff --git a/Sources/AudioSnapshotTesting/Internal/AudioDataComparator.swift b/Sources/AudioSnapshotTesting/Internal/AudioDataComparator.swift new file mode 100644 index 0000000..77a2e34 --- /dev/null +++ b/Sources/AudioSnapshotTesting/Internal/AudioDataComparator.swift @@ -0,0 +1,141 @@ +import AVFAudio +import Foundation + +/// Handles audio buffer comparison. +enum AudioDataComparator { + /// Compares an audio buffer against a reference file. + /// - Parameters: + /// - expectedURL: The reference audio file URL. + /// - actual: The actual audio buffer to compare. + /// - bitDepth: The bit depth used for ALAC encoding. + /// - Returns: `nil` if they match, or an error message if they differ. + static func compare(expectedURL: URL, actual: AVAudioPCMBuffer, bitDepth: AudioBitDepth) throws -> String? 
{ + let expected = try readBuffer(from: expectedURL) + + // Quantize the actual buffer to match ALAC's integer conversion + let quantizedActual = try quantize(actual, bitDepth: bitDepth) + + if expected.format.sampleRate != quantizedActual.format.sampleRate { + return "Sample rate mismatch: expected \(expected.format.sampleRate), got \(quantizedActual.format.sampleRate)" + } + + if expected.format.channelCount != quantizedActual.format.channelCount { + return "Channel count mismatch: expected \(expected.format.channelCount), got \(quantizedActual.format.channelCount)" + } + + if expected.frameLength != quantizedActual.frameLength { + return "Frame length mismatch: expected \(expected.frameLength), got \(quantizedActual.frameLength)" + } + + guard let expectedData = expected.floatChannelData, + let actualData = quantizedActual.floatChannelData else { + return "Unable to access audio data" + } + + let channelCount = Int(expected.format.channelCount) + let frameLength = Int(expected.frameLength) + let byteCount = frameLength * MemoryLayout.size + + for channel in 0.. AVAudioPCMBuffer { + let intFormat: AVAudioFormat? 
+ switch bitDepth { + case .bits16: + intFormat = AVAudioFormat( + commonFormat: .pcmFormatInt16, + sampleRate: buffer.format.sampleRate, + channels: buffer.format.channelCount, + interleaved: false + ) + case .bits32: + intFormat = AVAudioFormat( + commonFormat: .pcmFormatInt32, + sampleRate: buffer.format.sampleRate, + channels: buffer.format.channelCount, + interleaved: false + ) + } + + guard let intFormat else { + throw AudioComparisonError.formatCreationFailed + } + + guard let floatToInt = AVAudioConverter(from: buffer.format, to: intFormat), + let intBuffer = AVAudioPCMBuffer(pcmFormat: intFormat, frameCapacity: buffer.frameLength) else { + throw AudioComparisonError.converterCreationFailed + } + + try floatToInt.convert(to: intBuffer, from: buffer) + + guard let intToFloat = AVAudioConverter(from: intFormat, to: buffer.format), + let quantizedBuffer = AVAudioPCMBuffer(pcmFormat: buffer.format, frameCapacity: buffer.frameLength) else { + throw AudioComparisonError.converterCreationFailed + } + + try intToFloat.convert(to: quantizedBuffer, from: intBuffer) + + return quantizedBuffer + } + + /// Reads an audio file into a buffer. + /// Works around Apple bug FB12754494 by reading in chunks. 
+ private static func readBuffer(from url: URL) throws -> AVAudioPCMBuffer { + let file = try AVAudioFile(forReading: url) + let totalFrames = AVAudioFrameCount(file.length) + + guard let outputBuffer = AVAudioPCMBuffer(pcmFormat: file.processingFormat, frameCapacity: totalFrames) else { + throw AudioComparisonError.bufferCreationFailed + } + + // Temporary buffer for chunked reading + let chunkSize: AVAudioFrameCount = 8192 + guard let chunkBuffer = AVAudioPCMBuffer(pcmFormat: file.processingFormat, frameCapacity: chunkSize) else { + throw AudioComparisonError.bufferCreationFailed + } + + guard let outputData = outputBuffer.floatChannelData else { + throw AudioComparisonError.bufferCreationFailed + } + + let channelCount = Int(file.processingFormat.channelCount) + var framesRead: AVAudioFrameCount = 0 + + while file.framePosition < file.length { + try file.read(into: chunkBuffer) + + // Copy chunk data to output buffer + if let chunkData = chunkBuffer.floatChannelData { + for channel in 0.. URL { + let fileURL = URL(fileURLWithPath: filePath) + let fileName = fileURL.deletingPathExtension().lastPathComponent + let testFileDirectory = fileURL.deletingLastPathComponent() + + // Create __AudioSnapshots__/{TestFileName}/ path + return testFileDirectory + .appendingPathComponent("__AudioSnapshots__") + .appendingPathComponent(fileName) + } + + /// Returns the full path to a snapshot file. + /// - Parameters: + /// - directory: The snapshot directory URL. + /// - fileName: The snapshot file name. + /// - Returns: The full URL to the snapshot file. + static func snapshotPath(directory: URL, fileName: String) -> URL { + directory.appendingPathComponent(fileName) + } + + /// Creates the snapshot directory if it doesn't exist. + /// - Parameter directory: The directory URL to create. 
+ static func createDirectoryIfNeeded(_ directory: URL) throws { + try FileManager.default.createDirectory(at: directory, withIntermediateDirectories: true) + } + + /// Checks if a file exists at the given path. + /// - Parameter path: The file URL to check. + /// - Returns: `true` if the file exists. + static func fileExists(at path: URL) -> Bool { + FileManager.default.fileExists(atPath: path.path) + } + + /// Reads data from a file. + /// - Parameter path: The file URL to read. + /// - Returns: The file contents as `Data`. + static func readFile(at path: URL) throws -> Data { + try Data(contentsOf: path) + } + + /// Writes data to a file. + /// - Parameters: + /// - data: The data to write. + /// - path: The file URL to write to. + static func writeFile(_ data: Data, to path: URL) throws { + try data.write(to: path) + } + + /// Generates a temporary file path for failure visualization. + /// - Parameter fileName: The file name including extension. + /// - Returns: A URL in the temporary directory. 
+ static func temporaryFilePath(fileName: String) -> URL { + URL(fileURLWithPath: NSTemporaryDirectory()) + .appendingPathComponent(fileName) + } +} diff --git a/Sources/AudioSnapshotTesting/MultiChannelWaveformView.swift b/Sources/AudioSnapshotTesting/Visualization/MultiChannelWaveformView.swift similarity index 100% rename from Sources/AudioSnapshotTesting/MultiChannelWaveformView.swift rename to Sources/AudioSnapshotTesting/Visualization/MultiChannelWaveformView.swift diff --git a/Sources/AudioSnapshotTesting/SpectrogramView.swift b/Sources/AudioSnapshotTesting/Visualization/SpectrogramView.swift similarity index 100% rename from Sources/AudioSnapshotTesting/SpectrogramView.swift rename to Sources/AudioSnapshotTesting/Visualization/SpectrogramView.swift diff --git a/Sources/AudioSnapshotTesting/SpectrumView.swift b/Sources/AudioSnapshotTesting/Visualization/SpectrumView.swift similarity index 94% rename from Sources/AudioSnapshotTesting/SpectrumView.swift rename to Sources/AudioSnapshotTesting/Visualization/SpectrumView.swift index 5d68a43..e67fccc 100644 --- a/Sources/AudioSnapshotTesting/SpectrumView.swift +++ b/Sources/AudioSnapshotTesting/Visualization/SpectrumView.swift @@ -38,8 +38,9 @@ struct SpectrumView: View { } private var xAxisValues: [String] { + guard !data.isEmpty else { return [] } let count = data.count - let numberOfLabels = min(10, data.count) + let numberOfLabels = min(10, count) guard numberOfLabels > 1 else { return [data[0].frequencyString] } let spacing = Float(count - 1) / Float(numberOfLabels - 1) let points = (0.. 
Data { + switch strategy { + case .waveform(let width, let height, let waveformStrategy, let mono): + return try await renderWaveform(buffers: buffers, width: width, height: height, strategy: waveformStrategy, mono: mono) + + case .spectrum(let width, let height, let window, let threshold): + guard let buffer = buffers.first else { throw VisualizationError.noBuffers } + if #available(iOS 16, macOS 13, *) { + return try await renderSpectrum(buffer: buffer, width: width, height: height, window: window, threshold: threshold) + } else { + fatalError("Spectrum visualization requires iOS 16+ or macOS 13+") + } + + case .spectrogram(let hopSize, let frequencyCount, let window, let amplitudeScale, let imageWidth): + guard let buffer = buffers.first else { throw VisualizationError.noBuffers } + if #available(iOS 16, macOS 13, *) { + return try await renderSpectrogram( + buffer: buffer, + hopSize: hopSize, + frequencyCount: frequencyCount, + window: window, + amplitudeScale: amplitudeScale, + imageWidth: imageWidth + ) + } else { + fatalError("Spectrogram visualization requires iOS 16+ or macOS 13+") + } + } + } + + private static let overlayColors: [Color] = [.red, .green, .blue, .orange, .purple, .cyan] + + private static func renderWaveform( + buffers: [AVAudioPCMBuffer], + width: Int, + height: Int, + strategy: WaveformStrategy, + mono: Bool + ) async throws -> Data { + guard !buffers.isEmpty else { throw VisualizationError.noBuffers } + + let verticalPadding: CGFloat = 4 + let waveformHeight = CGFloat(height) - (verticalPadding * 2) + + let waveformViews = buffers.enumerated().map { index, buffer in + MultiChannelWaveformView( + data: buffer.reduce(bucketCount: width, mono: mono), + height: waveformHeight, + color: overlayColors[index % overlayColors.count], + strategy: strategy + ) + } + + let waveform = ZStack { + ForEach(Array(waveformViews.enumerated()), id: \.offset) { _, view in + view + } + } + .padding(.vertical, verticalPadding) + .background(Color.black) + + 
return try await MainActor.run { try renderView(waveform, width: width, height: height) } + } + + @available(iOS 16, macOS 13, *) + private static func renderSpectrum(buffer: AVAudioPCMBuffer, width: Int, height: Int, window: [Float]?, threshold: Float) async throws -> Data { + let effectiveWindow = window ?? createHannWindow(size: Int(buffer.frameLength)) + let data = buffer + .spectrum(window: effectiveWindow) + .filter { $0.amplitude > threshold } + let spectrum = SpectrumView(data: data, height: CGFloat(height)) + + return try await renderView(spectrum, width: width, height: height) + } + + @available(iOS 16, macOS 13, *) + private static func renderSpectrogram(buffer: AVAudioPCMBuffer, hopSize: Int, frequencyCount: Int, window: [Float]?, amplitudeScale: AmplitudeScale, imageWidth: Int) async throws -> Data { + let fftSize = frequencyCount * 2 + let lastBucketStart = Int(buffer.frameLength) - fftSize + let width = 1 + (lastBucketStart / hopSize) + + let effectiveWindow = window ?? createHannWindow(size: fftSize) + let data = buffer.spectrogram(fftSize: fftSize, hopSize: hopSize, width: width, window: effectiveWindow, amplitudeScale: amplitudeScale) + + let binSize = buffer.format.sampleRate / Double(fftSize) + let maxFrequency = Int(binSize * Double(frequencyCount)) + let duration = Double(buffer.frameLength) / buffer.format.sampleRate + + return try await MainActor.run { + let spectrogram = SpectrogramView( + data: data, + width: width, + height: frequencyCount, + maxFrequency: maxFrequency, + duration: duration + ) + + return try renderView(spectrogram, width: imageWidth, height: frequencyCount) + } + } + + @MainActor + private static func renderView(_ view: V, width: Int, height: Int) throws -> Data { + guard width > 0 && height > 0 else { + throw VisualizationError.invalidDimensions(width: width, height: height) + } + + let maxDimension = 100_000 + guard width <= maxDimension && height <= maxDimension else { + throw 
VisualizationError.dimensionsTooLarge(width: width, height: height, max: maxDimension) + } + + let size = CGSize(width: width, height: height) + + if #available(iOS 16, macOS 13, *) { + let renderer = ImageRenderer(content: view.frame(width: size.width, height: size.height)) + renderer.scale = 1.0 + + guard let context = CGContext( + data: nil, + width: width, + height: height, + bitsPerComponent: 8, + bytesPerRow: width * 4, + space: CGColorSpaceCreateDeviceRGB(), + bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue + ) else { + throw VisualizationError.rendererFailed(reason: "Failed to create CGContext") + } + + renderer.render { size, renderInContext in renderInContext(context) } + + guard let cgImage = context.makeImage() else { + throw VisualizationError.rendererFailed(reason: "Failed to create CGImage from context") + } + + let data = NSMutableData() + guard let destination = CGImageDestinationCreateWithData(data, "public.png" as CFString, 1, nil) else { + throw VisualizationError.rendererFailed(reason: "Failed to create image destination") + } + + CGImageDestinationAddImage(destination, cgImage, nil) + + guard CGImageDestinationFinalize(destination) else { + throw VisualizationError.rendererFailed(reason: "PNG encoding failed - image size: \(cgImage.width)x\(cgImage.height)") + } + return data as Data + } else { + fatalError("Visualization requires iOS 16+ or macOS 13+") + } + } +} + +/// Creates a normalized Hann window of the specified size. 
+func createHannWindow(size: Int) -> [Float] { + vDSP.window( + ofType: Float.self, + usingSequence: .hanningNormalized, + count: size, + isHalfWindow: false + ) +} diff --git a/Sources/AudioSnapshotTesting/WaveformView.swift b/Sources/AudioSnapshotTesting/Visualization/WaveformView.swift similarity index 97% rename from Sources/AudioSnapshotTesting/WaveformView.swift rename to Sources/AudioSnapshotTesting/Visualization/WaveformView.swift index 4626f21..541de47 100644 --- a/Sources/AudioSnapshotTesting/WaveformView.swift +++ b/Sources/AudioSnapshotTesting/Visualization/WaveformView.swift @@ -43,7 +43,7 @@ struct WaveformView: View { } } -public enum WaveformStrategy { +public enum WaveformStrategy: Sendable { case joinedLines case individualLines } diff --git a/Tests/AudioSnapshotTestingTests/AudioSnapshotTestingTests.swift b/Tests/AudioSnapshotTestingTests/AudioSnapshotTestingTests.swift index 0903a11..549e1af 100644 --- a/Tests/AudioSnapshotTestingTests/AudioSnapshotTestingTests.swift +++ b/Tests/AudioSnapshotTestingTests/AudioSnapshotTestingTests.swift @@ -3,109 +3,103 @@ import AVFAudio @testable import AudioSnapshotTesting @Test( - .snapshots(record: false, diffTool: .ksdiff), + .audioSnapshot(record: false, strategy: .waveform(width: 3000, height: 800)), arguments: ["sine", "triangle", "square", "sawtooth", "brown", "pink", "white"] ) -@MainActor func fileWaveform(wave: String) async throws { - // TODO: account for retina - - assertSnapshot( + await assertAudioSnapshot( of: try AVAudioPCMBuffer.read(wave: wave), - as: .waveform(width: 3000, height: 800), - named: wave + named: "fileWaveform.\(wave)" ) } -@Test(.snapshots(record: false, diffTool: .ksdiff)) -@MainActor +@Test( + .audioSnapshot(record: false, strategy: .waveform(width: 800, height: 400, strategy: .individualLines)) +) func fileWaveformMetronome() async throws { - assertSnapshot( + await assertAudioSnapshot( of: try AVAudioPCMBuffer.read(wave: "metronome"), - as: .waveform(width: 800, height: 
400, strategy: .individualLines), - named: "metronome" + named: "fileWaveformMetronome.metronome" ) } -@Test(.snapshots(record: false, diffTool: .ksdiff)) -@MainActor +@Test( + .audioSnapshot(record: false, strategy: .waveform(width: 4000, height: 1000, strategy: .individualLines, mono: false)) +) func stereoFileWaveform() async throws { - assertSnapshot( + await assertAudioSnapshot( of: try AVAudioPCMBuffer.read(wave: "left-right-metronome"), - as: .waveform(width: 4000, height: 1000, strategy: .individualLines, mono: false) + named: "stereoFileWaveform" ) } -@Test(.snapshots(record: false, diffTool: .ksdiff)) -@MainActor +@Test( + .audioSnapshot(record: false, strategy: .waveform(width: 4000, height: 1000)) +) func fileWaveformOverlay() async throws { let buffer1 = try AVAudioPCMBuffer.read(wave: "sine") let buffer2 = try AVAudioPCMBuffer.read(wave: "square") - assertSnapshot( - of: (buffer1, buffer2), - as: .waveform(width: 4000, height: 1000), - named: "square-over-sine" + await assertAudioSnapshot( + of: [buffer1, buffer2], + named: "fileWaveformOverlay.square-over-sine" ) } -@available(iOS 16, *) +@available(iOS 16, macOS 13, *) @Test( - .snapshots(record: false, diffTool: .ksdiff), + .audioSnapshot(record: false), arguments: ["1hz@32768", "1hz2hz@32768", "2hz@32768"] ) -@MainActor func perfectSpectrum(wave: String) async throws { let buffer = try AVAudioPCMBuffer.read(wave: wave) - assertSnapshot( - of: buffer, - as: .spectrum( + // Override with custom window for perfect spectrum analysis + await AudioSnapshotConfiguration.$current.withValue( + AudioSnapshotTrait(record: false, strategy: .spectrum( width: 500, height: 200, window: .init(repeating: 1, count: Int(buffer.frameLength)) - ), - named: wave - ) + )) + ) { + await assertAudioSnapshot(of: buffer, named: "perfectSpectrum.\(wave)") + } } -@available(iOS 16, *) +@available(iOS 16, macOS 13, *) @Test( - .snapshots(record: false, diffTool: .ksdiff), + .audioSnapshot(record: false, strategy: 
.spectrum(width: 1500, height: 400)), arguments: ["1hz@44100", "white", "brown", "pink", "square", "triangle", "sawtooth"] ) -@MainActor func windowedSpectrum(wave: String) async throws { let buffer = try AVAudioPCMBuffer.read(wave: wave) - assertSnapshot( + await assertAudioSnapshot( of: buffer, - as: .spectrum(width: 1500, height: 400), - named: wave + named: "windowedSpectrum.\(wave)" ) } -@available(iOS 16, *) +@available(iOS 16, macOS 13, *) @Test( - .snapshots(record: false, diffTool: .ksdiff) + .audioSnapshot(record: false, strategy: .spectrum(width: 1500, height: 400, window: .init(repeating: 1, count: 32768))) ) -@MainActor func spectrumSynthesised() async throws { - // (0..<1024).map { (Float($0), Float($0) / Float(1024)) } let signal = synthesizeSignal(frequencyAmplitudePairs: (0..<1024).map { (Float($0), Float($0) / Float(1024)) }, count: 32768) let buffer = createBuffer(from: signal) - assertSnapshot( + await assertAudioSnapshot( of: buffer, - as: .spectrum(width: 1500, height: 400, window: .init(repeating: 1, count: 32768)), - named: "synthesized" + named: "spectrumSynthesised.synthesized" ) } -@available(iOS 16, *) +@available(iOS 16, macOS 13, *) @Test( "Generates color spectrum spectrogram", - .snapshots(record: false, diffTool: .ksdiff) + .audioSnapshot( + record: false, + strategy: .spectrogram(hopSize: 128, frequencyCount: 1024, window: [Float](repeating: 1, count: 2048), amplitudeScale: .linear, imageWidth: 200) + ) ) -@MainActor func spectrogramColors() async throws { let frequencyCount = 1024 let signal = synthesizeSignal( @@ -114,21 +108,12 @@ func spectrogramColors() async throws { ) let buffer = createBuffer(from: signal + signal + signal + signal) - assertSnapshot( - of: buffer, - as: .spectrogram( - hopSize: 128, - frequencyCount: frequencyCount, - window: [Float](repeating: 1, count: 2048), - amplitudeScale: .linear, - imageWidth: 200 - ) - ) + await assertAudioSnapshot(of: buffer, named: "spectrogramColors") } -@available(iOS 16, *) 
+@available(iOS 16, macOS 13, *) @Test( - .snapshots(record: false, diffTool: .ksdiff), + .audioSnapshot(record: false, strategy: .spectrogram(hopSize: 4096, frequencyCount: 2048)), arguments: [ "500hz", "2000hz", @@ -144,18 +129,42 @@ func spectrogramColors() async throws { "spring" ] ) -@MainActor func spectrogramHop256(wave: String) async throws { let buffer = try AVAudioPCMBuffer.read(wave: wave) - let frequencyCount = 2048 + await assertAudioSnapshot( + of: buffer, + named: "spectrogramHop256.\(wave)" + ) +} - assertSnapshot( +@Test( + "Audio snapshot of synthesized signal", + .audioSnapshot(record: false, strategy: .waveform(width: 1000, height: 300)) +) +func audioSnapshotSynthesized() async throws { + let signal = synthesizeSignal( + frequencyAmplitudePairs: [(440, 0.5), (880, 0.3)], + count: 4410 + ) + let buffer = createBuffer(from: signal) + + await assertAudioSnapshot( of: buffer, - as: .spectrogram(hopSize: 4096, frequencyCount: frequencyCount), - named: wave + named: "audioSnapshotSynthesized.440-880hz" ) } +private func createBuffer(from samples: [Float]) -> AVAudioPCMBuffer { + let format = AVAudioFormat(standardFormatWithSampleRate: 32768, channels: 1)! + let buffer = AVAudioPCMBuffer(pcmFormat: format, frameCapacity: AVAudioFrameCount(samples.count))! + let channelData = buffer.floatChannelData![0] + samples.enumerated().forEach { index, sample in + channelData[index] = sample + } + buffer.frameLength = AVAudioFrameCount(samples.count) + return buffer +} + private extension AVAudioPCMBuffer { static func read(wave: String) throws -> AVAudioPCMBuffer { let file = try AVAudioFile( @@ -168,7 +177,6 @@ private extension AVAudioPCMBuffer { } /// Synthesize a signal composed of multiple sine waves given frequency-amplitude pairs. -/// Useful for generating test signals with known spectral content. 
private func synthesizeSignal( frequencyAmplitudePairs: [(f: Float, a: Float)], count: Int @@ -183,29 +191,3 @@ private func synthesizeSignal( return signal } - -@Test("Audio snapshot of synthesized signal", .snapshots(record: false)) -func audioSnapshotSynthesized() async throws { - let signal = synthesizeSignal( - frequencyAmplitudePairs: [(440, 0.5), (880, 0.3)], - count: 4410 - ) - let buffer = createBuffer(from: signal) - - assertSnapshot( - of: buffer, - as: .audio(), - named: "440-880hz" - ) -} - -private func createBuffer(from samples: [Float]) -> AVAudioPCMBuffer { - let format = AVAudioFormat(standardFormatWithSampleRate: 32768, channels: 1)! - let buffer = AVAudioPCMBuffer(pcmFormat: format, frameCapacity: AVAudioFrameCount(samples.count))! - let channelData = buffer.floatChannelData![0] - samples.enumerated().forEach { index, sample in - channelData[index] = sample - } - buffer.frameLength = AVAudioFrameCount(samples.count) - return buffer -} diff --git a/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/audioSnapshotSynthesized.440-880hz.caf b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/audioSnapshotSynthesized.440-880hz.caf new file mode 100644 index 0000000..aaa819e Binary files /dev/null and b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/audioSnapshotSynthesized.440-880hz.caf differ diff --git a/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/fileWaveform.brown.caf b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/fileWaveform.brown.caf new file mode 100644 index 0000000..8feb96a Binary files /dev/null and b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/fileWaveform.brown.caf differ diff --git a/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/fileWaveform.pink.caf 
b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/fileWaveform.pink.caf new file mode 100644 index 0000000..a51cc88 Binary files /dev/null and b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/fileWaveform.pink.caf differ diff --git a/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/fileWaveform.sawtooth.caf b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/fileWaveform.sawtooth.caf new file mode 100644 index 0000000..31979b7 Binary files /dev/null and b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/fileWaveform.sawtooth.caf differ diff --git a/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/fileWaveform.sine.caf b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/fileWaveform.sine.caf new file mode 100644 index 0000000..1c2d756 Binary files /dev/null and b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/fileWaveform.sine.caf differ diff --git a/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/fileWaveform.square.caf b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/fileWaveform.square.caf new file mode 100644 index 0000000..782c873 Binary files /dev/null and b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/fileWaveform.square.caf differ diff --git a/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/fileWaveform.triangle.caf b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/fileWaveform.triangle.caf new file mode 100644 index 0000000..0b787c4 Binary files /dev/null and b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/fileWaveform.triangle.caf differ diff --git a/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/fileWaveform.white.caf 
b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/fileWaveform.white.caf new file mode 100644 index 0000000..dd365db Binary files /dev/null and b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/fileWaveform.white.caf differ diff --git a/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/fileWaveformMetronome.metronome.caf b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/fileWaveformMetronome.metronome.caf new file mode 100644 index 0000000..d9385b3 Binary files /dev/null and b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/fileWaveformMetronome.metronome.caf differ diff --git a/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/fileWaveformOverlay.square-over-sine.1.caf b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/fileWaveformOverlay.square-over-sine.1.caf new file mode 100644 index 0000000..1c2d756 Binary files /dev/null and b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/fileWaveformOverlay.square-over-sine.1.caf differ diff --git a/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/fileWaveformOverlay.square-over-sine.2.caf b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/fileWaveformOverlay.square-over-sine.2.caf new file mode 100644 index 0000000..782c873 Binary files /dev/null and b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/fileWaveformOverlay.square-over-sine.2.caf differ diff --git a/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/perfectSpectrum.1hz2hz@32768.caf b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/perfectSpectrum.1hz2hz@32768.caf new file mode 100644 index 0000000..9b46e0c Binary files /dev/null and 
b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/perfectSpectrum.1hz2hz@32768.caf differ diff --git a/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/perfectSpectrum.1hz@32768.caf b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/perfectSpectrum.1hz@32768.caf new file mode 100644 index 0000000..42cc933 Binary files /dev/null and b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/perfectSpectrum.1hz@32768.caf differ diff --git a/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/perfectSpectrum.2hz@32768.caf b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/perfectSpectrum.2hz@32768.caf new file mode 100644 index 0000000..4224fa7 Binary files /dev/null and b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/perfectSpectrum.2hz@32768.caf differ diff --git a/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramColors.caf b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramColors.caf new file mode 100644 index 0000000..869399a Binary files /dev/null and b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramColors.caf differ diff --git a/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramHop256.15000hz.caf b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramHop256.15000hz.caf new file mode 100644 index 0000000..e9b53e4 Binary files /dev/null and b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramHop256.15000hz.caf differ diff --git a/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramHop256.20000hz.caf b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramHop256.20000hz.caf new file 
mode 100644 index 0000000..96b089a Binary files /dev/null and b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramHop256.20000hz.caf differ diff --git a/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramHop256.2000hz.caf b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramHop256.2000hz.caf new file mode 100644 index 0000000..394adea Binary files /dev/null and b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramHop256.2000hz.caf differ diff --git a/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramHop256.440-880-1320hz.caf b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramHop256.440-880-1320hz.caf new file mode 100644 index 0000000..dded6c1 Binary files /dev/null and b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramHop256.440-880-1320hz.caf differ diff --git a/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramHop256.5000hz.caf b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramHop256.5000hz.caf new file mode 100644 index 0000000..6d68219 Binary files /dev/null and b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramHop256.5000hz.caf differ diff --git a/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramHop256.500hz.caf b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramHop256.500hz.caf new file mode 100644 index 0000000..d872429 Binary files /dev/null and b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramHop256.500hz.caf differ diff --git a/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramHop256.beating.caf 
b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramHop256.beating.caf new file mode 100644 index 0000000..b4bf863 Binary files /dev/null and b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramHop256.beating.caf differ diff --git a/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramHop256.chirp.caf b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramHop256.chirp.caf new file mode 100644 index 0000000..c573249 Binary files /dev/null and b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramHop256.chirp.caf differ diff --git a/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramHop256.fade-in-out.caf b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramHop256.fade-in-out.caf new file mode 100644 index 0000000..d6e76c6 Binary files /dev/null and b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramHop256.fade-in-out.caf differ diff --git a/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramHop256.high-sweep.caf b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramHop256.high-sweep.caf new file mode 100644 index 0000000..33cc70f Binary files /dev/null and b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramHop256.high-sweep.caf differ diff --git a/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramHop256.spring.caf b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramHop256.spring.caf new file mode 100644 index 0000000..8df60ab Binary files /dev/null and b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramHop256.spring.caf differ diff --git 
a/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramHop256.ultra-high-tones.caf b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramHop256.ultra-high-tones.caf new file mode 100644 index 0000000..4020f8e Binary files /dev/null and b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrogramHop256.ultra-high-tones.caf differ diff --git a/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrumSynthesised.synthesized.caf b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrumSynthesised.synthesized.caf new file mode 100644 index 0000000..af68041 Binary files /dev/null and b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/spectrumSynthesised.synthesized.caf differ diff --git a/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/stereoFileWaveform.caf b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/stereoFileWaveform.caf new file mode 100644 index 0000000..f6f24f1 Binary files /dev/null and b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/stereoFileWaveform.caf differ diff --git a/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/windowedSpectrum.1hz@44100.caf b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/windowedSpectrum.1hz@44100.caf new file mode 100644 index 0000000..106dd70 Binary files /dev/null and b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/windowedSpectrum.1hz@44100.caf differ diff --git a/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/windowedSpectrum.brown.caf b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/windowedSpectrum.brown.caf new file mode 100644 index 0000000..8feb96a Binary files /dev/null and 
b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/windowedSpectrum.brown.caf differ diff --git a/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/windowedSpectrum.pink.caf b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/windowedSpectrum.pink.caf new file mode 100644 index 0000000..a51cc88 Binary files /dev/null and b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/windowedSpectrum.pink.caf differ diff --git a/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/windowedSpectrum.sawtooth.caf b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/windowedSpectrum.sawtooth.caf new file mode 100644 index 0000000..31979b7 Binary files /dev/null and b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/windowedSpectrum.sawtooth.caf differ diff --git a/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/windowedSpectrum.square.caf b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/windowedSpectrum.square.caf new file mode 100644 index 0000000..782c873 Binary files /dev/null and b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/windowedSpectrum.square.caf differ diff --git a/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/windowedSpectrum.triangle.caf b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/windowedSpectrum.triangle.caf new file mode 100644 index 0000000..0b787c4 Binary files /dev/null and b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/windowedSpectrum.triangle.caf differ diff --git a/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/windowedSpectrum.white.caf b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/windowedSpectrum.white.caf new file mode 
100644 index 0000000..dd365db Binary files /dev/null and b/Tests/AudioSnapshotTestingTests/__AudioSnapshots__/AudioSnapshotTestingTests/windowedSpectrum.white.caf differ diff --git a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/audioSnapshotSynthesized.440-880hz.wav b/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/audioSnapshotSynthesized.440-880hz.wav deleted file mode 100644 index c757e13..0000000 Binary files a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/audioSnapshotSynthesized.440-880hz.wav and /dev/null differ diff --git a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/fileWaveform-wave.brown.png b/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/fileWaveform-wave.brown.png deleted file mode 100644 index a174b6f..0000000 Binary files a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/fileWaveform-wave.brown.png and /dev/null differ diff --git a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/fileWaveform-wave.pink.png b/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/fileWaveform-wave.pink.png deleted file mode 100644 index 5fba1bc..0000000 Binary files a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/fileWaveform-wave.pink.png and /dev/null differ diff --git a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/fileWaveform-wave.sawtooth.png b/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/fileWaveform-wave.sawtooth.png deleted file mode 100644 index 6563fc2..0000000 Binary files a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/fileWaveform-wave.sawtooth.png and /dev/null differ diff --git a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/fileWaveform-wave.sine.png 
b/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/fileWaveform-wave.sine.png deleted file mode 100644 index 0d40a9f..0000000 Binary files a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/fileWaveform-wave.sine.png and /dev/null differ diff --git a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/fileWaveform-wave.square.png b/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/fileWaveform-wave.square.png deleted file mode 100644 index 1ca7449..0000000 Binary files a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/fileWaveform-wave.square.png and /dev/null differ diff --git a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/fileWaveform-wave.triangle.png b/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/fileWaveform-wave.triangle.png deleted file mode 100644 index 5dc5807..0000000 Binary files a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/fileWaveform-wave.triangle.png and /dev/null differ diff --git a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/fileWaveform-wave.white.png b/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/fileWaveform-wave.white.png deleted file mode 100644 index 5769bbc..0000000 Binary files a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/fileWaveform-wave.white.png and /dev/null differ diff --git a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/fileWaveformMetronome.metronome.png b/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/fileWaveformMetronome.metronome.png deleted file mode 100644 index 01f1d20..0000000 Binary files a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/fileWaveformMetronome.metronome.png and /dev/null differ diff --git 
a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/fileWaveformOverlay.square-over-sine.png b/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/fileWaveformOverlay.square-over-sine.png deleted file mode 100644 index fb5f49d..0000000 Binary files a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/fileWaveformOverlay.square-over-sine.png and /dev/null differ diff --git a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/perfectSpectrum-wave.1hz-32768.png b/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/perfectSpectrum-wave.1hz-32768.png deleted file mode 100644 index f61abc7..0000000 Binary files a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/perfectSpectrum-wave.1hz-32768.png and /dev/null differ diff --git a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/perfectSpectrum-wave.1hz2hz-32768.png b/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/perfectSpectrum-wave.1hz2hz-32768.png deleted file mode 100644 index 266f744..0000000 Binary files a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/perfectSpectrum-wave.1hz2hz-32768.png and /dev/null differ diff --git a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/perfectSpectrum-wave.2hz-32768.png b/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/perfectSpectrum-wave.2hz-32768.png deleted file mode 100644 index 4f70e28..0000000 Binary files a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/perfectSpectrum-wave.2hz-32768.png and /dev/null differ diff --git a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramColors.1.png b/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramColors.1.png deleted file mode 100644 index 0468833..0000000 Binary files 
a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramColors.1.png and /dev/null differ diff --git a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramHop256-wave.15000hz.png b/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramHop256-wave.15000hz.png deleted file mode 100644 index 5c07a32..0000000 Binary files a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramHop256-wave.15000hz.png and /dev/null differ diff --git a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramHop256-wave.20000hz.png b/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramHop256-wave.20000hz.png deleted file mode 100644 index 180f0c3..0000000 Binary files a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramHop256-wave.20000hz.png and /dev/null differ diff --git a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramHop256-wave.2000hz.png b/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramHop256-wave.2000hz.png deleted file mode 100644 index 0dabd4b..0000000 Binary files a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramHop256-wave.2000hz.png and /dev/null differ diff --git a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramHop256-wave.440-880-1320hz.png b/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramHop256-wave.440-880-1320hz.png deleted file mode 100644 index 52c5332..0000000 Binary files a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramHop256-wave.440-880-1320hz.png and /dev/null differ diff --git a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramHop256-wave.5000hz.png 
b/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramHop256-wave.5000hz.png deleted file mode 100644 index 6ffd9f0..0000000 Binary files a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramHop256-wave.5000hz.png and /dev/null differ diff --git a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramHop256-wave.500hz.png b/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramHop256-wave.500hz.png deleted file mode 100644 index 9115b46..0000000 Binary files a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramHop256-wave.500hz.png and /dev/null differ diff --git a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramHop256-wave.beating.png b/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramHop256-wave.beating.png deleted file mode 100644 index 60f940a..0000000 Binary files a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramHop256-wave.beating.png and /dev/null differ diff --git a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramHop256-wave.chirp.png b/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramHop256-wave.chirp.png deleted file mode 100644 index c8497e6..0000000 Binary files a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramHop256-wave.chirp.png and /dev/null differ diff --git a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramHop256-wave.fade-in-out.png b/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramHop256-wave.fade-in-out.png deleted file mode 100644 index 528cee3..0000000 Binary files a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramHop256-wave.fade-in-out.png and /dev/null differ diff --git 
a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramHop256-wave.high-sweep.png b/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramHop256-wave.high-sweep.png deleted file mode 100644 index 37eb4f8..0000000 Binary files a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramHop256-wave.high-sweep.png and /dev/null differ diff --git a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramHop256-wave.spring.png b/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramHop256-wave.spring.png deleted file mode 100644 index 41d7b3b..0000000 Binary files a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramHop256-wave.spring.png and /dev/null differ diff --git a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramHop256-wave.ultra-high-tones.png b/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramHop256-wave.ultra-high-tones.png deleted file mode 100644 index a8b63ea..0000000 Binary files a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrogramHop256-wave.ultra-high-tones.png and /dev/null differ diff --git a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrumSynthesised.synthesized.png b/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrumSynthesised.synthesized.png deleted file mode 100644 index baacab7..0000000 Binary files a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/spectrumSynthesised.synthesized.png and /dev/null differ diff --git a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/stereoFileWaveform.1.png b/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/stereoFileWaveform.1.png deleted file mode 100644 index 0846253..0000000 Binary files 
a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/stereoFileWaveform.1.png and /dev/null differ diff --git a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/windowedSpectrum-wave.1hz-44100.png b/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/windowedSpectrum-wave.1hz-44100.png deleted file mode 100644 index ac78bc2..0000000 Binary files a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/windowedSpectrum-wave.1hz-44100.png and /dev/null differ diff --git a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/windowedSpectrum-wave.brown.png b/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/windowedSpectrum-wave.brown.png deleted file mode 100644 index 228774f..0000000 Binary files a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/windowedSpectrum-wave.brown.png and /dev/null differ diff --git a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/windowedSpectrum-wave.pink.png b/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/windowedSpectrum-wave.pink.png deleted file mode 100644 index 08e7755..0000000 Binary files a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/windowedSpectrum-wave.pink.png and /dev/null differ diff --git a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/windowedSpectrum-wave.sawtooth.png b/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/windowedSpectrum-wave.sawtooth.png deleted file mode 100644 index cad453e..0000000 Binary files a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/windowedSpectrum-wave.sawtooth.png and /dev/null differ diff --git a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/windowedSpectrum-wave.square.png 
b/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/windowedSpectrum-wave.square.png deleted file mode 100644 index 0ec5503..0000000 Binary files a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/windowedSpectrum-wave.square.png and /dev/null differ diff --git a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/windowedSpectrum-wave.triangle.png b/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/windowedSpectrum-wave.triangle.png deleted file mode 100644 index 7ede1e8..0000000 Binary files a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/windowedSpectrum-wave.triangle.png and /dev/null differ diff --git a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/windowedSpectrum-wave.white.png b/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/windowedSpectrum-wave.white.png deleted file mode 100644 index 427c21d..0000000 Binary files a/Tests/AudioSnapshotTestingTests/__Snapshots__/AudioSnapshotTestingTests/windowedSpectrum-wave.white.png and /dev/null differ