Skip to content

Commit c69bcf3

Browse files
Gerald authored and Gerald committed
change model loading, add measure, update requirements.txt
1 parent 8046eb2 commit c69bcf3

File tree

6 files changed

+56
-9
lines changed

.gitignore

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
11
__pycache__
22
*.pyc
33
*~
4+
xcuserdata

Pytorch-CoreML-Spectrogram.xcodeproj/project.pbxproj

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -426,7 +426,7 @@
426426
CODE_SIGN_STYLE = Automatic;
427427
DEVELOPMENT_TEAM = K6KE3UBBT4;
428428
INFOPLIST_FILE = "Pytorch-CoreML-SpectrogramTests/Info.plist";
429-
IPHONEOS_DEPLOYMENT_TARGET = 13.4;
429+
IPHONEOS_DEPLOYMENT_TARGET = 13.0;
430430
LD_RUNPATH_SEARCH_PATHS = (
431431
"$(inherited)",
432432
"@executable_path/Frameworks",
@@ -448,7 +448,7 @@
448448
CODE_SIGN_STYLE = Automatic;
449449
DEVELOPMENT_TEAM = K6KE3UBBT4;
450450
INFOPLIST_FILE = "Pytorch-CoreML-SpectrogramTests/Info.plist";
451-
IPHONEOS_DEPLOYMENT_TARGET = 13.4;
451+
IPHONEOS_DEPLOYMENT_TARGET = 13.0;
452452
LD_RUNPATH_SEARCH_PATHS = (
453453
"$(inherited)",
454454
"@executable_path/Frameworks",

Pytorch-CoreML-Spectrogram/ViewController.swift

Lines changed: 19 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@ class ViewController: UIViewController {
1919
// set up for audio
2020
private let audioEngine = AVAudioEngine()
2121
// specify the audio samples format the CoreML model
22-
let desiredAudioFormat: AVAudioFormat = {
22+
private let desiredAudioFormat: AVAudioFormat = {
2323
let avAudioChannelLayout = AVAudioChannelLayout(layoutTag: kAudioChannelLayoutTag_Mono)!
2424
return AVAudioFormat(
2525
commonFormat: .pcmFormatFloat32,
@@ -30,10 +30,12 @@ class ViewController: UIViewController {
3030
}()
3131

3232
// create a queue to do analysis on a separate thread
33-
let analysisQueue = DispatchQueue(label: "com.myco.AnalysisQueue")
33+
private let analysisQueue = DispatchQueue(label: "com.myco.AnalysisQueue")
3434

3535
// instantiate our model
36-
let model = wave__melspec()
36+
37+
38+
var model : wave__melspec? = nil
3739
typealias NetworkInput = wave__melspecInput
3840
typealias NetworkOutput = wave__melspecOutput
3941

@@ -46,12 +48,24 @@ class ViewController: UIViewController {
4648
override func viewDidLoad() {
4749
super.viewDidLoad()
4850
// Do any additional setup after loading the view.
51+
load_model()
4952
}
5053

5154
override func viewDidAppear(_ animated: Bool) {
5255
startAudioEngine()
5356
}
5457

58+
private func load_model() {
59+
let config = MLModelConfiguration()
60+
config.computeUnits = .all
61+
do {
62+
self.model = try wave__melspec( configuration: config )
63+
} catch {
64+
fatalError( "unable to load ML model!" )
65+
}
66+
67+
}
68+
5569
// audio capture via microphone
5670
private func startAudioEngine() {
5771

@@ -151,7 +165,8 @@ class ViewController: UIViewController {
151165

152166

153167
func predict_provider(provider: MLDictionaryFeatureProvider ) {
154-
if let outFeatures = try? self.model.model.prediction(from: provider) {
168+
169+
if let outFeatures = try? self.model?.model.prediction(from: provider) {
155170
// release the semaphore as soon as the model is done
156171
self.semaphore.signal()
157172

Pytorch-CoreML-SpectrogramTests/Pytorch_CoreML_SpectrogramTests.swift

Lines changed: 31 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -151,10 +151,40 @@ class Pytorch_CoreML_SpectrogramTests: XCTestCase {
151151

152152
}
153153

154-
func testPerformanceExample() throws {
154+
func test_inference_time() throws {
155155
// This is an example of a performance test case.
156+
let model = wave__melspec()
157+
158+
let array_shape: [NSNumber] = [1, 12800]
159+
let audioData = try! MLMultiArray(shape: array_shape, dataType: MLMultiArrayDataType.float32 )
160+
let inputs: [String: Any] = [
161+
"input.1": audioData,
162+
]
163+
// container for ML Model inputs
164+
let provider = try! MLDictionaryFeatureProvider(dictionary: inputs)
165+
156166
self.measure {
157167
// Put the code you want to measure the time of here.
168+
let N = 100
169+
let start_time = CACurrentMediaTime()
170+
let options = MLPredictionOptions()
171+
// options.usesCPUOnly = true
172+
for _ in 0..<N {
173+
_ = try? model.model.prediction(
174+
from: provider,
175+
options: options
176+
)
177+
}
178+
let elapsed = CACurrentMediaTime() - start_time
179+
print( "avg inference time: \(elapsed/Double(N))")
180+
/* simulator:
181+
avg inference time: 0.011592097150278279
182+
w/ CPUOnly: avg inference time: 0.011968133399495855
183+
on iPhone XR
184+
avg inference time: 0.003219694583094679
185+
w/ CPUOnly: avg inference time: 0.003034873333526775
186+
187+
*/
158188
}
159189
}
160190

python/model.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -130,7 +130,7 @@ def convert_to_coreml( self, fn_mlmodel, sample_input, plot_specs=True ):
130130
assert torch_output.shape == mlmodel_output.shape
131131

132132
print( 'sum diff ', np.sum( np.abs( torch_output-mlmodel_output) ), np.max( np.abs( torch_output-mlmodel_output) ) )
133-
# !!!!!!!!! assert np.allclose( torch_output, mlmodel_output, atol=1e-3, rtol=1e-5 )
133+
assert np.allclose( torch_output, mlmodel_output, atol=2, rtol=2 ) # big tolerance due to log scale
134134

135135
print( 'Successful MLModel conversion to %s!' % fn_mlmodel )
136136

python/requirements.txt

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
torch
22
librosa
3-
torchlibrosa
3+
# torchlibrosa
4+
-e git://github.com/ml-illustrated/torchlibrosa.git#egg=torchlibrosa
45
SoundFile
56
onnx
67
onnxruntime

0 commit comments

Comments (0)