//
// TexTellerModel.swift
// iTexSnip
//
// Created by Navan Chauhan on 10/20/24.
//

import OnnxRuntimeBindings
import AppKit

public enum ModelError: Error {
    case encoderModelNotFound
    case decoderModelNotFound
    case imageError
}
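
/// Image-to-LaTeX wrapper around the TexTeller encoder/decoder ONNX sessions.
///
/// A rough usage sketch (assuming the ONNX models and tokenizer files are bundled with the app):
///
///     let model = try await TexTellerModel.asyncInit()
///     let latex = try await model.texIt(image)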
public struct TexTellerModel {
    public let encoderSession: ORTSession
    public let decoderSession: ORTSession
    private let tokenizer: RobertaTokenizerFast

    public init() throws {
        guard let encoderModelPath = Bundle.main.path(forResource: "encoder_model", ofType: "onnx") else {
            print("Encoder model not found...")
            throw ModelError.encoderModelNotFound
        }
        guard let decoderModelPath = Bundle.main.path(forResource: "decoder_model", ofType: "onnx") else {
            print("Decoder model not found...")
            throw ModelError.decoderModelNotFound
        }
        let env = try ORTEnv(loggingLevel: .warning)
        // CoreML execution provider options are prepared here, but the provider is left
        // disabled below, so both sessions currently run on the default CPU provider.
        let coreMLOptions = ORTCoreMLExecutionProviderOptions()
        coreMLOptions.enableOnSubgraphs = true
        coreMLOptions.createMLProgram = false
        let options = try ORTSessionOptions()
        // try options.appendCoreMLExecutionProvider(with: coreMLOptions)
        encoderSession = try ORTSession(env: env, modelPath: encoderModelPath, sessionOptions: options)
        decoderSession = try ORTSession(env: env, modelPath: decoderModelPath, sessionOptions: options)
        self.tokenizer = RobertaTokenizerFast(vocabFile: "vocab", tokenizerFile: "tokenizer")
    }
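
    /// Builds a `TexTellerModel` on a background queue so that loading the ONNX
    /// models does not block the calling thread.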
    public static func asyncInit() async throws -> TexTellerModel {
        return try await withCheckedThrowingContinuation { continuation in
            DispatchQueue.global(qos: .userInitiated).async {
                do {
                    let model = try TexTellerModel()
                    continuation.resume(returning: model)
                } catch {
                    continuation.resume(throwing: error)
                }
            }
        }
    }
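
    /// Runs the full pipeline synchronously: preprocess the image, run the encoder,
    /// greedily decode token ids, and return the result as KaTeX (or as the raw
    /// decoded string when `rawString` is true).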
    public func texIt(_ image: NSImage, rawString: Bool = false, debug: Bool = false) throws -> String {
        let transformedImage = inferenceTransform(images: [image])
        if let firstTransformedImage = transformedImage.first {
            // Flatten the preprocessed image into a single-channel float tensor of
            // shape [1, 1, FIXED_IMG_SIZE, FIXED_IMG_SIZE] for the encoder.
            let pixelValues = ciImageToFloatArray(firstTransformedImage, size: CGSize(width: FIXED_IMG_SIZE, height: FIXED_IMG_SIZE))
            if debug {
                print("First few pixel inputs: \(pixelValues.prefix(10))")
            }
            let inputTensor = try ORTValue(
                tensorData: NSMutableData(
                    data: Data(bytes: pixelValues, count: pixelValues.count * MemoryLayout<Float>.stride)
                ),
                elementType: .float,
                shape: [
                    1, 1, NSNumber(value: FIXED_IMG_SIZE), NSNumber(value: FIXED_IMG_SIZE)
                ]
            )
            let encoderInput: [String: ORTValue] = [
                "pixel_values": inputTensor
            ]
            let encoderOutputNames = try self.encoderSession.outputNames()
            let encoderOutputs: [String: ORTValue] = try self.encoderSession.run(
                withInputs: encoderInput,
                outputNames: Set(encoderOutputNames),
                runOptions: nil
            )
            if debug {
                print("Encoder output: \(encoderOutputs)")
            }
            // Greedy autoregressive decoding: start from the start token and append the
            // highest-scoring token each step until the end token or the length cap.
            var decodedTokenIds: [Int] = []
            let startTokenId = 0  // TODO: Move to tokenizer directly?
            let endTokenId = 2
            let maxDecoderLength: Int = 300
            var decoderInputIds: [Int] = [startTokenId]
            let vocabSize = 15000
            if debug {
                let encoderHiddenStatesData = try encoderOutputs["last_hidden_state"]!.tensorData() as Data
                let encoderHiddenStatesArray = encoderHiddenStatesData.withUnsafeBytes {
                    Array(UnsafeBufferPointer<Float>(
                        start: $0.baseAddress!.assumingMemoryBound(to: Float.self),
                        count: encoderHiddenStatesData.count / MemoryLayout<Float>.stride
                    ))
                }
                print("First few values of encoder hidden states: \(encoderHiddenStatesArray.prefix(10))")
            }
            let decoderOutputNames = try self.decoderSession.outputNames()
            for step in 0..<maxDecoderLength {
                if debug {
                    print("Step \(step)")
                }
                // The decoder expects int64 input ids, so convert explicitly rather than
                // relying on Int being 64 bits wide on the current platform.
                let inputIds64 = decoderInputIds.map { Int64($0) }
                let decoderInputIdsTensor = try ORTValue(
                    tensorData: NSMutableData(data: Data(bytes: inputIds64, count: inputIds64.count * MemoryLayout<Int64>.stride)),
                    elementType: .int64,
                    shape: [1, NSNumber(value: decoderInputIds.count)]
                )
                let decoderInputs: [String: ORTValue] = [
                    "input_ids": decoderInputIdsTensor,
                    "encoder_hidden_states": encoderOutputs["last_hidden_state"]!
                ]
                let decoderOutputs: [String: ORTValue] = try self.decoderSession.run(withInputs: decoderInputs, outputNames: Set(decoderOutputNames), runOptions: nil)
                let logitsTensor = decoderOutputs["logits"]!
                let logitsData = try logitsTensor.tensorData() as Data
                let logits = logitsData.withUnsafeBytes {
                    Array(UnsafeBufferPointer<Float>(
                        start: $0.baseAddress!.assumingMemoryBound(to: Float.self),
                        count: logitsData.count / MemoryLayout<Float>.stride
                    ))
                }
                // Greedy step: take the argmax over the vocabulary at the last sequence position.
                let sequenceLength = decoderInputIds.count
                let startIndex = (sequenceLength - 1) * vocabSize
                let endIndex = startIndex + vocabSize
                let lastTokenLogits = Array(logits[startIndex..<endIndex])
                let nextTokenId = lastTokenLogits.enumerated().max(by: { $0.element < $1.element })?.offset ?? 9  // TODO: Should I track if this fails?
                if debug {
                    print("Next token id: \(nextTokenId)")
                }
                if nextTokenId == endTokenId {
                    break
                }
                decodedTokenIds.append(nextTokenId)
                decoderInputIds.append(nextTokenId)
            }
            if rawString {
                return tokenizer.decode(tokenIds: decodedTokenIds)
            }
            return toKatex(formula: tokenizer.decode(tokenIds: decodedTokenIds))
        }
        throw ModelError.imageError
    }
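
    /// Async variant of `texIt`, dispatching the synchronous pipeline to a background queue.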
    public func texIt(_ image: NSImage, rawString: Bool = false, debug: Bool = false) async throws -> String {
        return try await withCheckedThrowingContinuation { continuation in
            DispatchQueue.global(qos: .userInitiated).async {
                do {
                    let result = try self.texIt(image, rawString: rawString, debug: debug)
                    continuation.resume(returning: result)
                } catch {
                    continuation.resume(throwing: error)
                }
            }
        }
    }
}