Skip to content

Commit 620eef1

Browse files
authored
Fix code docs that got out of sync (#272)
* Fix doc for `callAsFunction` that got out of sync
* More fixes
1 parent c130706 commit 620eef1

File tree

3 files changed

+5
-6
lines changed

3 files changed

+5
-6
lines changed

Examples/transformers-cli/Sources/transformers-cli/Transformers.swift

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -125,7 +125,7 @@ enum ComputeUnits: String, ExpressibleByArgument, CaseIterable {
125125

126126
/// Returns a cleaned and formatted version of the response.
127127
///
128-
/// - Parameter respone: The response to clean and format.
128+
/// - Parameter response: The response to clean and format.
129129
/// - Returns: A 'user friendly' representation of the generated response.
130130
private func formatResponse(_ response: String) -> String {
131131
response

Sources/Models/LanguageModel.swift

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -236,8 +236,7 @@ public extension LanguageModel {
236236

237237
/// Determines the type of KV Cache available for the model, if any.
238238
///
239-
/// - Parameters:
240-
/// - model: The Core ML model
239+
/// - Parameter model: The Core ML model
241240
/// - Returns: The type of KV Cache available.
242241
fileprivate static func kvCacheAvailability(for model: MLModel) -> KVCacheAvailability? {
243242
func isStatefulKVCacheAvailable(for model: MLModel) -> Bool {

Sources/Models/LanguageModelTypes.swift

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -57,9 +57,9 @@ public extension LanguageModelProtocol {
5757
/// This provides a more convenient syntax for calling `predictNextTokenScores`.
5858
///
5959
/// - Parameters:
60-
/// - tokens: The input token sequence
61-
/// - config: The generation configuration containing model parameters
62-
/// - Returns: A shaped array containing the logits for the next token prediction
60+
/// - input: The input sequence tensor.
61+
/// - config: The generation configuration containing model parameters.
62+
/// - Returns: MLTensor with the raw scores of the next token.
6363
func callAsFunction(_ input: MLTensor, config: GenerationConfig) async -> MLTensor {
6464
await predictNextTokenScores(input, config: config)
6565
}

0 commit comments

Comments (0)