diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index da39ab0e..c10c3496 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -19,26 +19,26 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Validate Gradle Wrapper - uses: gradle/wrapper-validation-action@v1 + uses: gradle/actions/wrapper-validation@v3 - name: Configure JDK - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: temurin java-version: 11 - name: Setup Gradle - uses: gradle/gradle-build-action@v2 + uses: gradle/gradle-build-action@v3 - name: Run lint run: ./gradlew spotlessCheck test-jvm: name: Test JVM - runs-on: macos-11 + runs-on: ubuntu-latest needs: lint strategy: matrix: @@ -73,19 +73,19 @@ jobs: test: "*.misc.*" steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Validate Gradle Wrapper - uses: gradle/wrapper-validation-action@v1 + uses: gradle/actions/wrapper-validation@v3 - name: Configure JDK - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: temurin java-version: 11 - name: Setup Gradle - uses: gradle/gradle-build-action@v2 + uses: gradle/gradle-build-action@v3 - name: Test ${{ matrix.name }} run: ./gradlew :openai-client:jvmTest --tests ${{ matrix.test }} @@ -100,19 +100,19 @@ jobs: contents: write steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Validate Gradle Wrapper - uses: gradle/wrapper-validation-action@v1 + uses: gradle/actions/wrapper-validation@v3 - name: Configure JDK - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: temurin java-version: 11 - name: Setup Gradle - uses: gradle/gradle-build-action@v2 + uses: gradle/gradle-build-action@v3 - name: Build docs run: ./gradlew dokkaHtmlMultiModule diff --git a/.github/workflows/publish-snapshot.yml b/.github/workflows/publish-snapshot.yml index ded52e9b..540abe42 
100644 --- a/.github/workflows/publish-snapshot.yml +++ b/.github/workflows/publish-snapshot.yml @@ -13,7 +13,7 @@ jobs: is_snapshot: ${{ steps.check_snapshot.outputs.is_snapshot }} steps: - name: Check out repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for SNAPSHOT suffix id: check_snapshot run: | @@ -26,21 +26,21 @@ jobs: publish: name: Publish to Snapshot - runs-on: macos-11 + runs-on: macos-latest needs: check-version if: ${{ needs.check-version.outputs.is_snapshot == 'true' }} steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Configure JDK - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: temurin java-version: 11 - name: Setup Gradle - uses: gradle/gradle-build-action@v2 + uses: gradle/gradle-build-action@v3 - name: Upload Artifacts run: ./gradlew publishAllPublicationsToMavenCentral diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 3e59dc00..80a991cc 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -9,19 +9,19 @@ env: jobs: publish: name: Publish to Sonatype - runs-on: macos-11 + runs-on: macos-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Configure JDK - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: temurin java-version: 11 - name: Setup Gradle - uses: gradle/gradle-build-action@v2 + uses: gradle/gradle-build-action@v3 - name: Upload Artifacts run: ./gradlew publishAllPublicationsToMavenCentral --no-configuration-cache diff --git a/.gitignore b/.gitignore index 0de18f92..b5d2df59 100644 --- a/.gitignore +++ b/.gitignore @@ -21,4 +21,6 @@ coverage-error.log *.hprof # kmp -kotlin-js-store \ No newline at end of file +kotlin-js-store + +.kotlin diff --git a/CHANGELOG.md b/CHANGELOG.md index 3419cfce..258db3f1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,8 +1,41 @@ -## Unreleased +## 4.0.1 +> Published 02 Feb 2025 
+ +### Added +- **chat**: add reasoning effort, max completion tokens, store options for reasoning model support (#415) (thanks @Taewan-P) + +## 4.0.0 +> Published 01 Feb 2025 + +### Added +- WasmJs target (#387) +- **assistants**: add structured response (#391) (thanks @ahmedmirza994) +- **chat**: Add support for structured outputs (#397) +- **chat**: make ChatCompletionRequest a data class (#399) (thanks @yunmanger1) +- **assistant**: add streaming (#400) (thanks @Daltomon) + +### Fixed +- **runs**: support for file search tool calls (#405) (thanks @averyaube) + +### Changed +- Upgrade to Kotlin 2.0 (#387) +- Update Ktor to 3.0 (#387) + +### Breaking Changes +- Replace okio by kotlinx.io (#387) + +## 3.8.2 ### Added - **chat**: add stream options (#363) - **chat**: add `instanceId` field to `ChatCompletionRequest` (#359) (thanks @harishv-collab) +- **messages**: add assistant tools to attachments (#370) +- **assistants**: add `attachments.fileId` field to `MessageRequest` (#367) (thanks @0chil) +- **runs**: feat(runs): add new fields into assistant run (#366) (thanks @ahmedmirza994) + +### Fixed +- **messages**: update `quote` to optional field for openai compatibility assistant on FileCitation (#361) (thanks @AntoDev) +- **messages**: serialization exception while retrieving thread messages (#369) (thanks @meroving) # 3.8.1 > Published 28 Jun 2024 diff --git a/README.md b/README.md index 34aa4911..0d9458a1 100644 --- a/README.md +++ b/README.md @@ -17,7 +17,7 @@ repositories { } dependencies { - implementation "com.aallam.openai:openai-client:3.8.1" + implementation "com.aallam.openai:openai-client:4.0.1" } ``` @@ -30,7 +30,7 @@ Alternatively, you can use [openai-client-bom](/openai-client-bom) by adding th ```groovy dependencies { // import Kotlin API client BOM - implementation platform('com.aallam.openai:openai-client-bom:3.8.1') + implementation platform('com.aallam.openai:openai-client-bom:4.0.1') // define dependencies without versions implementation 
'com.aallam.openai:openai-client' diff --git a/build-support/build.gradle.kts b/build-support/build.gradle.kts index 182bf712..8045b8b8 100644 --- a/build-support/build.gradle.kts +++ b/build-support/build.gradle.kts @@ -8,7 +8,7 @@ repositories { } dependencies { - compileOnly(kotlin("gradle-plugin")) + compileOnly(kotlin("gradle-plugin", "2.0.0")) compileOnly(kotlin("gradle-plugin-api")) } diff --git a/build-support/src/main/kotlin/Platforms.kt b/build-support/src/main/kotlin/Platforms.kt index 74abc279..57660c15 100644 --- a/build-support/src/main/kotlin/Platforms.kt +++ b/build-support/src/main/kotlin/Platforms.kt @@ -1,6 +1,8 @@ import org.gradle.kotlin.dsl.creating import org.gradle.kotlin.dsl.getValue import org.jetbrains.kotlin.gradle.dsl.KotlinMultiplatformExtension +import org.jetbrains.kotlin.konan.target.HostManager +import org.jetbrains.kotlin.gradle.targets.js.dsl.ExperimentalWasmDsl fun KotlinMultiplatformExtension.native() { sourceSets.apply { @@ -17,28 +19,37 @@ fun KotlinMultiplatformExtension.native() { } // Darwin targets - val darwinMain by creating { dependsOn(nativeMain) } - val darwinTest by creating { dependsOn(nativeTest) } - listOf( - iosX64(), - iosArm64(), - iosSimulatorArm64(), - macosX64(), - macosArm64(), - tvosX64(), - tvosArm64(), - tvosSimulatorArm64(), - watchosArm32(), - watchosArm64(), - watchosX64(), - watchosSimulatorArm64(), - ).forEach { target -> - getByName("${target.name}Main").dependsOn(darwinMain) - getByName("${target.name}Test").dependsOn(darwinTest) + if (HostManager.hostIsMac) { + val darwinMain by creating { dependsOn(nativeMain) } + val darwinTest by creating { dependsOn(nativeTest) } + listOf( + iosX64(), + iosArm64(), + iosSimulatorArm64(), + macosX64(), + macosArm64(), + tvosX64(), + tvosArm64(), + tvosSimulatorArm64(), + watchosArm32(), + watchosArm64(), + watchosX64(), + watchosSimulatorArm64(), + ).forEach { target -> + getByName("${target.name}Main").dependsOn(darwinMain) + 
getByName("${target.name}Test").dependsOn(darwinTest) + } } } } +@OptIn(ExperimentalWasmDsl::class) +fun KotlinMultiplatformExtension.jsWasm() { + wasmJs { + nodejs() + } +} + fun KotlinMultiplatformExtension.jsNode() { js { compilations.all { @@ -48,12 +59,6 @@ fun KotlinMultiplatformExtension.jsNode() { metaInfo = true } } - nodejs { - testTask { - useMocha { - timeout = "300s" - } - } - } + nodejs() } } diff --git a/gradle.properties b/gradle.properties index cc91110b..1a9d048d 100644 --- a/gradle.properties +++ b/gradle.properties @@ -1,11 +1,10 @@ kotlin.code.style=official kotlin.mpp.stability.nowarn=true kotlin.mpp.commonizerLogLevel=info -kotlin.js.compiler=ir # Lib GROUP=com.aallam.openai -VERSION_NAME=3.8.2-SNAPSHOT +VERSION_NAME=4.0.1 # OSS SONATYPE_HOST=DEFAULT diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index 872e7ca3..31ef2341 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -1,9 +1,9 @@ [versions] -kotlin = "1.9.0" -coroutines = "1.7.2" -serialization = "1.5.1" -ktor = "2.3.2" -okio = "3.4.0" +kotlin = "2.0.20" +coroutines = "1.8.1" +serialization = "1.7.3" +ktor = "3.0.0" +kotlinio = "0.5.4" logback = "1.4.8" [libraries] @@ -28,10 +28,9 @@ ktor-client-jetty = { group = "io.ktor", name = "ktor-client-jetty", version.ref ktor-client-mock = { group = "io.ktor", name = "ktor-client-mock", version.ref = "ktor" } ktor-client-okhttp = { group = "io.ktor", name = "ktor-client-okhttp", version.ref = "ktor" } ktor-client-darwin = { group = "io.ktor", name = "ktor-client-darwin", version.ref = "ktor" } -# Okio -okio = { group = "com.squareup.okio", name = "okio", version.ref = "okio" } -okio-nodefilesystem = { group = "com.squareup.okio", name = "okio-nodefilesystem", version.ref = "okio" } -okio-fakefilesystem = { group = "com.squareup.okio", name = "okio-fakefilesystem", version.ref = "okio" } +# IO +kotlinx-io-core = { group = "org.jetbrains.kotlinx", name = "kotlinx-io-core", version.ref = "kotlinio" } 
+kotlinx-io-bytestring = { group = "org.jetbrains.kotlinx", name = "kotlinx-io-bytestring", version.ref = "kotlinio" } # Logback logback-classic = { group = "ch.qos.logback", name = "logback-classic", version.ref = "logback" } # ulid @@ -41,7 +40,7 @@ ktoken = { group = "com.aallam.ktoken", name = "ktoken", version = "0.3.0" } [plugins] kotlin-multiplaform = { id = "org.jetbrains.kotlin.multiplatform", version.ref = "kotlin" } kotlinx-serialization = { id = "org.jetbrains.kotlin.plugin.serialization", version.ref = "kotlin" } -kotlinx-binary-validator = { id = "org.jetbrains.kotlinx.binary-compatibility-validator", version = "0.13.2" } +kotlinx-binary-validator = { id = "org.jetbrains.kotlinx.binary-compatibility-validator", version = "0.14.0" } maven-publish = { id = "com.vanniktech.maven.publish", version = "0.25.3" } -spotless = { id = "com.diffplug.gradle.spotless", version = "6.20.0" } -dokka = { id = "org.jetbrains.dokka", version = "1.8.20" } \ No newline at end of file +spotless = { id = "com.diffplug.gradle.spotless", version = "6.25.0" } +dokka = { id = "org.jetbrains.dokka", version = "1.9.20" } \ No newline at end of file diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index 9f4197d5..df97d72b 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -1,6 +1,6 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-8.2.1-bin.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-8.10.2-bin.zip networkTimeout=10000 validateDistributionUrl=true zipStoreBase=GRADLE_USER_HOME diff --git a/guides/GettingStarted.md b/guides/GettingStarted.md index 247f3798..0da2e5bf 100644 --- a/guides/GettingStarted.md +++ b/guides/GettingStarted.md @@ -569,7 +569,7 @@ Modifies an assistant. 
val assistant = openAI.assistant( id = AssistantId("asst_abc123"), request = AssistantRequest( instructions = "You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.", - tools = listOf(AssistantTool.RetrievalTool), + tools = listOf(AssistantTool.FileSearch), model = ModelId("gpt-4"), fileIds = listOf(FileId("file-abc123"), FileId("file-abc123")), ) @@ -849,3 +849,57 @@ val runSteps = openAI.runSteps( runId = RunId("run_abc123") ) ``` + +### Event streaming + +Create a thread and run it in one request and process streaming events. + +```kotlin +openAI.createStreamingThreadRun( + request = ThreadRunRequest( + assistantId = AssistantId("asst_abc123"), + thread = ThreadRequest( + messages = listOf( + ThreadMessage( + role = Role.User, + content = "Explain deep learning to a 5 year old." + ) + ) + ), + ) + .onEach { assistantStreamEvent: AssistantStreamEvent -> println(assistantStreamEvent) } + .collect() +) + +Get data object from AssistantStreamEvent. + +```kotlin +//Type of data for generic type can be found in AssistantStreamEventType +when(assistantStreamEvent.type) { + AssistantStreamEventType.THREAD_CREATED -> { + val thread = assistantStreamEvent.getData() + ... + } + AssistantStreamEventType.MESSAGE_CREATED -> { + val message = assistantStreamEvent.getData() + ... + } + AssistantStreamEventType.UNKNOWN -> { + //Data field is a string and can be used instead of calling getData + val data = assistantStreamEvent.data + //Handle unknown message type + } +} +``` + +If a new event type is released before the library is updated, you can create and deserialize your own type by providing a KSerializer. + +```kotlin +when(assistantStreamEvent.type) { + AssistantStreamEventType.UNKNOWN -> { + val data = assistantStreamEvent.getData(myCustomSerializer) + ...
+ } +} +``` diff --git a/openai-client/build.gradle.kts b/openai-client/build.gradle.kts index 90d67894..48a3321e 100644 --- a/openai-client/build.gradle.kts +++ b/openai-client/build.gradle.kts @@ -1,3 +1,5 @@ +import org.jetbrains.kotlin.konan.target.HostManager + plugins { kotlin("multiplatform") kotlin("plugin.serialization") @@ -12,6 +14,7 @@ kotlin { explicitApi() jvm() jsNode() + jsWasm() native() sourceSets { @@ -29,7 +32,8 @@ kotlin { dependencies { api(projects.openaiCore) api(libs.coroutines.core) - api(libs.okio) + api(libs.kotlinx.io.core) + implementation(libs.kotlinx.io.bytestring) implementation(libs.serialization.json) api(libs.ktor.client.core) implementation(libs.ktor.client.logging) @@ -44,8 +48,6 @@ kotlin { implementation(kotlin("test-common")) implementation(kotlin("test-annotations-common")) implementation(libs.coroutines.test) - implementation(libs.okio.fakefilesystem) - implementation(libs.ulid) } } val jvmMain by getting @@ -59,7 +61,6 @@ kotlin { val jsMain by getting { dependencies { - implementation(libs.okio.nodefilesystem) } } val jsTest by getting { @@ -67,14 +68,25 @@ kotlin { implementation(kotlin("test-js")) } } + val wasmJsMain by getting { + dependencies { + } + } + val wasmJsTest by getting { + dependencies { + implementation(kotlin("test-wasm-js")) + } + } val desktopTest by getting { dependencies { implementation(libs.ktor.client.curl) } } - val darwinTest by getting { - dependencies { - implementation(libs.ktor.client.darwin) + if (HostManager.hostIsMac) { + val darwinTest by getting { + dependencies { + implementation(libs.ktor.client.darwin) + } } } } diff --git a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/Closeable.kt b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/Closeable.kt deleted file mode 100644 index 101c8d4e..00000000 --- a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/Closeable.kt +++ /dev/null @@ -1,13 +0,0 @@ -package com.aallam.openai.client - -/** - * Defines 
a closeable resource. - * This will be replaced by [AutoCloseable] once it becomes stable. - */ -public expect interface Closeable { - - /** - * Closes underlying resources - */ - public fun close() -} diff --git a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/OpenAI.kt b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/OpenAI.kt index f38771fe..ff7b7560 100644 --- a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/OpenAI.kt +++ b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/OpenAI.kt @@ -11,7 +11,7 @@ import kotlin.time.Duration.Companion.seconds * OpenAI API. */ public interface OpenAI : Completions, Files, Edits, Embeddings, Models, Moderations, FineTunes, Images, Chat, Audio, - FineTuning, Assistants, Threads, Runs, Messages, VectorStores, Closeable + FineTuning, Assistants, Threads, Runs, Messages, VectorStores, Batch, AutoCloseable /** * Creates an instance of [OpenAI]. diff --git a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/Runs.kt b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/Runs.kt index 1d714e6b..a518124f 100644 --- a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/Runs.kt +++ b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/Runs.kt @@ -6,6 +6,8 @@ import com.aallam.openai.api.core.SortOrder import com.aallam.openai.api.core.Status import com.aallam.openai.api.run.* import com.aallam.openai.api.thread.ThreadId +import io.ktor.sse.ServerSentEvent +import kotlinx.coroutines.flow.Flow /** * Represents an execution run on a thread. @@ -23,6 +25,21 @@ public interface Runs { @BetaOpenAI public suspend fun createRun(threadId: ThreadId, request: RunRequest, requestOptions: RequestOptions? = null): Run + /** + * Create a run with event streaming. + * + * @param threadId The ID of the thread to run + * @param request request for a run + * @param requestOptions request options. + * @param block a lambda function that will be called for each event. 
+ */ + @BetaOpenAI + public suspend fun createStreamingRun( + threadId: ThreadId, + request: RunRequest, + requestOptions: RequestOptions? = null + ) : Flow + /** * Retrieves a run. * @@ -92,6 +109,25 @@ public interface Runs { requestOptions: RequestOptions? = null ): Run + /** + * When a run has the status: [Status.RequiresAction] and required action is [RequiredAction.SubmitToolOutputs], + * this endpoint can be used to submit the outputs from the tool calls once they're all completed. + * All outputs must be submitted in a single request using event streaming. + * + * @param threadId the ID of the thread to which this run belongs + * @param runId the ID of the run to submit tool outputs for + * @param output list of tool outputs to submit + * @param requestOptions request options. + * @param block a lambda function that will be called for each event. + */ + @BetaOpenAI + public suspend fun submitStreamingToolOutput( + threadId: ThreadId, + runId: RunId, + output: List, + requestOptions: RequestOptions? = null + ) : Flow + /** * Cancels a run that is [Status.InProgress]. * @@ -111,6 +147,19 @@ public interface Runs { @BetaOpenAI public suspend fun createThreadRun(request: ThreadRunRequest, requestOptions: RequestOptions? = null): Run + /** + * Create a thread and run it in one request with event streaming. + * + * @param request request for a thread run + * @param requestOptions request options. + * @param block a lambda function that will be called for each event. + */ + @BetaOpenAI + public suspend fun createStreamingThreadRun( + request: ThreadRunRequest, + requestOptions: RequestOptions? = null + ) : Flow + /** * Retrieves a run step. 
* diff --git a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/extension/AssistantStreamEvent.kt b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/extension/AssistantStreamEvent.kt new file mode 100644 index 00000000..c66ba111 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/extension/AssistantStreamEvent.kt @@ -0,0 +1,30 @@ +package com.aallam.openai.client.extension + +import com.aallam.openai.api.run.AssistantStreamEvent +import com.aallam.openai.client.internal.JsonLenient +import kotlinx.serialization.KSerializer + +/** + * Get the data of the [AssistantStreamEvent] using the provided [serializer] from the corresponding event type. + * @param the type of the data. + * @throws IllegalStateException if the [AssistantStreamEvent] data is null. + * @throws ClassCastException if the [AssistantStreamEvent] data cannot be cast to the provided type. + */ +@Suppress("UNCHECKED_CAST") +public fun AssistantStreamEvent.getData(): T { + return type + .let { it.serializer as? KSerializer } + ?.let(::getData) + ?: throw IllegalStateException("Failed to decode ServerSentEvent: $rawType") +} + + +/** + * Get the data of the [AssistantStreamEvent] using the provided [serializer]. + * @throws IllegalStateException if the [AssistantStreamEvent] data is null. + * @throws ClassCastException if the [AssistantStreamEvent] data cannot be cast to the provided type. 
+ */ +public fun AssistantStreamEvent.getData(serializer: KSerializer): T = + data + ?.let { JsonLenient.decodeFromString(serializer, it) } + ?: throw IllegalStateException("ServerSentEvent data was null: $rawType") diff --git a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/extension/ServerSentEvent.kt b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/extension/ServerSentEvent.kt new file mode 100644 index 00000000..635c0d43 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/extension/ServerSentEvent.kt @@ -0,0 +1,19 @@ +package com.aallam.openai.client.extension + +import com.aallam.openai.api.run.AssistantStreamEvent +import com.aallam.openai.api.run.AssistantStreamEventType +import com.aallam.openai.client.internal.JsonLenient +import io.ktor.sse.ServerSentEvent +import kotlinx.serialization.KSerializer + +/** + * Convert a [ServerSentEvent] to [AssistantStreamEvent]. Type will be [AssistantStreamEventType.UNKNOWN] if the event is null or unrecognized. 
+ */ +internal fun ServerSentEvent.toAssistantStreamEvent() : AssistantStreamEvent = + AssistantStreamEvent( + event, + event + ?.let(AssistantStreamEventType::fromEvent) + ?:AssistantStreamEventType.UNKNOWN, + data + ) diff --git a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/HttpClient.kt b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/HttpClient.kt index 85280f74..135d3e64 100644 --- a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/HttpClient.kt +++ b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/HttpClient.kt @@ -11,6 +11,7 @@ import io.ktor.client.plugins.auth.* import io.ktor.client.plugins.auth.providers.* import io.ktor.client.plugins.contentnegotiation.* import io.ktor.client.plugins.logging.* +import io.ktor.client.plugins.sse.SSE import io.ktor.http.* import io.ktor.serialization.kotlinx.* import io.ktor.util.* @@ -71,6 +72,8 @@ internal fun createHttpClient(config: OpenAIConfig): HttpClient { exponentialDelay(config.retry.base, config.retry.maxDelay.inWholeMilliseconds) } + install(SSE) + defaultRequest { url(config.host.baseUrl) config.host.queryParams.onEach { (key, value) -> url.parameters.appendIfNameAbsent(key, value) } diff --git a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/OpenAIApi.kt b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/OpenAIApi.kt index 68b7cc2e..4612c433 100644 --- a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/OpenAIApi.kt +++ b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/OpenAIApi.kt @@ -29,4 +29,4 @@ internal class OpenAIApi( Messages by MessagesApi(requester), VectorStores by VectorStoresApi(requester), Batch by BatchApi(requester), - Closeable by requester + AutoCloseable by requester diff --git a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/api/AudioApi.kt 
b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/api/AudioApi.kt index 85dcb734..7246d3b7 100644 --- a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/api/AudioApi.kt +++ b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/api/AudioApi.kt @@ -19,11 +19,8 @@ internal class AudioApi(val requester: HttpRequester) : Audio { @BetaOpenAI override suspend fun transcription(request: TranscriptionRequest, requestOptions: RequestOptions?): Transcription { return when (request.responseFormat) { - AudioResponseFormat.Json, AudioResponseFormat.VerboseJson, null -> transcriptionAsJson( - request, - requestOptions - ) - + AudioResponseFormat.Json, AudioResponseFormat.VerboseJson, null -> + transcriptionAsJson(request, requestOptions) else -> transcriptionAsString(request, requestOptions) } } diff --git a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/api/CompletionsApi.kt b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/api/CompletionsApi.kt index a42d7205..a8cc916a 100644 --- a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/api/CompletionsApi.kt +++ b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/api/CompletionsApi.kt @@ -10,7 +10,6 @@ import com.aallam.openai.client.internal.http.perform import io.ktor.client.call.* import io.ktor.client.request.* import io.ktor.http.* -import io.ktor.util.* import kotlinx.coroutines.flow.Flow import kotlinx.coroutines.flow.flow @@ -29,7 +28,6 @@ internal class CompletionsApi(private val requester: HttpRequester) : Completion } } - @OptIn(InternalAPI::class) override fun completions(request: CompletionRequest): Flow { val builder = HttpRequestBuilder().apply { method = HttpMethod.Post diff --git a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/api/RunsApi.kt b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/api/RunsApi.kt index 
6b63177b..7dfa17ce 100644 --- a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/api/RunsApi.kt +++ b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/api/RunsApi.kt @@ -1,5 +1,6 @@ package com.aallam.openai.client.internal.api +import com.aallam.openai.api.BetaOpenAI import com.aallam.openai.api.core.PaginatedList import com.aallam.openai.api.core.RequestOptions import com.aallam.openai.api.core.SortOrder @@ -13,13 +14,16 @@ import com.aallam.openai.client.internal.http.perform import io.ktor.client.call.* import io.ktor.client.request.* import io.ktor.http.* +import kotlinx.coroutines.flow.Flow +import kotlinx.coroutines.flow.collect +import kotlinx.coroutines.flow.onEach internal class RunsApi(val requester: HttpRequester) : Runs { override suspend fun createRun(threadId: ThreadId, request: RunRequest, requestOptions: RequestOptions?): Run { return requester.perform { it.post { url(path = "${ApiPath.Threads}/${threadId.id}/runs") - setBody(request) + setBody(request.copy(stream = false)) contentType(ContentType.Application.Json) beta("assistants", 2) requestOptions(requestOptions) @@ -27,6 +31,20 @@ internal class RunsApi(val requester: HttpRequester) : Runs { } } + @BetaOpenAI + override suspend fun createStreamingRun(threadId: ThreadId, request: RunRequest, requestOptions: RequestOptions?) 
: Flow { + return requester + .performSse { + url(path = "${ApiPath.Threads}/${threadId.id}/runs") + setBody(request.copy(stream = true)) + contentType(ContentType.Application.Json) + accept(ContentType.Text.EventStream) + beta("assistants", 2) + requestOptions(requestOptions) + method = HttpMethod.Post + } + } + override suspend fun getRun(threadId: ThreadId, runId: RunId, requestOptions: RequestOptions?): Run { return requester.perform { it.get { @@ -95,6 +113,25 @@ internal class RunsApi(val requester: HttpRequester) : Runs { } } + @BetaOpenAI + override suspend fun submitStreamingToolOutput( + threadId: ThreadId, + runId: RunId, + output: List, + requestOptions: RequestOptions? + ) : Flow { + return requester + .performSse { + url(path = "${ApiPath.Threads}/${threadId.id}/runs/${runId.id}/submit_tool_outputs") + setBody(mapOf("tool_outputs" to output, "stream" to true)) + contentType(ContentType.Application.Json) + accept(ContentType.Text.EventStream) + beta("assistants", 2) + requestOptions(requestOptions) + method = HttpMethod.Post + } + } + override suspend fun cancel(threadId: ThreadId, runId: RunId, requestOptions: RequestOptions?): Run { return requester.perform { it.post { @@ -109,7 +146,7 @@ internal class RunsApi(val requester: HttpRequester) : Runs { return requester.perform { it.post { url(path = "${ApiPath.Threads}/runs") - setBody(request) + setBody(request.copy(stream = false)) contentType(ContentType.Application.Json) beta("assistants", 2) requestOptions(requestOptions) @@ -117,6 +154,24 @@ internal class RunsApi(val requester: HttpRequester) : Runs { } } + @BetaOpenAI + override suspend fun createStreamingThreadRun( + request: ThreadRunRequest, + requestOptions: RequestOptions? 
+ ) : Flow { + return requester + .performSse { + url(path = "${ApiPath.Threads}/runs") + setBody(request.copy(stream = true)) + contentType(ContentType.Application.Json) + accept(ContentType.Text.EventStream) + beta("assistants", 2) + requestOptions(requestOptions) + method = HttpMethod.Post + } + } + + override suspend fun runStep( threadId: ThreadId, runId: RunId, diff --git a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/extension/Request.kt b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/extension/Request.kt index 8aabcc8d..29aee15d 100644 --- a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/extension/Request.kt +++ b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/extension/Request.kt @@ -6,10 +6,10 @@ import com.aallam.openai.client.internal.JsonLenient import io.ktor.client.request.* import io.ktor.client.request.forms.* import io.ktor.http.* -import io.ktor.utils.io.core.* +import io.ktor.utils.io.core.readAvailable +import io.ktor.utils.io.core.writeFully +import kotlinx.io.buffered import kotlinx.serialization.json.* -import okio.buffer -import okio.use /** * Adds `stream` parameter to the request. 
@@ -27,14 +27,13 @@ internal inline fun streamRequestOf(serializable: T): JsonElement { } internal fun FormBuilder.appendFileSource(key: String, fileSource: FileSource) { - append(key, fileSource.name, ContentType.Application.OctetStream) { - fileSource.source.buffer().use { source -> + append(key = key, filename = fileSource.name, contentType = ContentType.Application.OctetStream) { + fileSource.source.buffered().use { source -> val buffer = ByteArray(8192) // 8 KiB var bytesRead: Int - while (source.read(buffer).also { bytesRead = it } != -1) { - writeFully(src = buffer, offset = 0, length = bytesRead) + while (source.readAvailable(buffer).also { bytesRead = it } != 0) { + writeFully(buffer = buffer, offset = 0, length = bytesRead) } - } } } diff --git a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/http/HttpRequester.kt b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/http/HttpRequester.kt index 2a00dd96..359f7cde 100644 --- a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/http/HttpRequester.kt +++ b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/http/HttpRequester.kt @@ -1,21 +1,32 @@ package com.aallam.openai.client.internal.http -import com.aallam.openai.client.Closeable +import com.aallam.openai.api.run.AssistantStreamEvent import io.ktor.client.* +import io.ktor.client.plugins.sse.ClientSSESession import io.ktor.client.request.* import io.ktor.client.statement.* +import io.ktor.sse.ServerSentEvent import io.ktor.util.reflect.* +import kotlinx.coroutines.flow.Flow /** * Http request performer. */ -internal interface HttpRequester : Closeable { +internal interface HttpRequester : AutoCloseable { /** * Perform an HTTP request and get a result. */ suspend fun perform(info: TypeInfo, block: suspend (HttpClient) -> HttpResponse): T + /** + * Perform an HTTP request and process emitted server-side events. 
+ * + */ + suspend fun performSse( + builderBlock: HttpRequestBuilder.() -> Unit + ): Flow + /** * Perform an HTTP request and get a result. * diff --git a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/http/HttpTransport.kt b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/http/HttpTransport.kt index c8c7bc03..445d2183 100644 --- a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/http/HttpTransport.kt +++ b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/http/HttpTransport.kt @@ -1,15 +1,26 @@ package com.aallam.openai.client.internal.http import com.aallam.openai.api.exception.* +import com.aallam.openai.api.run.AssistantStreamEvent +import com.aallam.openai.client.extension.toAssistantStreamEvent +import com.aallam.openai.client.internal.api.ApiPath import io.ktor.client.* import io.ktor.client.call.* import io.ktor.client.network.sockets.* import io.ktor.client.plugins.* +import io.ktor.client.plugins.sse.ClientSSESession +import io.ktor.client.plugins.sse.sseSession import io.ktor.client.request.* import io.ktor.client.statement.* +import io.ktor.http.ContentType +import io.ktor.sse.ServerSentEvent import io.ktor.util.reflect.* import io.ktor.utils.io.errors.* import kotlinx.coroutines.CancellationException +import kotlinx.coroutines.flow.Flow +import kotlinx.coroutines.flow.collect +import kotlinx.coroutines.flow.map +import kotlinx.coroutines.flow.onEach /** HTTP transport layer */ internal class HttpTransport(private val httpClient: HttpClient) : HttpRequester { @@ -35,6 +46,19 @@ internal class HttpTransport(private val httpClient: HttpClient) : HttpRequester } } + override suspend fun performSse( + builderBlock: HttpRequestBuilder.() -> Unit + ): Flow { + try { + return httpClient + .sseSession(block = builderBlock) + .incoming + .map(ServerSentEvent::toAssistantStreamEvent) + } catch (e: Exception) { + throw handleException(e) + } + } + override fun close() { 
httpClient.close() } diff --git a/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestAssistants.kt b/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestAssistants.kt index 671e09f5..35a02c62 100644 --- a/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestAssistants.kt +++ b/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestAssistants.kt @@ -4,12 +4,19 @@ import com.aallam.openai.api.assistant.AssistantResponseFormat import com.aallam.openai.api.assistant.AssistantTool import com.aallam.openai.api.assistant.assistantRequest import com.aallam.openai.api.chat.ToolCall -import com.aallam.openai.api.core.RequestOptions import com.aallam.openai.api.model.ModelId import com.aallam.openai.api.run.RequiredAction import com.aallam.openai.api.run.Run import com.aallam.openai.client.internal.JsonLenient -import kotlin.test.* +import kotlinx.serialization.json.JsonArray +import kotlinx.serialization.json.JsonPrimitive +import kotlinx.serialization.json.buildJsonObject +import kotlinx.serialization.json.put +import kotlin.test.Test +import kotlin.test.assertEquals +import kotlin.test.assertIs +import kotlin.test.assertNull +import kotlin.test.assertTrue class TestAssistants : TestOpenAI() { @@ -144,4 +151,64 @@ class TestAssistants : TestOpenAI() { val action = decoded.requiredAction as RequiredAction.SubmitToolOutputs assertIs(action.toolOutputs.toolCalls.first()) } + + @Test + fun jsonSchemaAssistant() = test { + val jsonSchema = AssistantResponseFormat.JSON_SCHEMA( + name = "TestSchema", + description = "A test schema", + schema = buildJsonObject { + put("type", "object") + put("properties", buildJsonObject { + put("name", buildJsonObject { + put("type", "string") + }) + }) + put("required", JsonArray(listOf(JsonPrimitive("name")))) + put("additionalProperties", false) + }, + strict = true + ) + + val request = assistantRequest { + name = "Schema Assistant" + model = ModelId("gpt-4o-mini") + responseFormat = jsonSchema 
+ } + + val assistant = openAI.assistant( + request = request, + ) + assertEquals(request.name, assistant.name) + assertEquals(request.model, assistant.model) + assertEquals(request.responseFormat, assistant.responseFormat) + + val getAssistant = openAI.assistant( + assistant.id, + ) + assertEquals(getAssistant, assistant) + + val assistants = openAI.assistants() + assertTrue { assistants.isNotEmpty() } + + val updated = assistantRequest { + name = "Updated Schema Assistant" + responseFormat = AssistantResponseFormat.AUTO + } + val updatedAssistant = openAI.assistant( + assistant.id, + updated, + ) + assertEquals(updated.name, updatedAssistant.name) + assertEquals(updated.responseFormat, updatedAssistant.responseFormat) + + openAI.delete( + updatedAssistant.id, + ) + + val fileGetAfterDelete = openAI.assistant( + updatedAssistant.id, + ) + assertNull(fileGetAfterDelete) + } } diff --git a/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestAudio.kt b/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestAudio.kt index fcc8fc75..563ebb5c 100644 --- a/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestAudio.kt +++ b/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestAudio.kt @@ -5,8 +5,6 @@ import com.aallam.openai.api.file.FileSource import com.aallam.openai.api.model.ModelId import com.aallam.openai.client.internal.TestFileSystem import com.aallam.openai.client.internal.testFilePath -import okio.FileSystem -import okio.Path.Companion.toPath import kotlin.test.Test import kotlin.test.assertEquals import kotlin.test.assertTrue diff --git a/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestChatCompletionChunk.kt b/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestChatCompletionChunk.kt index 90ff4d81..f11341fe 100644 --- a/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestChatCompletionChunk.kt +++ 
b/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestChatCompletionChunk.kt @@ -5,21 +5,22 @@ import com.aallam.openai.api.file.FileSource import com.aallam.openai.client.internal.JsonLenient import com.aallam.openai.client.internal.TestFileSystem import com.aallam.openai.client.internal.testFilePath +import kotlinx.io.buffered +import kotlinx.io.readByteArray import kotlin.test.Test -import okio.buffer class TestChatCompletionChunk { @Test fun testContentFilterDeserialization() { val json = FileSource(path = testFilePath("json/azureContentFilterChunk.json"), fileSystem = TestFileSystem) - val actualJson = json.source.buffer().readByteArray().decodeToString() + val actualJson = json.source.buffered().readByteArray().decodeToString() JsonLenient.decodeFromString(actualJson) } @Test fun testDeserialization() { val json = FileSource(path = testFilePath("json/chatChunk.json"), fileSystem = TestFileSystem) - val actualJson = json.source.buffer().readByteArray().decodeToString() + val actualJson = json.source.buffered().readByteArray().decodeToString() JsonLenient.decodeFromString(actualJson) } } diff --git a/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestChatCompletions.kt b/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestChatCompletions.kt index 0a3f6c6f..5c06dc0c 100644 --- a/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestChatCompletions.kt +++ b/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestChatCompletions.kt @@ -1,6 +1,7 @@ package com.aallam.openai.client import com.aallam.openai.api.chat.* +import com.aallam.openai.api.chat.ChatResponseFormat.Companion.jsonSchema import com.aallam.openai.api.model.ModelId import kotlinx.coroutines.flow.collect import kotlinx.coroutines.flow.launchIn @@ -9,6 +10,9 @@ import kotlinx.coroutines.launch import kotlinx.coroutines.test.advanceTimeBy import kotlinx.serialization.Serializable import kotlinx.serialization.json.Json +import 
kotlinx.serialization.json.JsonArray +import kotlinx.serialization.json.JsonObject +import kotlinx.serialization.json.JsonPrimitive import kotlin.coroutines.cancellation.CancellationException import kotlin.test.* @@ -131,6 +135,64 @@ class TestChatCompletions : TestOpenAI() { assertNotNull(answer.response) } + @Test + fun jsonSchema() = test { + val schemaJson = JsonObject(mapOf( + "type" to JsonPrimitive("object"), + "properties" to JsonObject(mapOf( + "question" to JsonObject(mapOf( + "type" to JsonPrimitive("string"), + "description" to JsonPrimitive("The question that was asked") + )), + "response" to JsonObject(mapOf( + "type" to JsonPrimitive("string"), + "description" to JsonPrimitive("The answer to the question") + )) + )), + "required" to JsonArray(listOf( + JsonPrimitive("question"), + JsonPrimitive("response") + )), + "additionalProperties" to JsonPrimitive(false) + )) + + val jsonSchema = JsonSchema( + name = "AnswerSchema", + schema = schemaJson, + strict = true + ) + + val request = chatCompletionRequest { + model = ModelId("gpt-4o-mini-2024-07-18") + responseFormat = jsonSchema(jsonSchema) + messages { + message { + role = ChatRole.System + content = "You are a helpful assistant.!" + } + message { + role = ChatRole.System + content = """All your answers should be a valid JSON + """.trimMargin() + } + message { + role = ChatRole.User + content = "Who won the world cup in 1998?" + } + } + } + val response = openAI.chatCompletion(request) + val content = response.choices.first().message.content.orEmpty() + + @Serializable + data class Answer(val question: String? = null, val response: String? 
= null) + + val answer = Json.decodeFromString(content) + assertNotNull(answer.question) + assertNotNull(answer.response) + } + + @Ignore @Test fun logprobs() = test { val request = chatCompletionRequest { @@ -149,6 +211,7 @@ class TestChatCompletions : TestOpenAI() { assertEquals(response.usage!!.completionTokens, logprobs.content!!.size) } + @Ignore @Test fun top_logprobs() = test { val expectedTopLogProbs = 5 @@ -167,7 +230,7 @@ class TestChatCompletions : TestOpenAI() { val logprobs = response.choices.first().logprobs assertNotNull(logprobs) assertEquals(response.usage!!.completionTokens, logprobs.content!!.size) - assertEquals(logprobs.content!![0].topLogprobs?.size, expectedTopLogProbs) + assertEquals(logprobs.content!![0].topLogprobs.size, expectedTopLogProbs) } @Test diff --git a/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestChatVision.kt b/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestChatVision.kt index 8398f9fe..08bcb6a5 100644 --- a/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestChatVision.kt +++ b/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestChatVision.kt @@ -9,7 +9,7 @@ class TestChatVision : TestOpenAI() { @Test fun textimage() = test { val request = chatCompletionRequest { - model = ModelId("gpt-4-vision-preview") + model = ModelId("gpt-4o") messages { user { content { @@ -28,7 +28,7 @@ class TestChatVision : TestOpenAI() { @Test fun multiImage() = test { val request = chatCompletionRequest { - model = ModelId("gpt-4-vision-preview") + model = ModelId("gpt-4o") messages { user { content { diff --git a/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestFiles.kt b/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestFiles.kt index c4522e89..cedd24d8 100644 --- a/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestFiles.kt +++ b/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestFiles.kt @@ -5,7 +5,6 @@ import 
com.aallam.openai.api.file.fileSource import com.aallam.openai.api.file.fileUpload import com.aallam.openai.client.internal.asSource import com.aallam.openai.client.internal.waitFileProcess -import ulid.ULID import kotlin.test.* class TestFiles : TestOpenAI() { @@ -17,7 +16,7 @@ class TestFiles : TestOpenAI() { {"prompt": "", "completion": ""} {"prompt": "", "completion": ""} """.trimIndent() - val id = ULID.randomULID() + val id = "d227742e-c572-4f51-b8a3-89f1d5105ebe" val source = fileSource { name = "$id.jsonl" diff --git a/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestFineTuning.kt b/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestFineTuning.kt index 68bbe07e..0c21d98d 100644 --- a/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestFineTuning.kt +++ b/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestFineTuning.kt @@ -7,7 +7,6 @@ import com.aallam.openai.api.finetuning.FineTuningRequest import com.aallam.openai.api.model.ModelId import com.aallam.openai.client.internal.asSource import com.aallam.openai.client.internal.waitFileProcess -import ulid.ULID import kotlin.test.Test import kotlin.test.assertEquals import kotlin.test.assertNotNull @@ -17,7 +16,7 @@ class TestFineTuning : TestOpenAI() { @Test fun fineTuningJob() = test { - val id = ULID.randomULID() + val id = "d227742e-c572-4f51-b8a3-89f1d5105ebe" val jsonl = """ {"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What's the capital of France?"}, {"role": "assistant", "content": "Paris, as if everyone doesn't know that already."}]} {"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "Who wrote 'Romeo and Juliet'?"}, {"role": "assistant", "content": "Oh, just some guy named William Shakespeare. 
Ever heard of him?"}]} diff --git a/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestRuns.kt b/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestRuns.kt index 9f32318d..08b055c9 100644 --- a/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestRuns.kt +++ b/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestRuns.kt @@ -6,14 +6,23 @@ import com.aallam.openai.api.core.PaginatedList import com.aallam.openai.api.core.Role import com.aallam.openai.api.message.MessageRequest import com.aallam.openai.api.model.ModelId +import com.aallam.openai.api.run.AssistantStreamEvent +import com.aallam.openai.api.run.AssistantStreamEventType +import com.aallam.openai.api.run.Run import com.aallam.openai.api.run.RunRequest import com.aallam.openai.api.run.RunStep import com.aallam.openai.api.run.ThreadRunRequest import com.aallam.openai.api.thread.ThreadMessage import com.aallam.openai.api.thread.ThreadRequest +import com.aallam.openai.client.extension.getData import com.aallam.openai.client.internal.JsonLenient +import kotlinx.coroutines.flow.filter +import kotlinx.coroutines.flow.onEach +import kotlinx.coroutines.flow.singleOrNull import kotlin.test.Test import kotlin.test.assertEquals +import kotlin.test.assertNotNull +import kotlin.test.assertTrue class TestRuns : TestOpenAI() { @@ -47,6 +56,45 @@ class TestRuns : TestOpenAI() { assertEquals(1, runs.size) } + @Test + fun streamingRuns() = test { + val assistant = openAI.assistant( + request = assistantRequest { + name = "Math Tutor" + tools = listOf(AssistantTool.CodeInterpreter) + model = ModelId("gpt-4o") + } + ) + val thread = openAI.thread() + val request = RunRequest(assistantId = assistant.id) + openAI.message( + threadId = thread.id, + request = MessageRequest( + role = Role.User, + content = "solve me 1 + 1", + metadata = mapOf(), + ), + requestOptions = null, + ) + + val runCompletedEvent = openAI + .createStreamingRun(threadId = thread.id, request = request) + 
.filter { it.type == AssistantStreamEventType.THREAD_RUN_COMPLETED } + .singleOrNull() + + assertNotNull(runCompletedEvent) + + val run = runCompletedEvent.getData() + + assertEquals(thread.id, run.threadId) + + var retrieved = openAI.getRun(threadId = thread.id, runId = run.id) + assertEquals(run.id, retrieved.id) + + val runs = openAI.runs(threadId = thread.id) + assertEquals(1, runs.size) + } + @Test fun threadAndRuns() = test { val assistant = openAI.assistant( @@ -74,6 +122,42 @@ class TestRuns : TestOpenAI() { assertEquals(0, runs.size) } + @Test + fun streamingThreadAndRuns() = test { + val assistant = openAI.assistant( + request = assistantRequest { + name = "Math Tutor" + tools = listOf(AssistantTool.CodeInterpreter) + model = ModelId("gpt-4o") + } + ) + val request = ThreadRunRequest( + thread = ThreadRequest( + listOf( + ThreadMessage( + role = Role.User, + content = "solve 1 + 2", + ) + ) + ), + assistantId = assistant.id, + ) + + val runCompletedEvent = openAI + .createStreamingThreadRun(request = request) + .filter { it.type == AssistantStreamEventType.THREAD_RUN_COMPLETED } + .singleOrNull() + + assertNotNull(runCompletedEvent) + + val run = runCompletedEvent.getData() + + assertEquals(assistant.id, run.assistantId) + + val runs = openAI.runSteps(threadId = run.threadId, runId = run.id) + assertEquals(1, runs.size) + } + @Test fun json() = test { val json = """ diff --git a/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestVectorStores.kt b/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestVectorStores.kt index 4bb54a2c..e1036179 100644 --- a/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestVectorStores.kt +++ b/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestVectorStores.kt @@ -7,6 +7,7 @@ import com.aallam.openai.api.vectorstore.FileBatchRequest import com.aallam.openai.api.vectorstore.VectorStoreFileRequest import com.aallam.openai.api.vectorstore.VectorStoreRequest import 
com.aallam.openai.client.internal.asSource +import kotlin.test.Ignore import kotlin.test.Test import kotlin.test.assertContains import kotlin.test.assertEquals @@ -35,6 +36,7 @@ class TestVectorStores : TestOpenAI() { assertEquals(true, deleted) } + @Ignore @Test fun testVectorStoreFiles() = test { val filetxt = """ diff --git a/openai-client/src/commonTest/kotlin/com/aallam/openai/client/internal/Resource.kt b/openai-client/src/commonTest/kotlin/com/aallam/openai/client/internal/Resource.kt index 0ad1e90d..43915caa 100644 --- a/openai-client/src/commonTest/kotlin/com/aallam/openai/client/internal/Resource.kt +++ b/openai-client/src/commonTest/kotlin/com/aallam/openai/client/internal/Resource.kt @@ -1,21 +1,20 @@ package com.aallam.openai.client.internal -import okio.FileSystem -import okio.Path -import okio.Path.Companion.toPath +import kotlinx.io.files.Path +import kotlinx.io.files.SystemFileSystem /** * File system to access test files. */ -internal expect val TestFileSystem: FileSystem +internal val TestFileSystem = SystemFileSystem /** * Get [Path] of a given [fileName] test file. */ -fun testFilePath(fileName: String): Path = libRoot / "openai-client/src/commonTest/resources" / fileName +fun testFilePath(fileName: String): Path = Path(libRoot, "openai-client/src/commonTest/resources", fileName) /** * Get the library lib root. 
*/ -private val libRoot - get() = env("LIB_ROOT")?.toPath() ?: error("Can't find `LIB_ROOT` environment variable") +private val libRoot: String + get() = env("LIB_ROOT") ?: error("Can't find `LIB_ROOT` environment variable") diff --git a/openai-client/src/commonTest/kotlin/com/aallam/openai/client/internal/Source.kt b/openai-client/src/commonTest/kotlin/com/aallam/openai/client/internal/Source.kt index 403f0f2d..e702f6b3 100644 --- a/openai-client/src/commonTest/kotlin/com/aallam/openai/client/internal/Source.kt +++ b/openai-client/src/commonTest/kotlin/com/aallam/openai/client/internal/Source.kt @@ -1,10 +1,11 @@ package com.aallam.openai.client.internal -import okio.Buffer -import okio.Source +import kotlinx.io.Buffer +import kotlinx.io.Source +import kotlinx.io.writeString internal fun String.asSource(): Source { val buffer = Buffer() - buffer.writeUtf8(this) + buffer.writeString(this) return buffer } diff --git a/openai-client/src/jsMain/kotlin/com/aallam/openai/client/Closeable.kt b/openai-client/src/jsMain/kotlin/com/aallam/openai/client/Closeable.kt deleted file mode 100644 index bd5e15f1..00000000 --- a/openai-client/src/jsMain/kotlin/com/aallam/openai/client/Closeable.kt +++ /dev/null @@ -1,5 +0,0 @@ -package com.aallam.openai.client - -public actual interface Closeable { - public actual fun close() -} diff --git a/openai-client/src/jsTest/kotlin/com.aallam.openai.client/internal/FileSystem.kt b/openai-client/src/jsTest/kotlin/com.aallam.openai.client/internal/FileSystem.kt deleted file mode 100644 index 076da047..00000000 --- a/openai-client/src/jsTest/kotlin/com.aallam.openai.client/internal/FileSystem.kt +++ /dev/null @@ -1,8 +0,0 @@ -package com.aallam.openai.client.internal - -import okio.FileSystem -import okio.NodeJsFileSystem -import okio.Path.Companion.toPath -import okio.Source - -internal actual val TestFileSystem: FileSystem = NodeJsFileSystem diff --git a/openai-client/src/jvmMain/kotlin/com/aallam/openai/client/Closeable.kt 
b/openai-client/src/jvmMain/kotlin/com/aallam/openai/client/Closeable.kt deleted file mode 100644 index 6e85b27c..00000000 --- a/openai-client/src/jvmMain/kotlin/com/aallam/openai/client/Closeable.kt +++ /dev/null @@ -1,3 +0,0 @@ -package com.aallam.openai.client - -public actual typealias Closeable = AutoCloseable diff --git a/openai-client/src/jvmTest/kotlin/com/aallam/openai/client/TestChatVisionJVM.kt b/openai-client/src/jvmTest/kotlin/com/aallam/openai/client/TestChatVisionJVM.kt index 0046e93b..fc6419a1 100644 --- a/openai-client/src/jvmTest/kotlin/com/aallam/openai/client/TestChatVisionJVM.kt +++ b/openai-client/src/jvmTest/kotlin/com/aallam/openai/client/TestChatVisionJVM.kt @@ -2,20 +2,27 @@ package com.aallam.openai.client import com.aallam.openai.api.chat.* import com.aallam.openai.api.model.ModelId -import okio.FileSystem -import okio.Path.Companion.toPath +import kotlinx.io.buffered +import kotlinx.io.bytestring.encode +import kotlinx.io.files.Path +import kotlinx.io.files.SystemFileSystem +import kotlinx.io.readByteString +import kotlin.io.encoding.Base64 +import kotlin.io.encoding.ExperimentalEncodingApi + import kotlin.test.* class TestChatVisionJVM : TestOpenAI() { + @OptIn(ExperimentalEncodingApi::class) @Test fun encoded() = test { - val byteString = FileSystem.RESOURCES.read("nature.jpeg".toPath()) { - readByteString() + val byteString = SystemFileSystem.source(path = Path("src/jvmTest/resources/nature.jpeg")).buffered().use { + it.readByteString() } - val encoded = byteString.base64() + val encoded = Base64.encode(source = byteString) val request = chatCompletionRequest { - model = ModelId("gpt-4-vision-preview") + model = ModelId("gpt-4o") messages { user { content { diff --git a/openai-client/src/jvmTest/kotlin/com/aallam/openai/client/internal/FileSystem.kt b/openai-client/src/jvmTest/kotlin/com/aallam/openai/client/internal/FileSystem.kt deleted file mode 100644 index 704f385b..00000000 --- 
a/openai-client/src/jvmTest/kotlin/com/aallam/openai/client/internal/FileSystem.kt +++ /dev/null @@ -1,5 +0,0 @@ -package com.aallam.openai.client.internal - -import okio.FileSystem - -internal actual val TestFileSystem: FileSystem = FileSystem.SYSTEM diff --git a/openai-client/src/nativeMain/kotlin/com/aallam/openai/client/Closeable.kt b/openai-client/src/nativeMain/kotlin/com/aallam/openai/client/Closeable.kt deleted file mode 100644 index bd5e15f1..00000000 --- a/openai-client/src/nativeMain/kotlin/com/aallam/openai/client/Closeable.kt +++ /dev/null @@ -1,5 +0,0 @@ -package com.aallam.openai.client - -public actual interface Closeable { - public actual fun close() -} diff --git a/openai-client/src/nativeTest/kotlin/com.aallam.openai.client/internal/FileSystem.kt b/openai-client/src/nativeTest/kotlin/com.aallam.openai.client/internal/FileSystem.kt deleted file mode 100644 index 704f385b..00000000 --- a/openai-client/src/nativeTest/kotlin/com.aallam.openai.client/internal/FileSystem.kt +++ /dev/null @@ -1,5 +0,0 @@ -package com.aallam.openai.client.internal - -import okio.FileSystem - -internal actual val TestFileSystem: FileSystem = FileSystem.SYSTEM diff --git a/openai-client/src/wasmJsTest/kotlin/com.aallam.openai.client/internal/Env.kt b/openai-client/src/wasmJsTest/kotlin/com.aallam.openai.client/internal/Env.kt new file mode 100644 index 00000000..8f2bc916 --- /dev/null +++ b/openai-client/src/wasmJsTest/kotlin/com.aallam.openai.client/internal/Env.kt @@ -0,0 +1,7 @@ +package com.aallam.openai.client.internal + +internal actual fun env(name: String): String? { + return getEnv(name) +} + +fun getEnv(value: String): String? 
= js("""globalThis.process.env[value]""") diff --git a/openai-core/build.gradle.kts b/openai-core/build.gradle.kts index 6f23b4d0..fa4eed71 100644 --- a/openai-core/build.gradle.kts +++ b/openai-core/build.gradle.kts @@ -12,6 +12,7 @@ kotlin { explicitApi() jvm() jsNode() + jsWasm() native() sourceSets { @@ -22,7 +23,7 @@ kotlin { } val commonMain by getting { dependencies { - api(libs.okio) + api(libs.kotlinx.io.core) api(libs.serialization.json) implementation(libs.serialization.core) } @@ -43,5 +44,10 @@ kotlin { implementation(kotlin("test-js")) } } + val wasmJsTest by getting { + dependencies { + implementation(kotlin("test-wasm-js")) + } + } } } diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/assistant/AssistantRequest.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/assistant/AssistantRequest.kt index 26c95a0d..4ca6cbbc 100644 --- a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/assistant/AssistantRequest.kt +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/assistant/AssistantRequest.kt @@ -67,7 +67,15 @@ public data class AssistantRequest( * Specifies the format that the model must output. Compatible with GPT-4o, GPT-4 Turbo, and all GPT-3.5 Turbo * models since gpt-3.5-turbo-1106. * - * Setting to [AssistantResponseFormat.JsonObject] enables JSON mode, which guarantees the message the model + * Setting to [AssistantResponseFormat.JSON_SCHEMA] enables Structured Outputs which ensures the model will match your supplied JSON schema. + * + * Structured Outputs ([AssistantResponseFormat.JSON_SCHEMA]) are available in our latest large language models, starting with GPT-4o: + * 1. gpt-4o-mini-2024-07-18 and later + * 2. gpt-4o-2024-08-06 and later + * + * Older models like gpt-4-turbo and earlier may use JSON mode ([AssistantResponseFormat.JSON_OBJECT]) instead. + * + * Setting to [AssistantResponseFormat.JSON_OBJECT] enables JSON mode, which guarantees the message the model * generates is valid JSON. 
* * important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user @@ -75,6 +83,7 @@ public data class AssistantRequest( * token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be * partially cut off if finish_reason="length", which indicates the generation exceeded max_tokens or * the conversation exceeded the max context length. + * */ @SerialName("response_format") val responseFormat: AssistantResponseFormat? = null, ) diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/assistant/AssistantResponseFormat.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/assistant/AssistantResponseFormat.kt index ecef0607..d9135dfc 100644 --- a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/assistant/AssistantResponseFormat.kt +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/assistant/AssistantResponseFormat.kt @@ -9,51 +9,73 @@ import kotlinx.serialization.descriptors.buildClassSerialDescriptor import kotlinx.serialization.descriptors.element import kotlinx.serialization.encoding.Decoder import kotlinx.serialization.encoding.Encoder -import kotlinx.serialization.json.JsonElement import kotlinx.serialization.json.JsonObject +import kotlinx.serialization.json.JsonObjectBuilder import kotlinx.serialization.json.JsonPrimitive +import kotlinx.serialization.json.booleanOrNull +import kotlinx.serialization.json.contentOrNull +import kotlinx.serialization.json.jsonObject import kotlinx.serialization.json.jsonPrimitive /** - * string: auto is the default value + * Represents the format of the response from the assistant. * - * object: An object describing the expected output of the model. If json_object only function type tools are allowed to be passed to the Run. - * If text, the model can return text or any value needed. - * type: string Must be one of text or json_object. + * @property type The type of the response format. 
+ * @property jsonSchema The JSON schema associated with the response format, if type is "json_schema" otherwise null. */ @BetaOpenAI @Serializable(with = AssistantResponseFormat.ResponseFormatSerializer::class) public data class AssistantResponseFormat( - val format: String? = null, - val objectType: AssistantResponseType? = null, + val type: String, + val jsonSchema: JsonSchema? = null ) { + + /** + * Represents a JSON schema. + * + * @property name The name of the schema. + * @property description The description of the schema. + * @property schema The actual JSON schema. + * @property strict Indicates if the schema is strict. + */ @Serializable - public data class AssistantResponseType( - val type: String + public data class JsonSchema( + val name: String, + val description: String? = null, + val schema: JsonObject, + val strict: Boolean? = null ) public companion object { - public val AUTO: AssistantResponseFormat = AssistantResponseFormat(format = "auto") - public val TEXT: AssistantResponseFormat = AssistantResponseFormat(objectType = AssistantResponseType(type = "text")) - public val JSON_OBJECT: AssistantResponseFormat = AssistantResponseFormat(objectType = AssistantResponseType(type = "json_object")) + public val AUTO: AssistantResponseFormat = AssistantResponseFormat("auto") + public val TEXT: AssistantResponseFormat = AssistantResponseFormat("text") + public val JSON_OBJECT: AssistantResponseFormat = AssistantResponseFormat("json_object") + + /** + * Creates an instance of `AssistantResponseFormat` with type `json_schema`. + * + * @param name The name of the schema. + * @param description The description of the schema. + * @param schema The actual JSON schema. + * @param strict Indicates if the schema is strict. + * @return An instance of `AssistantResponseFormat` with the specified JSON schema. + */ + public fun JSON_SCHEMA( + name: String, + description: String? = null, + schema: JsonObject, + strict: Boolean? 
= null + ): AssistantResponseFormat = AssistantResponseFormat( + "json_schema", + JsonSchema(name, description, schema, strict) + ) } + public object ResponseFormatSerializer : KSerializer { override val descriptor: SerialDescriptor = buildClassSerialDescriptor("AssistantResponseFormat") { - element("format", isOptional = true) - element("type", isOptional = true) - } - - override fun serialize(encoder: Encoder, value: AssistantResponseFormat) { - val jsonEncoder = encoder as? kotlinx.serialization.json.JsonEncoder - ?: throw SerializationException("This class can be saved only by Json") - - if (value.format != null) { - jsonEncoder.encodeJsonElement(JsonPrimitive(value.format)) - } else if (value.objectType != null) { - val jsonElement: JsonElement = JsonObject(mapOf("type" to JsonPrimitive(value.objectType.type))) - jsonEncoder.encodeJsonElement(jsonElement) - } + element("type") + element("json_schema", isOptional = true) // Only for "json_schema" type } override fun deserialize(decoder: Decoder): AssistantResponseFormat { @@ -63,14 +85,63 @@ public data class AssistantResponseFormat( val jsonElement = jsonDecoder.decodeJsonElement() return when { jsonElement is JsonPrimitive && jsonElement.isString -> { - AssistantResponseFormat(format = jsonElement.content) + AssistantResponseFormat(type = jsonElement.content) } jsonElement is JsonObject && "type" in jsonElement -> { val type = jsonElement["type"]!!.jsonPrimitive.content - AssistantResponseFormat(objectType = AssistantResponseType(type)) + when (type) { + "json_schema" -> { + val schemaObject = jsonElement["json_schema"]?.jsonObject + val name = schemaObject?.get("name")?.jsonPrimitive?.content ?: "" + val description = schemaObject?.get("description")?.jsonPrimitive?.contentOrNull + val schema = schemaObject?.get("schema")?.jsonObject ?: JsonObject(emptyMap()) + val strict = schemaObject?.get("strict")?.jsonPrimitive?.booleanOrNull + AssistantResponseFormat( + type = "json_schema", + jsonSchema = 
JsonSchema(name = name, description = description, schema = schema, strict = strict) + ) + } + "json_object" -> AssistantResponseFormat(type = "json_object") + "auto" -> AssistantResponseFormat(type = "auto") + "text" -> AssistantResponseFormat(type = "text") + else -> throw SerializationException("Unknown response format type: $type") + } } else -> throw SerializationException("Unknown response format: $jsonElement") } } + + override fun serialize(encoder: Encoder, value: AssistantResponseFormat) { + val jsonEncoder = encoder as? kotlinx.serialization.json.JsonEncoder + ?: throw SerializationException("This class can be saved only by Json") + + val jsonElement = when (value.type) { + "json_schema" -> { + JsonObject( + mapOf( + "type" to JsonPrimitive("json_schema"), + "json_schema" to JsonObject( + mapOf( + "name" to JsonPrimitive(value.jsonSchema?.name ?: ""), + "description" to JsonPrimitive(value.jsonSchema?.description ?: ""), + "schema" to (value.jsonSchema?.schema ?: JsonObject(emptyMap())), + "strict" to JsonPrimitive(value.jsonSchema?.strict ?: false) + ) + ) + ) + ) + } + "json_object" -> JsonObject(mapOf("type" to JsonPrimitive("json_object"))) + "auto" -> JsonPrimitive("auto") + "text" -> JsonObject(mapOf("type" to JsonPrimitive("text"))) + else -> throw SerializationException("Unsupported response format type: ${value.type}") + } + jsonEncoder.encodeJsonElement(jsonElement) + } + } } + +public fun JsonObject.Companion.buildJsonObject(block: JsonObjectBuilder.() -> Unit): JsonObject { + return kotlinx.serialization.json.buildJsonObject(block) +} diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/chat/ChatCompletionChunk.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/chat/ChatCompletionChunk.kt index 209ffd08..bdde1808 100644 --- a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/chat/ChatCompletionChunk.kt +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/chat/ChatCompletionChunk.kt @@ -22,7 +22,7 @@ 
public data class ChatCompletionChunk( * The creation time in epoch milliseconds. */ @SerialName("created") - public val created: Int, + public val created: Long, /** * The model used. diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/chat/ChatCompletionRequest.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/chat/ChatCompletionRequest.kt index df19b925..b41851bc 100644 --- a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/chat/ChatCompletionRequest.kt +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/chat/ChatCompletionRequest.kt @@ -14,7 +14,7 @@ import kotlinx.serialization.json.JsonObjectBuilder * Creates a completion for the chat message. */ @Serializable -public class ChatCompletionRequest( +public data class ChatCompletionRequest( /** * ID of the model to use. */ @@ -25,6 +25,12 @@ public class ChatCompletionRequest( */ @SerialName("messages") public val messages: List, + /** + * Constrains effort on reasoning for reasoning models. Currently supported values are low, medium, and high. + * Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response. + */ + @SerialName("reasoning_effort") public val reasoningEffort: Effort? = null, + /** * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, * while lower values like 0.2 will make it more focused and deterministic. @@ -52,12 +58,24 @@ public class ChatCompletionRequest( */ @SerialName("stop") public val stop: List? = null, + /** + * Whether to store the output of this chat completion request for use in our model distillation or evals products + */ + @SerialName("store") public val store: Boolean? = null, + /** * The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can * return will be (4096 - prompt tokens). 
*/ + @Deprecated(message = "Deprecated in favor of `max_completion_tokens`") @SerialName("max_tokens") public val maxTokens: Int? = null, + /** + * An upper bound for the number of tokens that can be generated for a completion, + * including visible output tokens and reasoning tokens. + */ + @SerialName("max_completion_tokens") public val maxCompletionTokens: Int? = null, + /** * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, * increasing the model's likelihood to talk about new topics. @@ -191,6 +209,12 @@ public class ChatCompletionRequestBuilder { */ public var messages: List? = null + /** + * Constrains effort on reasoning for reasoning models. Currently supported values are low, medium, and high. + * Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response. + */ + public val reasoningEffort: Effort? = null + /** * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, * while lower values like 0.2 will make it more focused and deterministic. @@ -218,12 +242,24 @@ public class ChatCompletionRequestBuilder { */ public var stop: List? = null + /** + * Whether to store the output of this chat completion request for use in our model distillation or evals products + */ + public val store: Boolean? = null + /** * The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can * return will be (4096 - prompt tokens). */ + @Deprecated(message = "Deprecated in favor of `max_completion_tokens`") public var maxTokens: Int? = null + /** + * An upper bound for the number of tokens that can be generated for a completion, + * including visible output tokens and reasoning tokens. + */ + public val maxCompletionTokens: Int? = null + /** * Number between -2.0 and 2.0. 
Positive values penalize new tokens based on whether they appear in the text so far, * increasing the model's likelihood to talk about new topics. @@ -354,11 +390,14 @@ public class ChatCompletionRequestBuilder { public fun build(): ChatCompletionRequest = ChatCompletionRequest( model = requireNotNull(model) { "model is required" }, messages = requireNotNull(messages) { "messages is required" }, + reasoningEffort = reasoningEffort, temperature = temperature, topP = topP, n = n, stop = stop, + store = store, maxTokens = maxTokens, + maxCompletionTokens = maxCompletionTokens, presencePenalty = presencePenalty, frequencyPenalty = frequencyPenalty, logitBias = logitBias, diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/chat/ChatResponseFormat.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/chat/ChatResponseFormat.kt index 39a24c94..564c8b6e 100644 --- a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/chat/ChatResponseFormat.kt +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/chat/ChatResponseFormat.kt @@ -2,6 +2,7 @@ package com.aallam.openai.api.chat import kotlinx.serialization.SerialName import kotlinx.serialization.Serializable +import kotlinx.serialization.json.JsonObject /** * An object specifying the format that the model must output. @@ -11,9 +12,13 @@ public data class ChatResponseFormat( /** * Response format type. */ - @SerialName("type") val type: String -) { + @SerialName("type") val type: String, + /** + * Optional JSON schema specification when type is "json_schema" + */ + @SerialName("json_schema") val jsonSchema: JsonSchema? = null +) { public companion object { /** * JSON mode, which guarantees the message the model generates, is valid JSON. @@ -24,5 +29,32 @@ public data class ChatResponseFormat( * Default text mode. 
*/ public val Text: ChatResponseFormat = ChatResponseFormat(type = "text") + + /** + * Creates a JSON schema response format with the specified schema + */ + public fun jsonSchema(schema: JsonSchema): ChatResponseFormat = + ChatResponseFormat(type = "json_schema", jsonSchema = schema) } } + +/** + * Specification for JSON schema response format + */ +@Serializable +public data class JsonSchema( + /** + * Optional name for the schema + */ + @SerialName("name") val name: String? = null, + + /** + * The JSON schema specification + */ + @SerialName("schema") val schema: JsonObject, + + /** + * Whether to enforce strict schema validation + */ + @SerialName("strict") val strict: Boolean? = null +) diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/chat/Effort.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/chat/Effort.kt new file mode 100644 index 00000000..9837d8d7 --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/chat/Effort.kt @@ -0,0 +1,11 @@ +package com.aallam.openai.api.chat + +import kotlinx.serialization.Serializable +import kotlin.jvm.JvmInline + +/** + * Reasoning Effort. + */ +@Serializable +@JvmInline +public value class Effort(public val id: String) diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/chat/ToolChoice.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/chat/ToolChoice.kt index 60eccc04..adf32db3 100644 --- a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/chat/ToolChoice.kt +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/chat/ToolChoice.kt @@ -36,6 +36,9 @@ public sealed interface ToolChoice { /** Represents the `none` mode. */ public val None: ToolChoice = Mode("none") + /** Represents the `required` mode. 
*/ + public val Required: ToolChoice = Mode("required") + /** Specifies a function for the model to call **/ public fun function(name: String): ToolChoice = Named(type = ToolType.Function, function = FunctionToolChoice(name = name)) diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/file/FileSource.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/file/FileSource.kt index e9e7cafe..2b6a6c66 100644 --- a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/file/FileSource.kt +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/file/FileSource.kt @@ -1,9 +1,11 @@ package com.aallam.openai.api.file import com.aallam.openai.api.OpenAIDsl -import okio.FileSystem -import okio.Path -import okio.Source +import kotlinx.io.RawSource +import kotlinx.io.Source +import kotlinx.io.files.FileSystem +import kotlinx.io.files.Path +import kotlinx.io.files.SystemFileSystem /** * Represents a file resource. @@ -17,16 +19,16 @@ public class FileSource( /** * File source. */ - public val source: Source, + public val source: RawSource, ) { /** * Create [FileSource] instance. 
* - * @param path file path to upload + * @param path the file path to upload * @param fileSystem file system to be used */ - public constructor(path: Path, fileSystem: FileSystem) : this(path.name, fileSystem.source(path)) + public constructor(path: Path, fileSystem: FileSystem = SystemFileSystem) : this(path.name, fileSystem.source(path)) } /** diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/message/Attachment.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/message/Attachment.kt new file mode 100644 index 00000000..6ec31813 --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/message/Attachment.kt @@ -0,0 +1,51 @@ +package com.aallam.openai.api.message + +import com.aallam.openai.api.BetaOpenAI +import com.aallam.openai.api.assistant.AssistantTool +import com.aallam.openai.api.file.FileId +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable + +/** + * References an Attachment in the message request. + */ +@Serializable +@OptIn(BetaOpenAI::class) +public data class Attachment( + /** + * The ID of the file to attach to the message. + */ + @SerialName("file_id") val fileId: FileId? = null, + + /** + * The tools to add this file to. + */ + @SerialName("tools") val tools: List? = null, +) + +/** + * A message attachment builder. + */ +public fun attachment(block: AttachmentBuilder.() -> Unit): Attachment = AttachmentBuilder().apply(block).build() + +/** + * A message attachment builder. + */ +public class AttachmentBuilder { + /** + * The ID of the file to attach to the message. + */ + public var fileId: FileId? = null + + /** + * The tools to add this file to. + */ + @OptIn(BetaOpenAI::class) + public var tools: List? = null + + /** + * Build the attachment. 
+ */ + @OptIn(BetaOpenAI::class) + public fun build(): Attachment = Attachment(fileId, tools) +} diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/message/MessageContent.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/message/MessageContent.kt index d0da1df1..b7dd8bed 100644 --- a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/message/MessageContent.kt +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/message/MessageContent.kt @@ -107,7 +107,7 @@ public data class FileCitation( /** * The specific quote in the file */ - @SerialName("quote") val quote: String, + @SerialName("quote") val quote: String? = null, ) /** diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/message/MessageRequest.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/message/MessageRequest.kt index 09f2e48d..4cfec89f 100644 --- a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/message/MessageRequest.kt +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/message/MessageRequest.kt @@ -2,7 +2,6 @@ package com.aallam.openai.api.message import com.aallam.openai.api.BetaOpenAI import com.aallam.openai.api.core.Role -import com.aallam.openai.api.file.FileId import kotlinx.serialization.SerialName import kotlinx.serialization.Serializable @@ -22,6 +21,11 @@ public class MessageRequest( */ @SerialName("content") public val content: String, + /** + * A list of files attached to the message. + */ + @SerialName("attachments") public val attachments: List? = null, + /** * Set of 16 key-value pairs that can be attached to an object. * This can be useful for storing additional information about the object in a structured format. @@ -52,6 +56,11 @@ public class MessageRequestBuilder { */ public var content: String? = null + /** + * A list of files attached to the message. + */ + public var attachments: List? = null + /** * Set of 16 key-value pairs that can be attached to an object. 
* This can be useful for storing additional information about the object in a structured format. @@ -62,6 +71,7 @@ public class MessageRequestBuilder { public fun build(): MessageRequest = MessageRequest( role = requireNotNull(role) { "role is required" }, content = requireNotNull(content) { "content is required" }, + attachments = attachments, metadata = metadata ) } diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/run/AssistantStreamEvent.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/run/AssistantStreamEvent.kt new file mode 100644 index 00000000..264038ba --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/run/AssistantStreamEvent.kt @@ -0,0 +1,251 @@ +package com.aallam.openai.api.run + +import com.aallam.openai.api.BetaOpenAI +import com.aallam.openai.api.core.Role +import com.aallam.openai.api.message.Message +import com.aallam.openai.api.message.MessageContent +import com.aallam.openai.api.message.MessageId +import com.aallam.openai.api.thread.Thread +import kotlinx.serialization.KSerializer +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable +import kotlinx.serialization.builtins.serializer +import kotlinx.serialization.descriptors.PrimitiveKind +import kotlinx.serialization.descriptors.PrimitiveSerialDescriptor +import kotlinx.serialization.descriptors.SerialDescriptor +import kotlinx.serialization.encoding.Decoder +import kotlinx.serialization.encoding.Encoder +import kotlin.reflect.KClass + +/** + * Represents an event emitted when streaming a run. + * @property rawType the raw string of the event type. + * @property type the type of the event or [AssistantStreamEventType.UNKNOWN] if unrecognized. + * @property data the string serialized representation of the data for the event. 
+ */ +@BetaOpenAI +@Serializable +public data class AssistantStreamEvent( + @SerialName("rawType") val rawType: String?, + @SerialName("type") val type: AssistantStreamEventType, + @SerialName("data") val data: String? +) + +/** + * Represents a run step delta i.e. any changed fields on a run step during streaming. + * @property id the identifier of the run step, which can be referenced in API endpoints. + * @property object the object type, which is always thread.run.step.delta. + * @property delta the delta containing the fields that have changed on the run step. + */ +@BetaOpenAI +@Serializable +public data class RunStepDelta( + @SerialName("id") val id: RunStepId, + @SerialName("object") val `object`: String, + @SerialName("delta") val delta: RunStepDeltaData +) + +/** + * The delta containing the fields that have changed on the run step. + * @property stepDetails the details of the run step. + */ +@BetaOpenAI +@Serializable +public data class RunStepDeltaData( + @SerialName("step_details") val stepDetails: RunStepDetails +) + +/** + * Represents a message delta i.e. any changed fields on a message during streaming. + * @param id the identifier of the message, which can be referenced in API endpoints. + * @param object the object type, which is always thread.message.delta. + * @param delta the delta containing the fields that have changed on the message. + */ +@BetaOpenAI +@Serializable +public data class MessageDelta( + @SerialName("id") val id: MessageId, + @SerialName("object") val `object`: String, + @SerialName("delta") val delta: MessageDeltaData +) + +/** + * The delta containing the fields that have changed on the message. + * @param role the entity that produced the message. One of user or assistant. + * @param content the content of the message in array of text and/or images. 
+ */ +@BetaOpenAI +@Serializable +public data class MessageDeltaData( + @SerialName("role") val role: Role, + @SerialName("content") val content: MessageContent +) + +/** + * Represents an event type emitted when streaming a Run. + * @property event the string representation of event type. + * @property dataType the type of the data. + * @property serializer the serializer corresponding to the data type. + */ +@BetaOpenAI +@Serializable(with = AssistantStreamEventTypeSerializer::class) +public enum class AssistantStreamEventType( + public val event: String, + @Suppress("MemberVisibilityCanBePrivate") public val dataType: KClass<*>, + public val serializer: KSerializer<*> +) { + + /** + * Occurs when a new thread is created. + */ + THREAD_CREATED("thread.created", Thread::class, Thread.serializer()), + + /** + * Occurs when a new run is created. + */ + THREAD_RUN_CREATED("thread.run.created", Run::class, Run.serializer()), + + /** + * Occurs when a run moves to a queued status. + */ + THREAD_RUN_QUEUED("thread.run.queued", Run::class, Run.serializer()), + + /** + * Occurs when a run moves to an in_progress status. + */ + THREAD_RUN_IN_PROGRESS("thread.run.in_progress", Run::class, Run.serializer()), + + /** + * Occurs when a run moves to a requires_action status. + */ + THREAD_RUN_REQUIRES_ACTION("thread.run.requires_action", Run::class, Run.serializer()), + + /** + * Occurs when a run is completed. + */ + THREAD_RUN_COMPLETED("thread.run.completed", Run::class, Run.serializer()), + + /** + * Occurs when a run ends with status incomplete. + */ + THREAD_RUN_INCOMPLETE("thread.run.incomplete", Run::class, Run.serializer()), + + /** + * Occurs when a run fails. + */ + THREAD_RUN_FAILED("thread.run.failed", Run::class, Run.serializer()), + + /** + * Occurs when a run moves to a cancelling status. + */ + THREAD_RUN_CANCELLING("thread.run.cancelling", Run::class, Run.serializer()), + + /** + * Occurs when a run is cancelled. 
+ */ + THREAD_RUN_CANCELLED("thread.run.cancelled", Run::class, Run.serializer()), + + /** + * Occurs when a run expires. + */ + THREAD_RUN_EXPIRED("thread.run.expired", Run::class, Run.serializer()), + + /** + * Occurs when a run step is created. + */ + THREAD_RUN_STEP_CREATED("thread.run.step.created", RunStep::class, RunStep.serializer()), + + /** + * Occurs when a run step moves to an in_progress state. + */ + THREAD_RUN_STEP_IN_PROGRESS("thread.run.step.in_progress", RunStep::class, RunStep.serializer()), + + /** + * Occurs when parts of a run step are being streamed. + */ + THREAD_RUN_STEP_DELTA("thread.run.step.delta", RunStepDelta::class, RunStepDelta.serializer()), + + /** + * Occurs when a run step is completed. + */ + THREAD_RUN_STEP_COMPLETED("thread.run.step.completed", RunStep::class, RunStep.serializer()), + + /** + * Occurs when a run step fails. + */ + THREAD_RUN_STEP_FAILED("thread.run.step.failed", RunStep::class, RunStep.serializer()), + + /** + * Occurs when a run step is cancelled. + */ + THREAD_RUN_STEP_CANCELLED("thread.run.step.cancelled", RunStep::class, RunStep.serializer()), + + /** + * Occurs when a run step expires. + */ + THREAD_RUN_STEP_EXPIRED("thread.run.step.expired", RunStep::class, RunStep.serializer()), + + /** + * Occurs when a message is created. + */ + THREAD_MESSAGE_CREATED("thread.message.created", Message::class, Message.serializer()), + + /** + * Occurs when a message moves to an in_progress state. + */ + THREAD_MESSAGE_IN_PROGRESS("thread.message.in_progress", Message::class, Message.serializer()), + + /** + * Occurs when parts of a Message are being streamed. + */ + THREAD_MESSAGE_DELTA("thread.message.delta", MessageDelta::class, MessageDelta.serializer()), + + /** + * Occurs when a message is completed. + */ + THREAD_MESSAGE_COMPLETED("thread.message.completed", Message::class, Message.serializer()), + + /** + * Occurs when a message ends before it is completed. 
+ */ + THREAD_MESSAGE_INCOMPLETE("thread.message.incomplete", Message::class, Message.serializer()), + + /** + * Occurs when an error occurs. This can happen due to an internal server error or a timeout. + */ + ERROR("error", String::class, String.serializer()), + + /** + * Occurs when a stream ends. + * data is [DONE] + */ + DONE("done", String::class, String.serializer()), + + /** + * Occurs when the event type is not recognized + */ + UNKNOWN("unknown", String::class, String.serializer()); + + public companion object { + public fun fromEvent(event: String): AssistantStreamEventType = + entries + .find { it.event == event } + ?: UNKNOWN + } +} + +/** + * Custom serializer for [AssistantStreamEventType]. + */ +@OptIn(BetaOpenAI::class) +public class AssistantStreamEventTypeSerializer : KSerializer { + override val descriptor: SerialDescriptor = PrimitiveSerialDescriptor("AssistantStreamEventType", PrimitiveKind.STRING) + + override fun deserialize(decoder: Decoder): AssistantStreamEventType { + val value = decoder.decodeString() + return AssistantStreamEventType.entries.single { value == it.event } + } + override fun serialize(encoder: Encoder, value: AssistantStreamEventType) { + encoder.encodeString(value.event) + } +} diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/run/Run.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/run/Run.kt index 5bd08469..d617b78d 100644 --- a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/run/Run.kt +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/run/Run.kt @@ -7,6 +7,7 @@ import com.aallam.openai.api.core.Status import com.aallam.openai.api.model.ModelId import com.aallam.openai.api.thread.ThreadId import com.aallam.openai.api.core.LastError +import com.aallam.openai.api.core.Usage import kotlinx.serialization.SerialName import kotlinx.serialization.Serializable @@ -102,4 +103,30 @@ public data class Run( * Keys can be a maximum of 64 characters long, and values can be a maximum 
of 512 characters long. */ @SerialName("metadata") val metadata: Map? = null, + + /** + * Usage statistics related to the run. + * This value will be null if the run is not in a terminal state (i.e. in_progress, queued, etc.). + */ + @SerialName("usage") public val usage: Usage? = null, + + /** + * The sampling temperature used for this run. If not set, defaults to 1. + */ + @SerialName("temperature") val temperature: Double? = null, + + /** + * The nucleus sampling value used for this run. If not set, defaults to 1. + */ + @SerialName("top_p") val topP: Double? = null, + + /** + * The maximum number of prompt tokens specified to have been used over the course of the run. + */ + @SerialName("max_prompt_tokens") val maxPromptTokens: Int? = null, + + /** + * The maximum number of completion tokens specified to have been used over the course of the run. + */ + @SerialName("max_completion_tokens") val maxCompletionTokens: Int? = null, ) diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/run/RunRequest.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/run/RunRequest.kt index a41334bc..dc59bfee 100644 --- a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/run/RunRequest.kt +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/run/RunRequest.kt @@ -48,6 +48,11 @@ public data class RunRequest( * Keys can be a maximum of 64 characters long, and values can be a maximum of 512 characters long. */ @SerialName("metadata") val metadata: Map? = null, + + /** + * Enables streaming events for this run. Will be overridden based on the api call being made. 
+ */ + @SerialName("stream") val stream: Boolean = false ) /** diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/run/RunStepDetails.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/run/RunStepDetails.kt index 2c7220a7..12601951 100644 --- a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/run/RunStepDetails.kt +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/run/RunStepDetails.kt @@ -47,8 +47,8 @@ public data class MessageCreation( public data class ToolCallStepDetails( /** * An array of tool calls the run step was involved in. - * These can be associated with one of three types of tools: - * [ToolCallStep.CodeInterpreter], [ToolCallStep.RetrievalTool], or [ToolCallStep.FunctionTool]. + * These can be associated with one of four types of tools: + * [ToolCallStep.CodeInterpreter], [ToolCallStep.RetrievalTool], [ToolCallStep.FunctionTool], or [ToolCallStep.FileSearchTool]. */ @SerialName("tool_calls") public val toolCalls: List? = null, ) : RunStepDetails @@ -98,6 +98,20 @@ public sealed interface ToolCallStep { */ @SerialName("function") public val function: FunctionToolCallStep, ) : ToolCallStep + + @BetaOpenAI + @Serializable + @SerialName("file_search") + public data class FileSearchTool( + /** + * The ID of the tool call object. + */ + @SerialName("id") public val id: ToolCallStepId, + /** + * The options and results of the file search. + */ + @SerialName("file_search") public val fileSearch: FileSearchToolCallStep, + ) : ToolCallStep } @BetaOpenAI @@ -119,6 +133,53 @@ public data class FunctionToolCallStep( @SerialName("output") public val output: String? = null, ) +@BetaOpenAI +@Serializable +public data class FileSearchToolCallStep( + /** + * The configured options for ranking. + */ + @SerialName("ranking_options") public val rankingOptions: FileSearchToolCallRankingOptions, + + /** + * The returned results of the file search, ordered by score. 
+ */ + @SerialName("results") public val results: List, +) + +@BetaOpenAI +@Serializable +public data class FileSearchToolCallRankingOptions( + /** + * The configured ranker. + */ + @SerialName("ranker") public val ranker: String, + + /** + * The configured score threshold. + */ + @SerialName("score_threshold") public val scoreThreshold: Double, +) + +@BetaOpenAI +@Serializable +public data class FileSearchToolCallResult( + /** + * The ID of the file object. + */ + @SerialName("file_id") public val fileId: FileId, + + /** + * The original filename of the file object. + */ + @SerialName("file_name") public val fileName: String, + + /** + * The score given to the provided result. + */ + @SerialName("score") public val score: Double, +) + @BetaOpenAI @Serializable public data class CodeInterpreterToolCall( diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/run/ThreadRunRequest.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/run/ThreadRunRequest.kt index ee013c93..33f527a1 100644 --- a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/run/ThreadRunRequest.kt +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/run/ThreadRunRequest.kt @@ -50,6 +50,11 @@ public data class ThreadRunRequest( * Keys can be a maximum of 64 characters long, and values can be a maximum of 512 characters long. */ @SerialName("metadata") val metadata: Map? = null, + + /** + * Enables streaming events for this run. Will be overridden based on the api call being made. 
+ */ + @SerialName("stream") val stream: Boolean = false ) /** diff --git a/renovate.json b/renovate.json index 2d30eef4..3bd82868 100644 --- a/renovate.json +++ b/renovate.json @@ -1,8 +1,8 @@ { + "$schema": "https://docs.renovatebot.com/renovate-schema.json", "extends": [ "config:base", - "docker:disable", - "group:all" + "docker:disable" ], "dependencyDashboardApproval": true } \ No newline at end of file diff --git a/sample/README.md b/sample/README.md index 3ad191cb..0f07f81e 100644 --- a/sample/README.md +++ b/sample/README.md @@ -7,7 +7,7 @@ JVM, JS and Native sample apps. | Target | command | |--------|-------------------------------------------------------| | jvm | `./gradlew :sample:jvm:run` | -| js | `./gradlew :sample:js:run` | +| js | `./gradlew :sample:js:jsNodeDevelopmentRun` | | native | `./gradlew :sample:native:runReleaseExecutableNative` | _PS: environment variable `OPENAI_API_KEY` must be set._ diff --git a/sample/js/build.gradle.kts b/sample/js/build.gradle.kts index 5a14f1d0..39f0feb8 100644 --- a/sample/js/build.gradle.kts +++ b/sample/js/build.gradle.kts @@ -15,7 +15,6 @@ kotlin { dependencies { //implementation("com.aallam.openai:openai-client:") implementation(projects.openaiClient) - implementation(libs.okio.nodefilesystem) } } } diff --git a/sample/js/src/main/kotlin/main.kt b/sample/js/src/jsMain/kotlin/main.kt similarity index 75% rename from sample/js/src/main/kotlin/main.kt rename to sample/js/src/jsMain/kotlin/main.kt index 34e762e6..916772f8 100644 --- a/sample/js/src/main/kotlin/main.kt +++ b/sample/js/src/jsMain/kotlin/main.kt @@ -12,45 +12,28 @@ import com.aallam.openai.api.image.ImageSize import com.aallam.openai.api.logging.LogLevel import com.aallam.openai.api.model.ModelId import com.aallam.openai.api.moderation.ModerationRequest -import com.aallam.openai.api.LegacyOpenAI import com.aallam.openai.client.LoggingConfig import com.aallam.openai.client.OpenAI import kotlinx.coroutines.CoroutineScope import 
kotlinx.coroutines.flow.launchIn import kotlinx.coroutines.flow.onCompletion import kotlinx.coroutines.flow.onEach -import okio.NodeJsFileSystem -import okio.Path.Companion.toPath +import kotlinx.io.files.Path import kotlin.coroutines.coroutineContext -@OptIn(LegacyOpenAI::class) suspend fun main() { val apiKey = js("process.env.OPENAI_API_KEY").unsafeCast() val token = requireNotNull(apiKey) { "OPENAI_API_KEY environment variable must be set." } - val openAI = OpenAI(token = token, logging = LoggingConfig(LogLevel.All)) + val openAI = OpenAI(token = token, logging = LoggingConfig(LogLevel.Info)) val scope = CoroutineScope(coroutineContext) println("> Getting available models...") openAI.models().forEach(::println) - println("\n> Getting ada model...") - val ada = openAI.model(modelId = ModelId("text-ada-001")) + println("\n> Getting model...") + val ada = openAI.model(modelId = ModelId("gpt-3.5-turbo")) println(ada) - println("\n>️ Creating completion...") - val completionRequest = CompletionRequest( - model = ada.id, - prompt = "Somebody once told me the world is gonna roll me" - ) - openAI.completion(completionRequest).choices.forEach(::println) - - println("\n>️ Creating completion stream...") - openAI.completions(completionRequest) - .onEach { print(it.choices[0].text) } - .onCompletion { println() } - .launchIn(scope) - .join() - println("\n> Read files...") val files = openAI.files() println(files) @@ -75,8 +58,8 @@ suspend fun main() { println("\n> Edit images...") val imageEdit = ImageEdit( - image = FileSource(path = "kotlin/image.png".toPath(), fileSystem = NodeJsFileSystem), - mask = FileSource(path = "kotlin/mask.png".toPath(), fileSystem = NodeJsFileSystem), + image = FileSource(path = Path("kotlin","image.png")), + mask = FileSource(path = Path("kotlin","mask.png")), prompt = "a sunlit indoor lounge area with a pool containing a flamingo", n = 1, size = ImageSize.is1024x1024, @@ -109,7 +92,7 @@ suspend fun main() { println("\n>️ Create 
transcription...") val transcriptionRequest = TranscriptionRequest( - audio = FileSource(path = "kotlin/micro-machines.wav".toPath(), fileSystem = NodeJsFileSystem), + audio = FileSource(path = Path("kotlin","micro-machines.wav")), model = ModelId("whisper-1"), ) val transcription = openAI.transcription(transcriptionRequest) @@ -117,7 +100,7 @@ suspend fun main() { println("\n>️ Create translation...") val translationRequest = TranslationRequest( - audio = FileSource(path = "kotlin/multilingual.wav".toPath(), fileSystem = NodeJsFileSystem), + audio = FileSource(path = Path("kotlin", "multilingual.wav")), model = ModelId("whisper-1"), ) val translation = openAI.translation(translationRequest) diff --git a/sample/js/src/main/resources/image.png b/sample/js/src/jsMain/resources/image.png similarity index 100% rename from sample/js/src/main/resources/image.png rename to sample/js/src/jsMain/resources/image.png diff --git a/sample/js/src/main/resources/mask.png b/sample/js/src/jsMain/resources/mask.png similarity index 100% rename from sample/js/src/main/resources/mask.png rename to sample/js/src/jsMain/resources/mask.png diff --git a/sample/js/src/main/resources/micro-machines.wav b/sample/js/src/jsMain/resources/micro-machines.wav similarity index 100% rename from sample/js/src/main/resources/micro-machines.wav rename to sample/js/src/jsMain/resources/micro-machines.wav diff --git a/sample/js/src/main/resources/multilingual.wav b/sample/js/src/jsMain/resources/multilingual.wav similarity index 100% rename from sample/js/src/main/resources/multilingual.wav rename to sample/js/src/jsMain/resources/multilingual.wav diff --git a/sample/jvm/src/main/kotlin/com/aallam/openai/sample/jvm/AssistantsFunction.kt b/sample/jvm/src/main/kotlin/com/aallam/openai/sample/jvm/AssistantsFunction.kt index 30f5a99f..40a4153a 100644 --- a/sample/jvm/src/main/kotlin/com/aallam/openai/sample/jvm/AssistantsFunction.kt +++ 
b/sample/jvm/src/main/kotlin/com/aallam/openai/sample/jvm/AssistantsFunction.kt @@ -2,6 +2,7 @@ package com.aallam.openai.sample.jvm import com.aallam.openai.api.BetaOpenAI import com.aallam.openai.api.assistant.AssistantRequest +import com.aallam.openai.api.assistant.AssistantResponseFormat import com.aallam.openai.api.assistant.AssistantTool import com.aallam.openai.api.assistant.Function import com.aallam.openai.api.chat.ToolCall @@ -17,7 +18,10 @@ import com.aallam.openai.api.run.RunRequest import com.aallam.openai.api.run.ToolOutput import com.aallam.openai.client.OpenAI import kotlinx.coroutines.delay +import kotlinx.serialization.json.JsonArray +import kotlinx.serialization.json.JsonPrimitive import kotlinx.serialization.json.add +import kotlinx.serialization.json.buildJsonObject import kotlinx.serialization.json.put import kotlinx.serialization.json.putJsonArray import kotlinx.serialization.json.putJsonObject @@ -29,6 +33,36 @@ suspend fun assistantsFunctions(openAI: OpenAI) { request = AssistantRequest( name = "Math Tutor", instructions = "You are a weather bot. 
Use the provided functions to answer questions.", + responseFormat = AssistantResponseFormat.JSON_SCHEMA( + name = "math_response", + strict = true, + schema = buildJsonObject { + put("type", "object") + putJsonObject("properties") { + putJsonObject("steps") { + put("type", "array") + putJsonObject("items") { + put("type", "object") + putJsonObject("properties") { + putJsonObject("explanation") { + put("type", "string") + } + putJsonObject("output") { + put("type", "string") + } + } + put("required", JsonArray(listOf(JsonPrimitive("explanation"), JsonPrimitive("output")))) + put("additionalProperties", false) + } + } + putJsonObject("final_answer") { + put("type", "string") + } + } + put("additionalProperties", false) + put("required", JsonArray(listOf(JsonPrimitive("steps"), JsonPrimitive("final_answer")))) + }, + ), tools = listOf( AssistantTool.FunctionTool( function = Function( @@ -74,7 +108,7 @@ suspend fun assistantsFunctions(openAI: OpenAI) { ) ) ), - model = ModelId("gpt-4-1106-preview") + model = ModelId("gpt-4o-mini") ) ) diff --git a/sample/jvm/src/main/kotlin/com/aallam/openai/sample/jvm/AssistantsRetrieval.kt b/sample/jvm/src/main/kotlin/com/aallam/openai/sample/jvm/AssistantsRetrieval.kt index 57c80468..94f33fec 100644 --- a/sample/jvm/src/main/kotlin/com/aallam/openai/sample/jvm/AssistantsRetrieval.kt +++ b/sample/jvm/src/main/kotlin/com/aallam/openai/sample/jvm/AssistantsRetrieval.kt @@ -14,14 +14,13 @@ import com.aallam.openai.api.model.ModelId import com.aallam.openai.api.run.RunRequest import com.aallam.openai.client.OpenAI import kotlinx.coroutines.delay -import okio.FileSystem -import okio.Path.Companion.toPath +import kotlinx.io.files.Path @OptIn(BetaOpenAI::class) suspend fun assistantsRetrieval(openAI: OpenAI) { // 1. 
Upload a file with an "assistants" purpose - val fileUpload = FileUpload(file = FileSource("udhr.pdf".toPath(), FileSystem.RESOURCES), purpose = Purpose("assistants")) + val fileUpload = FileUpload(file = FileSource(Path("udhr.pdf")), purpose = Purpose("assistants")) val knowledgeBase = openAI.file(request = fileUpload) val assistant = openAI.assistant( @@ -30,7 +29,6 @@ suspend fun assistantsRetrieval(openAI: OpenAI) { instructions = "You are a chatbot specialized in 'The Universal Declaration of Human Rights.' Answer questions and provide information based on this document.", tools = listOf(AssistantTool.FileSearch), model = ModelId("gpt-4-1106-preview"), - fileIds = listOf(knowledgeBase.id) ) ) diff --git a/sample/jvm/src/main/kotlin/com/aallam/openai/sample/jvm/Resources.kt b/sample/jvm/src/main/kotlin/com/aallam/openai/sample/jvm/Resources.kt new file mode 100644 index 00000000..e76d33a2 --- /dev/null +++ b/sample/jvm/src/main/kotlin/com/aallam/openai/sample/jvm/Resources.kt @@ -0,0 +1,14 @@ +package com.aallam.openai.sample.jvm + +import kotlinx.io.files.Path + +object Resources { + fun path(resource: String): Path { + return Path(path = getPath(resource)) + } + + private fun getPath(resource: String): String { + return Resources::class.java.getResource("/$resource")?.path + ?: throw IllegalStateException("Resource $resource not found") + } +} diff --git a/sample/jvm/src/main/kotlin/com/aallam/openai/sample/jvm/Whisper.kt b/sample/jvm/src/main/kotlin/com/aallam/openai/sample/jvm/Whisper.kt index dc41696f..3f8e6f55 100644 --- a/sample/jvm/src/main/kotlin/com/aallam/openai/sample/jvm/Whisper.kt +++ b/sample/jvm/src/main/kotlin/com/aallam/openai/sample/jvm/Whisper.kt @@ -5,13 +5,11 @@ import com.aallam.openai.api.audio.TranslationRequest import com.aallam.openai.api.file.FileSource import com.aallam.openai.api.model.ModelId import com.aallam.openai.client.OpenAI -import okio.FileSystem -import okio.Path.Companion.toPath suspend fun whisper(openAI: OpenAI) { 
println("\n>️ Create transcription...") val transcriptionRequest = TranscriptionRequest( - audio = FileSource(path = "micro-machines.wav".toPath(), fileSystem = FileSystem.RESOURCES), + audio = FileSource(path = Resources.path("micro-machines.wav")), model = ModelId("whisper-1"), ) val transcription = openAI.transcription(transcriptionRequest) @@ -19,7 +17,7 @@ suspend fun whisper(openAI: OpenAI) { println("\n>️ Create translation...") val translationRequest = TranslationRequest( - audio = FileSource(path = "multilingual.wav".toPath(), fileSystem = FileSystem.RESOURCES), + audio = FileSource(path = Resources.path("multilingual.wav")), model = ModelId("whisper-1"), ) val translation = openAI.translation(translationRequest) diff --git a/sample/jvm/src/main/kotlin/com/aallam/openai/sample/jvm/images.kt b/sample/jvm/src/main/kotlin/com/aallam/openai/sample/jvm/images.kt index 7035c852..f00284ee 100644 --- a/sample/jvm/src/main/kotlin/com/aallam/openai/sample/jvm/images.kt +++ b/sample/jvm/src/main/kotlin/com/aallam/openai/sample/jvm/images.kt @@ -5,8 +5,6 @@ import com.aallam.openai.api.image.ImageCreation import com.aallam.openai.api.image.ImageEdit import com.aallam.openai.api.image.ImageSize import com.aallam.openai.client.OpenAI -import okio.FileSystem -import okio.Path.Companion.toPath suspend fun images(openAI: OpenAI) { println("\n> Create images...") @@ -21,8 +19,8 @@ suspend fun images(openAI: OpenAI) { println("\n> Edit images...") val imageEdit = ImageEdit( - image = FileSource(path = "image.png".toPath(), fileSystem = FileSystem.RESOURCES), - mask = FileSource(path = "image.png".toPath(), fileSystem = FileSystem.RESOURCES), + image = FileSource(path = Resources.path("image.png")), + mask = FileSource(path = Resources.path("image.png")), prompt = "a sunlit indoor lounge area with a pool containing a flamingo", n = 1, size = ImageSize.is1024x1024, diff --git a/sample/native/src/nativeMain/kotlin/main.kt b/sample/native/src/nativeMain/kotlin/main.kt index 
38bea70c..d5cd61ec 100644 --- a/sample/native/src/nativeMain/kotlin/main.kt +++ b/sample/native/src/nativeMain/kotlin/main.kt @@ -18,8 +18,7 @@ import kotlinx.coroutines.flow.launchIn import kotlinx.coroutines.flow.onCompletion import kotlinx.coroutines.flow.onEach import kotlinx.coroutines.runBlocking -import okio.FileSystem -import okio.Path.Companion.toPath +import kotlinx.io.files.Path import platform.posix.getenv fun main(): Unit = runBlocking { @@ -62,8 +61,8 @@ fun main(): Unit = runBlocking { println("\n> Edit images...") val imageEdit = ImageEdit( - image = FileSource(path = "$resourcesPrefix/image.png".toPath(), fileSystem = FileSystem.SYSTEM), - mask = FileSource(path = "$resourcesPrefix/mask.png".toPath(), fileSystem = FileSystem.SYSTEM), + image = FileSource(path = Path(resourcesPrefix, "image.png")), + mask = FileSource(path = Path(resourcesPrefix, "mask.png")), prompt = "a sunlit indoor lounge area with a pool containing a flamingo", n = 1, size = ImageSize.is1024x1024, @@ -96,7 +95,7 @@ fun main(): Unit = runBlocking { println("\n>️ Create transcription...") val transcriptionRequest = TranscriptionRequest( - audio = FileSource(path = "$resourcesPrefix/micro-machines.wav".toPath(), fileSystem = FileSystem.SYSTEM), + audio = FileSource(path = Path(resourcesPrefix, "micro-machines.wav")), model = ModelId("whisper-1"), ) val transcription = openAI.transcription(transcriptionRequest) @@ -104,7 +103,7 @@ fun main(): Unit = runBlocking { println("\n>️ Create translation...") val translationRequest = TranslationRequest( - audio = FileSource(path = "$resourcesPrefix/multilingual.wav".toPath(), fileSystem = FileSystem.SYSTEM), + audio = FileSource(path = Path(resourcesPrefix, "multilingual.wav")), model = ModelId("whisper-1"), ) val translation = openAI.translation(translationRequest)