Quellcode durchsuchen

Clarified aborting examples and readme (#157)

Clarified aborting examples and updated readme
Parth Sareen vor 6 Monaten
Ursprung
Commit
c97f231bce

+ 4 - 2
README.md

@@ -204,8 +204,10 @@ ollama.ps()
 ollama.abort()
 ```
 
-This method will abort all streamed generations currently running.
-All asynchronous threads listening to streams (typically the ```for await (const part of response)```) will throw an ```AbortError``` exception
+This method will abort **all** streamed generations currently running with the client instance.
+If there is a need to manage streams with timeouts, it is recommended to have one Ollama client per stream.
+
+All asynchronous threads listening to streams (typically the `for await (const part of response)` loop) will throw an `AbortError` exception. See [examples/abort/abort-all-requests.ts](examples/abort/abort-all-requests.ts) for an example.
 
 ## Custom client
 

+ 55 - 0
examples/abort/abort-all-requests.ts

@@ -0,0 +1,55 @@
+import ollama from 'ollama'
+
+// Set a timeout to abort all requests after 5 seconds
+setTimeout(() => {
+  console.log('\nAborting all requests...\n')
+  ollama.abort()
+}, 5000) // 5000 milliseconds = 5 seconds
+
+// Start multiple concurrent streaming requests
+Promise.all([
+  ollama.generate({
+    model: 'llama3.2',
+    prompt: 'Write a long story about dragons',
+    stream: true,
+  }).then(
+    async (stream) => {
+      console.log(' Starting stream for dragons story...')
+      for await (const chunk of stream) {
+        process.stdout.write(' 1> ' + chunk.response)
+      }
+    }
+  ),
+
+  ollama.generate({
+    model: 'llama3.2', 
+    prompt: 'Write a long story about wizards',
+    stream: true,
+  }).then(
+    async (stream) => {
+      console.log(' Starting stream for wizards story...')
+      for await (const chunk of stream) {
+        process.stdout.write(' 2> ' + chunk.response)
+      }
+    }
+  ),
+
+  ollama.generate({
+    model: 'llama3.2',
+    prompt: 'Write a long story about knights',
+    stream: true,
+  }).then(
+    async (stream) => {
+      console.log(' Starting stream for knights story...')
+      for await (const chunk of stream) {
+        process.stdout.write(' 3>' + chunk.response)
+      }
+    }
+  )
+]).catch(error => {
+  if (error.name === 'AbortError') {
+    console.log('All requests have been aborted')
+  } else {
+    console.error('An error occurred:', error)
+  }
+})

+ 50 - 0
examples/abort/abort-single-request.ts

@@ -0,0 +1,50 @@
+import { Ollama } from 'ollama'
+
+// Create multiple ollama clients
+const client1 = new Ollama()
+const client2 = new Ollama()
+
+// Set a timeout to abort just the first request after 5 seconds
+setTimeout(() => {
+  console.log('\nAborting dragons story...\n')
+  // abort the first client
+  client1.abort()
+}, 5000) // 5000 milliseconds = 5 seconds
+
+// Start multiple concurrent streaming requests with different clients
+Promise.all([
+  client1.generate({
+    model: 'llama3.2',
+    prompt: 'Write a long story about dragons',
+    stream: true,
+  }).then(
+    async (stream) => {
+      console.log(' Starting stream for dragons story...')
+      for await (const chunk of stream) {
+        process.stdout.write(' 1> ' + chunk.response)
+      }
+    }
+  ),
+
+  client2.generate({
+    model: 'llama3.2', 
+    prompt: 'Write a short story about wizards',
+    stream: true,
+  }).then(
+    async (stream) => {
+      console.log(' Starting stream for wizards story...')
+      for await (const chunk of stream) {
+        process.stdout.write(' 2> ' + chunk.response)
+      }
+    }
+  ),
+
+]).catch(error => {
+  if (error.name === 'AbortError') {
+    console.log('Dragons story request has been aborted')
+  } else {
+    console.error('An error occurred:', error)
+  }
+})
+
+

+ 0 - 27
examples/abort/any-request.ts

@@ -1,27 +0,0 @@
-import ollama from 'ollama'
-
-// Set a timeout to abort the request after 1 second
-setTimeout(() => {
-  console.log('\nAborting request...\n')
-  ollama.abort()
-}, 1000) // 1000 milliseconds = 1 second
-
-ollama.generate({
-    model: 'llama3.1',
-    prompt: 'Write a long story',
-    stream: true,
-  }).then(
-    async (stream) => {
-      for await (const chunk of stream) {
-        process.stdout.write(chunk.response)
-      }
-    }
-  ).catch(
-    (error) => {
-      if (error.name === 'AbortError') {
-        console.log('The request has been aborted')
-      } else {
-        console.error('An error occurred:', error)
-      }
-    }
-  )

+ 0 - 31
examples/abort/specific-request.ts

@@ -1,31 +0,0 @@
-import ollama from 'ollama'
-import { AbortableAsyncIterator } from '../../src/utils'
-
-let stream: AbortableAsyncIterator<object>
-
-// Set a timeout to abort the request after 1 second
-setTimeout(() => {
-  console.log('\nAborting request...\n')
-  stream.abort()
-}, 1000) // 1000 milliseconds = 1 second
-
-ollama.generate({
-    model: 'llama3.1',
-    prompt: 'Write a long story',
-    stream: true,
-  }).then(
-    async (_stream) => {
-      stream = _stream
-      for await (const chunk of stream) {
-        process.stdout.write(chunk.response)
-      }
-    }
-  ).catch(
-    (error) => {
-      if (error.name === 'AbortError') {
-        console.log('The request has been aborted')
-      } else {
-        console.error('An error occurred:', error)
-      }
-    }
-  )