abort-all-requests.ts

import ollama from 'ollama'

// Set a timeout to abort all requests after 5 seconds
setTimeout(() => {
  console.log('\nAborting all requests...\n')
  ollama.abort()
}, 5000) // 5000 milliseconds = 5 seconds

// Start multiple concurrent streaming requests
Promise.all([
  ollama.generate({
    model: 'llama3.2',
    prompt: 'Write a long story about dragons',
    stream: true,
  }).then(async (stream) => {
    console.log(' Starting stream for dragons story...')
    for await (const chunk of stream) {
      process.stdout.write(' 1> ' + chunk.response)
    }
  }),
  ollama.generate({
    model: 'llama3.2',
    prompt: 'Write a long story about wizards',
    stream: true,
  }).then(async (stream) => {
    console.log(' Starting stream for wizards story...')
    for await (const chunk of stream) {
      process.stdout.write(' 2> ' + chunk.response)
    }
  }),
  ollama.generate({
    model: 'llama3.2',
    prompt: 'Write a long story about knights',
    stream: true,
  }).then(async (stream) => {
    console.log(' Starting stream for knights story...')
    for await (const chunk of stream) {
      process.stdout.write(' 3> ' + chunk.response)
    }
  }),
]).catch((error) => {
  if (error.name === 'AbortError') {
    console.log('All requests have been aborted')
  } else {
    console.error('An error occurred:', error)
  }
})
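
// Variation (not part of the example above): ollama.abort() cancels every in-flight
// streamed request at once. A minimal sketch of cancelling just one request instead,
// assuming the iterator returned by generate() with stream: true exposes its own
// abort() method (AbortableAsyncIterator in ollama-js):
//
//   const stream = await ollama.generate({
//     model: 'llama3.2',
//     prompt: 'Write a long story about dragons',
//     stream: true,
//   })
//
//   // Cancel only this stream after 5 seconds; other requests keep running
//   setTimeout(() => stream.abort(), 5000)
//
//   try {
//     for await (const chunk of stream) {
//       process.stdout.write(chunk.response)
//     }
//   } catch (error) {
//     if (error.name === 'AbortError') console.log('Request aborted')
//     else throw error
//   }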