   withTrace,
   type ResponseStreamEvent,
 } from '@openai/agents-core';
+import type { ResponseStreamEvent as OpenAIResponseStreamEvent } from 'openai/resources/responses/responses';
 
 describe('OpenAIResponsesModel', () => {
   beforeAll(() => {
@@ -412,11 +413,20 @@ describe('OpenAIResponsesModel', () => {
   it('getStreamedResponse yields events and calls client with stream flag', async () => {
     await withTrace('test', async () => {
       const fakeResponse = { id: 'res2', usage: {}, output: [] };
-      const events: ResponseStreamEvent[] = [
-        { type: 'response.created', response: fakeResponse as any },
+      const events: OpenAIResponseStreamEvent[] = [
+        {
+          type: 'response.created',
+          response: fakeResponse as any,
+          sequence_number: 0,
+        },
         {
           type: 'response.output_text.delta',
+          content_index: 0,
           delta: 'delta',
+          item_id: 'item-1',
+          logprobs: [],
+          output_index: 0,
+          sequence_number: 1,
         } as any,
       ];
       async function* fakeStream() {
@@ -462,6 +472,11 @@ describe('OpenAIResponsesModel', () => {
           type: 'output_text_delta',
           delta: 'delta',
           providerData: {
+            content_index: 0,
+            item_id: 'item-1',
+            logprobs: [],
+            output_index: 0,
+            sequence_number: 1,
             type: 'response.output_text.delta',
           },
         },
@@ -472,4 +487,65 @@ describe('OpenAIResponsesModel', () => {
       ]);
     });
   });
+
+  it('getStreamedResponse maps streamed usage data onto response_done events', async () => {
+    await withTrace('test', async () => {
+      const createdEvent: OpenAIResponseStreamEvent = {
+        type: 'response.created',
+        response: { id: 'res-stream-init' } as any,
+        sequence_number: 0,
+      };
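+      // The completed event reports usage in the API's snake_case fields; the
+      // assertions below expect them to surface on the final response_done event.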
+      const completedEvent: OpenAIResponseStreamEvent = {
+        type: 'response.completed',
+        response: {
+          id: 'res-stream',
+          output: [],
+          usage: {
+            input_tokens: 11,
+            output_tokens: 5,
+            total_tokens: 16,
+            input_tokens_details: { cached_tokens: 2 },
+            output_tokens_details: { reasoning_tokens: 3 },
+          },
+        },
+        sequence_number: 1,
+      } as any;
+      async function* fakeStream() {
+        yield createdEvent;
+        yield completedEvent;
+      }
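+      // Stub client whose responses.create resolves to the fake stream above.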
+      const createMock = vi.fn().mockResolvedValue(fakeStream());
+      const fakeClient = {
+        responses: { create: createMock },
+      } as unknown as OpenAI;
+      const model = new OpenAIResponsesModel(fakeClient, 'model-usage');
+
+      const request = {
+        systemInstructions: undefined,
+        input: 'payload',
+        modelSettings: {},
+        tools: [],
+        outputType: 'text',
+        handoffs: [],
+        tracing: false,
+        signal: undefined,
+      };
+
+      const received: ResponseStreamEvent[] = [];
+      for await (const ev of model.getStreamedResponse(request as any)) {
+        received.push(ev);
+      }
+
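+      // The stream should end with a response_done event carrying camelCased usage.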
+      const responseDone = received.find((ev) => ev.type === 'response_done');
+      expect(responseDone).toBeDefined();
+      expect((responseDone as any).response.id).toBe('res-stream');
+      expect((responseDone as any).response.usage).toEqual({
+        inputTokens: 11,
+        outputTokens: 5,
+        totalTokens: 16,
+        inputTokensDetails: { cached_tokens: 2 },
+        outputTokensDetails: { reasoning_tokens: 3 },
+      });
+    });
+  });
 });