@@ -2572,4 +2572,80 @@ describe.skip("InferenceClient", () => {
 		},
 		TIMEOUT
 	);
+	describe.concurrent(
+		"SiliconFlow",
+		() => {
+			const client = new InferenceClient(env.HF_SILICONFLOW_KEY ?? "dummy");
+
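+			// Pre-populate the provider mapping for the test models so they resolve
+			// to SiliconFlow directly, without a remote mapping lookup.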
+			HARDCODED_MODEL_INFERENCE_MAPPING["siliconflow"] = {
+				"deepseek-ai/DeepSeek-R1": {
+					provider: "siliconflow",
+					hfModelId: "deepseek-ai/DeepSeek-R1",
+					providerId: "deepseek-ai/DeepSeek-R1",
+					status: "live",
+					task: "conversational",
+				},
+				"deepseek-ai/DeepSeek-V3": {
+					provider: "siliconflow",
+					hfModelId: "deepseek-ai/DeepSeek-V3",
+					providerId: "deepseek-ai/DeepSeek-V3",
+					status: "live",
+					task: "conversational",
+				},
+			};
+
+			it("chatCompletion - DeepSeek-R1", async () => {
+				const res = await client.chatCompletion({
+					model: "deepseek-ai/DeepSeek-R1",
+					provider: "siliconflow",
+					messages: [{ role: "user", content: "What is the capital of France?" }],
+					max_tokens: 20,
+				});
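+				// Only assert on the answer when choices come back; with a small
+				// max_tokens budget the reasoning model may not emit final content.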
+				if (res.choices && res.choices.length > 0) {
+					const completion = res.choices[0].message?.content;
+					expect(completion).toBeDefined();
+					expect(typeof completion).toBe("string");
+					expect(completion).toMatch(/Paris/i);
+				}
+			});
+
+			it("chatCompletion - DeepSeek-V3", async () => {
+				const res = await client.chatCompletion({
+					model: "deepseek-ai/DeepSeek-V3",
+					provider: "siliconflow",
+					messages: [{ role: "user", content: "The weather today is" }],
+					max_tokens: 10,
+				});
+				expect(res.choices).toBeDefined();
+				expect(res.choices?.length).toBeGreaterThan(0);
+				expect(res.choices?.[0].message?.content).toBeDefined();
+				expect(typeof res.choices?.[0].message?.content).toBe("string");
+				expect(res.choices?.[0].message?.content?.length).toBeGreaterThan(0);
+			});
+
+			it("chatCompletion stream", async () => {
+				const stream = client.chatCompletionStream({
+					model: "deepseek-ai/DeepSeek-R1",
+					provider: "siliconflow",
+					messages: [{ role: "user", content: "Say 'this is a test'" }],
+					stream: true,
+				}) as AsyncGenerator<ChatCompletionStreamOutput>;
+
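+				// Concatenate the streamed delta fragments into the full completion.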
+				let fullResponse = "";
+				for await (const chunk of stream) {
+					if (chunk.choices && chunk.choices.length > 0) {
+						const content = chunk.choices[0].delta?.content;
+						if (content) {
+							fullResponse += content;
+						}
+					}
+				}
+
+				// Verify we got a meaningful response
+				expect(fullResponse).toBeTruthy();
+				expect(fullResponse.length).toBeGreaterThan(0);
+			});
+		},
+		TIMEOUT
+	);
 });