@@ -874,7 +874,6 @@ export const generateImage = async (
             runtime.imageModelProvider === ModelProviderName.LLAMACLOUD
         ) {
             const together = new Together({ apiKey: apiKey as string });
-            // Fix: steps 4 is for schnell; 28 is for dev.
             const response = await together.images.create({
                 model: "black-forest-labs/FLUX.1-schnell",
                 prompt: data.prompt,
@@ -883,23 +882,41 @@ export const generateImage = async (
                 steps: modelSettings?.steps ?? 4,
                 n: data.count,
             });
-            const urls: string[] = [];
-            for (let i = 0; i < response.data.length; i++) {
-                const json = response.data[i].b64_json;
-                // decode base64
-                const base64 = Buffer.from(json, "base64").toString("base64");
-                urls.push(base64);
+
+            // Add type assertion to handle the response properly
+            const togetherResponse = response as unknown as TogetherAIImageResponse;
+
+            if (!togetherResponse.data || !Array.isArray(togetherResponse.data)) {
+                throw new Error("Invalid response format from Together AI");
             }
-            const base64s = await Promise.all(
-                urls.map(async (url) => {
-                    const response = await fetch(url);
-                    const blob = await response.blob();
-                    const buffer = await blob.arrayBuffer();
-                    let base64 = Buffer.from(buffer).toString("base64");
-                    base64 = "data:image/jpeg;base64," + base64;
-                    return base64;
-                })
-            );
+
+            // Rest of the code remains the same...
+            const base64s = await Promise.all(togetherResponse.data.map(async (image) => {
+                if (!image.url) {
+                    elizaLogger.error("Missing URL in image data:", image);
+                    throw new Error("Missing URL in Together AI response");
+                }
+
+                // Fetch the image from the URL
+                const imageResponse = await fetch(image.url);
+                if (!imageResponse.ok) {
+                    throw new Error(`Failed to fetch image: ${imageResponse.statusText}`);
+                }
+
+                // Convert to blob and then to base64
+                const blob = await imageResponse.blob();
+                const arrayBuffer = await blob.arrayBuffer();
+                const base64 = Buffer.from(arrayBuffer).toString("base64");
+
+                // Return with proper MIME type
+                return `data:image/jpeg;base64,${base64}`;
+            }));
+
+            if (base64s.length === 0) {
+                throw new Error("No images generated by Together AI");
+            }
+
+            elizaLogger.debug(`Generated ${base64s.length} images`);
             return { success: true, data: base64s };
         } else if (runtime.imageModelProvider === ModelProviderName.FAL) {
             fal.config({
@@ -1406,3 +1423,12 @@ async function handleOllama({
         ...modelOptions,
     });
 }
+
+// Add type definition for Together AI response
+interface TogetherAIImageResponse {
+    data: Array<{
+        url: string;
+        content_type?: string;
+        image_type?: string;
+    }>;
+}
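The new branch no longer reads b64_json fields; it fetches each image URL returned by Together AI and converts the bytes to a base64 data URI. Below is a minimal standalone sketch of that conversion, assuming Node 18+ (global fetch and Buffer) and the TogetherAIImageResponse shape added above; the helper name fetchImageAsDataUri is hypothetical and not part of the patch.

// Sketch only (not part of the patch): convert one Together AI image entry
// to a base64 data URI, mirroring the logic added in the diff above.
// Assumes Node 18+ globals (fetch, Buffer); fetchImageAsDataUri is a
// hypothetical helper name.
async function fetchImageAsDataUri(image: { url: string }): Promise<string> {
    const res = await fetch(image.url);
    if (!res.ok) {
        throw new Error(`Failed to fetch image: ${res.statusText}`);
    }
    // Response.arrayBuffer() yields the raw bytes; Buffer encodes them as base64
    const bytes = await res.arrayBuffer();
    const base64 = Buffer.from(bytes).toString("base64");
    return `data:image/jpeg;base64,${base64}`;
}

// Usage against a TogetherAIImageResponse-shaped object:
// const base64s = await Promise.all(togetherResponse.data.map(fetchImageAsDataUri));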