@@ -2,7 +2,6 @@ import Groq from 'groq-sdk';
 import { KnownError } from './error.js';
 import type { CommitType } from './config.js';
 import { generatePrompt } from './prompt.js';
-import { chunkDiff, splitDiffByFile, estimateTokenCount } from './git.js';
 
 const createChatCompletion = async (
 	apiKey: string,
@@ -121,224 +120,6 @@ const deriveMessageFromReasoning = (text: string, maxLength: number): string | n
 	return candidate;
 };
 
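-// Generates candidate commit messages from the full diff in one completion
-// call; falls back to the model's reasoning text when content comes back empty.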
-export const generateCommitMessage = async (
-	apiKey: string,
-	model: string,
-	locale: string,
-	diff: string,
-	completions: number,
-	maxLength: number,
-	type: CommitType,
-	timeout: number,
-	proxy?: string
-) => {
-	try {
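-		// The positional arguments below are presumably the OpenAI-style sampling
-		// knobs: temperature 0.7, top_p 1, frequency/presence penalties 0, then a
-		// max-token budget of at least 200 (scaled by maxLength) and n = completions.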
-		const completion = await createChatCompletion(
-			apiKey,
-			model,
-			[
-				{
-					role: 'system',
-					content: generatePrompt(locale, maxLength, type),
-				},
-				{
-					role: 'user',
-					content: diff,
-				},
-			],
-			0.7,
-			1,
-			0,
-			0,
-			Math.max(200, maxLength * 8),
-			completions,
-			timeout,
-			proxy
-		);
-
-		const messages = completion.choices
-			.map((choice) => choice.message?.content || '')
-			.map((text) => sanitizeMessage(text as string))
-			.filter(Boolean);
-
-		if (messages.length > 0) return deduplicateMessages(messages);
-
-		// Fallback: some Groq models return reasoning with an empty content
-		const reasoningCandidates = (completion.choices as any[])
-			.map((c) => (c as any).message?.reasoning || '')
-			.filter(Boolean) as string[];
-		for (const reason of reasoningCandidates) {
-			const derived = deriveMessageFromReasoning(reason, maxLength);
-			if (derived) return [derived];
-		}
-
-		return [];
-	} catch (error) {
-		const errorAsAny = error as any;
-		if (errorAsAny.code === 'ENOTFOUND') {
-			throw new KnownError(
-				`Error connecting to ${errorAsAny.hostname} (${errorAsAny.syscall}). Are you connected to the internet?`
-			);
-		}
-
-		throw errorAsAny;
-	}
-};
-
-export const generateCommitMessageFromChunks = async (
-	apiKey: string,
-	model: string,
-	locale: string,
-	diff: string,
-	completions: number,
-	maxLength: number,
-	type: CommitType,
-	timeout: number,
-	proxy?: string,
-	chunkSize: number = 6000
-) => {
-	// Strategy: split by file first to avoid crossing file boundaries
-	const fileDiffs = splitDiffByFile(diff);
-	const perFileChunks = fileDiffs.flatMap(fd => chunkDiff(fd, chunkSize));
-	const chunks = perFileChunks.length > 0 ? perFileChunks : chunkDiff(diff, chunkSize);
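-	// Example: a diff touching src/a.ts and src/b.ts is split into per-file
-	// blocks first, and any block longer than chunkSize is split again, so a
-	// chunk never mixes files (behavior assumed from splitDiffByFile/chunkDiff).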
-
-	if (chunks.length === 1) {
-		try {
-			return await generateCommitMessage(
-				apiKey,
-				model,
-				locale,
-				diff,
-				completions,
-				maxLength,
-				type,
-				timeout,
-				proxy
-			);
-		} catch (error) {
-			throw new KnownError(`Failed to generate commit message: ${error instanceof Error ? error.message : 'Unknown error'}`);
-		}
-	}
-
-	// Multiple chunks - generate commit messages for each chunk
-	const chunkMessages: string[] = [];
-
-	for (let i = 0; i < chunks.length; i++) {
-		const chunk = chunks[i];
-		const approxInputTokens = estimateTokenCount(chunk) + 1200; // reserve for prompt/system
-		let effectiveMaxTokens = Math.max(200, maxLength * 8);
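-		// Output budget: roughly 8 tokens per allowed character, floored at 200.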
-		// If close to model limit, reduce output tokens
-		if (approxInputTokens + effectiveMaxTokens > 7500) {
-			effectiveMaxTokens = Math.max(200, 7500 - approxInputTokens);
-		}
-
-		const chunkPrompt = `Analyze this git diff and propose a concise commit message limited to ${maxLength} characters. Focus on the most significant intent of the change.\n\n${chunk}`;
-
-		try {
-			const messages = await createChatCompletion(
-				apiKey,
-				model,
-				[
-					{ role: 'system', content: generatePrompt(locale, maxLength, type) },
-					{ role: 'user', content: chunkPrompt },
-				],
-				0.7,
-				1,
-				0,
-				0,
-				effectiveMaxTokens,
-				1,
-				timeout,
-				proxy
-			);
-
-			const texts = (messages.choices || [])
-				.map(c => c.message?.content)
-				.filter(Boolean) as string[];
-			if (texts.length > 0) {
-				chunkMessages.push(sanitizeMessage(texts[0]));
-			} else {
-				const reasons = (messages.choices as any[]).map((c: any) => c.message?.reasoning || '').filter(Boolean) as string[];
-				if (reasons.length > 0) {
-					const derived = deriveMessageFromReasoning(reasons[0], maxLength);
-					if (derived) chunkMessages.push(derived);
-				}
-			}
-		} catch (error) {
-			console.warn(`Failed to process chunk ${i + 1}:`, error instanceof Error ? error.message : 'Unknown error');
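-			// Skip this chunk on failure; the remaining chunks are still processed.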
-		}
-	}
-
-	if (chunkMessages.length === 0) {
-		// Fallback: summarize per-file names only to craft a high-level message
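-		// Each file block starts with "diff --git a/<path> b/<path>", so the
-		// third space-separated token is the a/-prefixed path.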
-		const fileNames = splitDiffByFile(diff)
-			.map(block => {
-				const first = block.split('\n', 1)[0] || '';
-				const parts = first.split(' ');
-				return parts[2]?.replace('a/', '') || '';
-			})
-			.filter(Boolean)
-			.slice(0, 15);
-
-		const fallbackPrompt = `Generate a single, concise commit message (<= ${maxLength} chars) summarizing changes across these files:\n${fileNames.map(f => `- ${f}`).join('\n')}`;
-
-		try {
-			const completion = await createChatCompletion(
-				apiKey,
-				model,
-				[
-					{ role: 'system', content: generatePrompt(locale, maxLength, type) },
-					{ role: 'user', content: fallbackPrompt },
-				],
-				0.7,
-				1,
-				0,
-				0,
-				Math.max(200, maxLength * 8),
-				1,
-				timeout,
-				proxy
-			);
-			const texts = (completion.choices || [])
-				.map(c => c.message?.content)
-				.filter(Boolean) as string[];
-			if (texts.length > 0) return [sanitizeMessage(texts[0])];
-		} catch {}
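-		// A failed fallback request is swallowed; the KnownError below reports it.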
-
-		throw new KnownError('Failed to generate commit messages for any chunks');
-	}
-
-	// If we have multiple chunk messages, try to combine them intelligently
-	if (chunkMessages.length > 1) {
-		const combinedPrompt = `I have ${chunkMessages.length} commit messages for different parts of a large change:
-
-${chunkMessages.map((msg, i) => `${i + 1}. ${msg}`).join('\n')}
-
-Please generate a single, comprehensive commit message that captures the overall changes.
-The message should be concise but cover the main aspects of all the changes.`;
-
-		try {
-			const combinedMessages = await generateCommitMessage(
-				apiKey,
-				model,
-				locale,
-				combinedPrompt,
-				completions,
-				maxLength,
-				type,
-				timeout,
-				proxy
-			);
-
-			return combinedMessages;
-		} catch (error) {
-			// If combining fails, return the individual chunk messages
-			return chunkMessages;
-		}
-	}
-
-	return chunkMessages;
-};
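-
-// Hypothetical call site (not shown in this diff); the model, locale,
-// maxLength, and timeout values are illustrative only:
-//   const messages = await generateCommitMessageFromChunks(
-//     apiKey, 'llama-3.1-8b-instant', 'en', stagedDiff,
-//     1 /* completions */, 50 /* maxLength */, commitType, 10_000 /* timeout */
-//   );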
 
 export const generateCommitMessageFromSummary = async (
 	apiKey: string,