@@ -4,6 +4,8 @@ import OpenAI from "openai";
 import { Octokit } from "@octokit/rest";
 import parseDiff, { Chunk, File } from "parse-diff";
 import minimatch from "minimatch";
+import { parse } from "path";
+import { APIPromise } from "openai/core";
 
 const GITHUB_TOKEN: string = core.getInput("GITHUB_TOKEN");
 const OPENAI_API_KEY: string = core.getInput("OPENAI_API_KEY");
@@ -61,7 +63,15 @@ async function analyzeCode(
   prDetails: PRDetails
 ): Promise<Array<{ body: string; path: string; line: number }>> {
   const comments: Array<{ body: string; path: string; line: number }> = [];
-
+  if (parsedDiff.length > 10) {
+    // Too many files to review
+    comments.push({
+      body: "This pull request has too many files to review (more than 10). Please split it into smaller pull requests. This is for cost purposes.",
+      path: "",
+      line: 0,
+    });
+    return comments;
+  }
   for (const file of parsedDiff) {
     if (file.to === "/dev/null") continue; // Ignore deleted files
     for (const chunk of file.chunks) {
@@ -79,9 +89,10 @@ async function analyzeCode(
 }
 
 function createPrompt(file: File, chunk: Chunk, prDetails: PRDetails): string {
-  return `Your task is to review pull requests. Instructions:
+  return `You are an expert Expo/React Native developer. Your task is to review pull requests. Instructions:
 - Provide the response in following JSON format: {"reviews": [{"lineNumber": <line_number>, "reviewComment": "<review comment>"}]}
 - Do not give positive comments or compliments.
+- Only comment about javascript code. Do not comment about css classes.
 - Provide comments and suggestions ONLY if there is something to improve, otherwise "reviews" should be an empty array.
 - Write the comment in GitHub Markdown format.
 - Use the given description only for the overall context and only comment the code.
@@ -123,13 +134,12 @@ async function getAIResponse(prompt: string): Promise<Array<{
     presence_penalty: 0,
   };
 
+  let response: OpenAI.Chat.Completions.ChatCompletion | null = null;
   try {
-    const response = await openai.chat.completions.create({
+    response = await openai.chat.completions.create({
       ...queryConfig,
       // return JSON if the model supports it:
-      ...(OPENAI_API_MODEL === "gpt-4-1106-preview"
-        ? { response_format: { type: "json_object" } }
-        : {}),
+      response_format: { type: "json_object" as const },
       messages: [
         {
           role: "system",
@@ -138,10 +148,22 @@ async function getAIResponse(prompt: string): Promise<Array<{
       ],
     });
 
+    const finish_response = response.choices[0].finish_reason;
+    if (finish_response === "length") {
+      console.log(
+        "The maximum context length has been exceeded. Please reduce the length of the code snippets."
+      );
+      return null;
+    }
+
     const res = response.choices[0].message?.content?.trim() || "{}";
-    return JSON.parse(res).reviews;
+    if (res.startsWith("```json")) {
+      return JSON.parse(res.slice(7, -3)).reviews;
+    } else {
+      return JSON.parse(res).reviews;
+    }
   } catch (error) {
-    console.error("Error:", error, response?.choices[0].message?.content);
+    console.error("Error:", error, response?.choices[0].message?.content);
     return null;
   }
 }
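
For reference, a standalone sketch of the fence-stripping logic this commit adds before `JSON.parse`. The helper name `parseReviews` and the regex-based fence handling are assumptions for illustration, not part of the commit (which uses `res.slice(7, -3)`):

```ts
// Hypothetical helper (not in this commit): strips an optional
// ```json ... ``` fence from the model reply before parsing it.
function parseReviews(
  raw: string
): Array<{ lineNumber: number; reviewComment: string }> {
  let text = raw.trim();
  if (text.startsWith("```")) {
    // Drop the opening fence (with or without a "json" tag) and the closing fence.
    text = text
      .replace(/^```(?:json)?\s*/, "")
      .replace(/```$/, "")
      .trim();
  }
  try {
    return JSON.parse(text).reviews ?? [];
  } catch {
    return [];
  }
}

// Example:
// parseReviews('```json\n{"reviews":[{"lineNumber":3,"reviewComment":"Use const"}]}\n```')
// → [{ lineNumber: 3, reviewComment: "Use const" }]
```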