Skip to content

Commit

Permalink
fix the progress bar issue
Browse files Browse the repository at this point in the history
  • Loading branch information
huangjien committed May 3, 2024
1 parent fd6498b commit f622b93
Show file tree
Hide file tree
Showing 3 changed files with 50 additions and 33 deletions.
54 changes: 27 additions & 27 deletions extension.js
Original file line number Diff line number Diff line change
Expand Up @@ -42,34 +42,28 @@ function activate(context) {
const converter = new Showdown.Converter();

// You could prompt the user for these or use configuration settings
// const jobUrl = vscode.workspace
// .getConfiguration()
// .get('jenkins-log-reader.jenkinsUrl');
const username = vscode.workspace
.getConfiguration()
.get('jenkins-log-reader.jenkinsUsername');
const apiToken = vscode.workspace
.getConfiguration()
.get('jenkins-log-reader.jenkinsToken');

const logSize = getConfig('jenkins-log-reader.jenkinsLogSize');
const username = getConfig('jenkins-log-reader.jenkinsUsername');
const apiToken = getConfig('jenkins-log-reader.jenkinsToken');

if (!username || !apiToken) {
vscode.window.showInformationMessage(
'Please configure your Jenkins settings.'
);
return;
}

const localAiUrl = vscode.workspace
.getConfiguration()
.get('jenkins-log-reader.aiModelUrl');
const localAiUrl = getConfig('jenkins-log-reader.aiModelUrl');

const model = vscode.workspace
.getConfiguration()
.get('jenkins-log-reader.aiModel');
const model = getConfig('jenkins-log-reader.aiModel');

const prompt = getConfig('jenkins-log-reader.aiPrompt');

const temperature = getConfig('jenkins-log-reader.aiTemperature');

const maxToken = getConfig('jenkins-log-reader.aiMaxToken');

const prompt = vscode.workspace
.getConfiguration()
.get('jenkins-log-reader.aiPrompt');

if (!localAiUrl || !model) {
vscode.window.showInformationMessage(
Expand All @@ -92,7 +86,7 @@ function activate(context) {

fetchJenkinsLog(jobUrl, username, apiToken).then(log => {
if (log) {
const info = keepLongTail(log)
const info = keepLongTail(log, logSize)
const panel = vscode.window.createWebviewPanel(
'jenkinsLog',
'Jenkins Log',
Expand All @@ -101,7 +95,7 @@ function activate(context) {
panel.webview.html = `<br/><details><summary>${jobUrl}</summary><pre>${escapeHtml(info)}</pre></details><br/>`;
const promptString = prompt.replace('$PROMPT$', info);
// analyse with local AI
const longRunTask = aiAnalyse(localAi, model, promptString, panel, converter);
const longRunTask = aiAnalyse(localAi, model, promptString, panel, converter, temperature, maxToken);
showStatusBarProgress(longRunTask);
}
}).catch((err) => {
Expand All @@ -116,6 +110,12 @@ function activate(context) {
context.subscriptions.push(disposable);
}

/**
 * Read a single value from the VS Code workspace configuration.
 * @param {string} config_key - Fully-qualified setting key, e.g. 'jenkins-log-reader.aiModel'.
 * @returns {*} The configured value, or undefined when the key is not set.
 */
function getConfig(config_key) {
  const configuration = vscode.workspace.getConfiguration();
  return configuration.get(config_key);
}

function showStatusBarProgress(task) {
vscode.window.withProgress(
{
Expand All @@ -130,12 +130,12 @@ function showStatusBarProgress(task) {
}


async function aiAnalyse(localAi, model, promptString, panel, converter) {
localAi.chat.completions.create({
async function aiAnalyse(localAi, model, promptString, panel, converter, temperature, maxToken) {
await localAi.chat.completions.create({
model: model,
messages: [{ role: 'assistant', content: promptString }],
temperature: 0.8,
max_tokens: 8192,
temperature: temperature,
max_tokens: maxToken,
}).then(data => {
return JSON.stringify(data);
}).then(data => {
Expand All @@ -151,9 +151,9 @@ async function aiAnalyse(localAi, model, promptString, panel, converter) {
}

/**
 * Keep only the tail of an over-long log string.
 * Jenkins logs can be huge; only the last `size` characters are useful for analysis.
 * @param {string} inputString - Raw Jenkins log text.
 * @param {number} [size=5120] - Maximum number of trailing characters to keep
 *   (defaults to 5120, matching the `jenkins-log-reader.jenkinsLogSize` default).
 * @returns {string} The last `size` characters, or the whole string if it is short enough.
 */
function keepLongTail(inputString, size = 5120) {
  if (inputString.length > size) {
    return inputString.slice(-size);
  }
  return inputString;
}
Expand Down
29 changes: 23 additions & 6 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
"name": "jenkins-log-reader",
"displayName": "jenkins-log-reader",
"description": "read jenkins log, analyse with local AI.",
"version": "0.0.13",
"version": "0.0.14",
"engines": {
"vscode": "^1.88.0"
},
Expand Down Expand Up @@ -32,11 +32,11 @@
"configuration": {
"title": "Jenkins Job Log Reader Configuration",
"properties": {
"jenkins-log-reader.jenkinsUrl": {
"type": "string",
"default": "",
"jenkins-log-reader.jenkinsLogSize": {
"type": "number",
"default": 5120,
"order": 0,
"description": "Jenkins Instance URL"
"description": "Jenkins Log Size for Analysis"
},
"jenkins-log-reader.jenkinsUsername": {
"type": "string",
Expand All @@ -52,6 +52,7 @@
},
"jenkins-log-reader.aiModelUrl": {
"type": "string",
"title": "AI Model URL",
"default": "http://localhost:11434/v1",
"order": 3,
"description": "Local AI Model URL"
Expand All @@ -63,14 +64,30 @@
"qwen:7b"
],
"default": "llama3",
"title": "AI Model",
"order": 4,
"description": "AI Model"
},
"jenkins-log-reader.aiPrompt": {
"type": "string",
"title": "AI Prompt",
"default": "Please analyse below jenkins job log, figure out the failure reason:\n$PROMPT$",
"order": 5,
"description": "Local AI Model URL"
"description": "Local AI Prompt, $PROMPT$ will be replaced by log information."
},
"jenkins-log-reader.aiTemperature": {
"type": "number",
"title": "AI Temperature",
"default": 0.3,
"order": 6,
"description": "The more temperature is, the model will use more \"creativity\", and the less temperature instruct model to be \"less creative\", but following your prompt stronger."
},
"jenkins-log-reader.aiMaxToken": {
"type": "number",
"title": "AI Max Token",
"default": 8192,
"order": 7,
"description": "Max token response from AI model."
}
}
},
Expand Down
Binary file modified resources/jenkins_reader.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.

0 comments on commit f622b93

Please sign in to comment.