
Commit 5f9ace4

Linted JS code, packaged Prism (#884)

* Packaged prism.
* More exports, code cleanup to address eslint issues.

1 parent e631405 commit 5f9ace4

File tree

4 files changed: +22 -49 lines changed


scalene/scalene-gui/index.html.template (-3)

@@ -79,9 +79,6 @@
 gtag('config', 'G-4JXPHEBMTY');
 </script>

-<script>
-{{ prism_js }}
-</script>
 <script>
 {{ tablesort_js }}
 </script>

scalene/scalene-gui/prism.js (+1 -1)

@@ -18,7 +18,7 @@ var _self =
  * @namespace
  * @public
  */
-var Prism = (function (_self) {
+export var Prism = (function (_self) {
   // Private helper vars
   var lang = /(?:^|\s)lang(?:uage)?-([\w-]+)(?=\s|$)/i;
   var uniqueId = 0;
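Making Prism a named export is what allows it to be packaged: the bundler can now resolve it as an ES module, and scalene-gui.js imports it directly (first hunk below) instead of relying on the inline {{ prism_js }} script deleted from the template above. A minimal sketch of the consuming side, using Prism's standard highlightAll entry point; the call site is illustrative, not part of this commit:

import { Prism } from "./prism.js";

// Re-scan the document and highlight every marked code block.
Prism.highlightAll();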

scalene/scalene-gui/scalene-gui-bundle.js (+1 -1)

(Generated file; diff not rendered.)

scalene/scalene-gui/scalene-gui.js (+20 -44)

@@ -4,8 +4,7 @@ import {
 } from "@aws-sdk/client-bedrock-runtime";
 import { Buffer } from "buffer";
 window.Buffer = Buffer;
-
-/// <reference types="aws-sdk" />
+import { Prism } from "./prism.js";

 export function vsNavigate(filename, lineno) {
   // If we are in VS Code, clicking on a line number in Scalene's web UI will navigate to that line in the source code.
@@ -16,7 +15,9 @@ export function vsNavigate(filename, lineno) {
       filePath: filename,
       lineNumber: lineno,
     });
-  } catch {}
+  } catch {
+    // Do nothing
+  }
 }

 function generateScaleneOptimizedCodeRequest(
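This rewrite, and the matching one in checkApiKey below, addresses ESLint's no-empty rule: it flags empty block statements but skips blocks that contain a comment. A small illustration of the rule's behavior; riskyCall is a hypothetical function:

try {
  riskyCall();
} catch {} // flagged by no-empty

try {
  riskyCall();
} catch {
  // Intentionally ignored; the comment satisfies no-empty.
}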
@@ -109,10 +110,6 @@
   return promptParts.join("");
 }

-const recommendedLibraries = ["Cython", "Dask"]; // Add any domain-specific libraries here
-
-// const prompt = generateScaleneOptimizedCodeRequest(context, sourceCode, line, recommendedLibraries, true);
-
 function extractPythonCodeBlock(markdown) {
   // Pattern to match code blocks optionally tagged with "python"
   // - ``` optionally followed by "python"
@@ -186,7 +183,9 @@ function checkApiKey(apiKey) {
   (async () => {
     try {
       window.localStorage.setItem("scalene-api-key", apiKey);
-    } catch {}
+    } catch {
+      // Do nothing if key not found
+    }
     // If the API key is empty, clear the status indicator.
     if (apiKey.length === 0) {
       document.getElementById("valid-api-key").innerHTML = "";
@@ -237,7 +236,7 @@ function extractCode(text) {
   return code_block;
 }

-async function sendPromptToOpenAI(prompt, len, apiKey) {
+async function sendPromptToOpenAI(prompt, apiKey) {
   const endpoint = "https://api.openai.com/v1/chat/completions";
   const model = document.getElementById("language-model-openai").value;

@@ -312,7 +311,7 @@ async function sendPromptToOpenAI(prompt, len, apiKey) {
   }
 }

-async function sendPromptToAzureOpenAI(prompt, len, apiKey, apiUrl, aiModel) {
+async function sendPromptToAzureOpenAI(prompt, apiKey, apiUrl, aiModel) {
   const apiVersion = document.getElementById("azure-api-model-version").value;
   const endpoint = `${apiUrl}/openai/deployments/${aiModel}/chat/completions?api-version=${apiVersion}`;

@@ -375,7 +374,7 @@ async function sendPromptToAzureOpenAI(prompt, len, apiKey, apiUrl, aiModel) {
   }
 }

-async function sendPromptToAmazon(prompt, len) {
+async function sendPromptToAmazon(prompt) {
   const accessKeyId =
     document.getElementById("aws-access-key").value ||
     localStorage.getItem("aws-access-key");
@@ -387,9 +386,6 @@ async function sendPromptToAmazon(prompt, len) {
     localStorage.getItem("aws-region") ||
     "us-east-1";

-  // Format the prompt
-  const formattedPrompt = `Human: ${prompt}\nAssistant:`;
-
   // Configure AWS Credentials
   const credentials = {
     accessKeyId: accessKeyId,
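The deleted formattedPrompt dates from Claude's older text-completion format ("Human: ... Assistant:"); the Bedrock Messages API body assembled in the next hunk replaces it, so the variable had no remaining reader. For context, a sketch of how the credentials configured here and a params request object are typically used with @aws-sdk/client-bedrock-runtime; the enclosing async function and the region and params bindings are assumed:

import {
  BedrockRuntimeClient,
  InvokeModelCommand,
} from "@aws-sdk/client-bedrock-runtime";

const client = new BedrockRuntimeClient({ region: region, credentials: credentials });
const response = await client.send(new InvokeModelCommand(params));
// The response body is a byte array containing the model's JSON reply.
const reply = JSON.parse(new TextDecoder().decode(response.body));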
@@ -406,7 +402,7 @@ async function sendPromptToAmazon(prompt, len) {
     "modelId": "us.anthropic.claude-3-5-sonnet-20241022-v2:0",
     "body": JSON.stringify({
       "anthropic_version": "bedrock-2023-05-31",
-      "max_tokens": 1024,
+      "max_tokens": 65536, // arbitrary large number
       "messages": [
         {
           "role": "user",
@@ -438,7 +434,7 @@ async function sendPromptToAmazon(prompt, len) {
   }
 }

-async function sendPromptToOllama(prompt, len, model, ipAddr, portNum) {
+async function sendPromptToOllama(prompt, model, ipAddr, portNum) {
   const url = `http://${ipAddr}:${portNum}/api/chat`;
   const headers = { "Content-Type": "application/json" };
   const body = JSON.stringify({
@@ -589,16 +585,6 @@ async function optimizeCode(imports, code, line, context) {

   const optimizePerformancePrompt = `Optimize the following${lineOf}Python code:\n\n${context}\n\n# Start of code\n\n${code}\n\n# End of code\n\nRewrite the above Python code only from "Start of code" to "End of code", to make it more efficient WITHOUT CHANGING ITS RESULTS. Assume the code has already executed all these imports; do NOT include them in the optimized code:\n\n${imports}\n\nUse native libraries if that would make it faster than pure Python. Consider using the following other libraries, if appropriate:\n\n${libraries}\n\nYour output should only consist of valid Python code. Output the resulting Python with brief explanations only included as comments prefaced with #. Include a detailed explanatory comment before the code, starting with the text "# Proposed optimization:". Make the code as clear and simple as possible, while also making it as fast and memory-efficient as possible. Use vectorized operations${useGPUstring}whenever it would substantially increase performance, and quantify the speedup in terms of orders of magnitude. Eliminate as many for loops, while loops, and list or dict comprehensions as possible, replacing them with vectorized equivalents. If the performance is not likely to increase, leave the code unchanged. Fix any errors in the optimized code. Optimized${lineOf}code:`;

-  const context_ollama = "";
-  const optimizePerformancePrompt_ollama_prev = `Optimize the following${lineOf}Python code:\n\n${context_ollama}\n\n# Start of code\n\n${code}\n\n# End of code\n\nRewrite the above Python code only from "Start of code" to "End of code", to make it more efficient WITHOUT CHANGING ITS RESULTS. Only output your result in JSON, with the optimized code in "code". Optimized${lineOf}code:`;
-
-  const optimizePerformancePrompt_ollama_pp = `Rewrite the following Python code to make it run faster. Use vectorization if possible, eliminating as many loops as possible. Try to reduce computational complexity of operations. Only output the optimized code in JSON with the key 'code'. Original code: ${code}. Optimized code:`;
-
-  // TODO parameterize based on CPU utilization, Python vs. C time, GPU choice, memory efficiency.
-  const optimizePerformancePrompt_ollama = `# Original code\n${code}\n\n# This code is an optimized version of the original code that dramatically improves its performance. Whenever possible, the code has been changed to use native libraries and vectorization, and data structures like lists have been replaced by sets or dicts. Do not change any function names. Optimized code:\n`;
-
-  const pure_optimizePerformancePrompt = `Optimize the following${lineOf}Python code:\n\n${context}\n\n# Start of code\n\n${code}\n\n# End of code\n\nRewrite the above Python code only from "Start of code" to "End of code", to make it more efficient WITHOUT CHANGING ITS RESULTS. Assume the code has already executed all these imports; do NOT include them in the optimized code:\n\n${imports}\n\nONLY USE PURE PYTHON.\n\nYour output should only consist of valid Python code. Output the resulting Python with brief explanations only included as comments prefaced with #. Include a detailed explanatory comment before the code, starting with the text "# Proposed optimization:". Make the code as clear and simple as possible, while also making it as fast and memory-efficient as possible. If the performance is not likely to increase, leave the code unchanged. Fix any errors in the optimized code. Optimized${lineOf}code:`;
-
   const memoryEfficiencyPrompt = `Optimize the following${lineOf} Python code:\n\n${context}\n\n# Start of code\n\n${code}\n\n\n# End of code\n\nRewrite the above Python code only from "Start of code" to "End of code", to make it more memory-efficient WITHOUT CHANGING ITS RESULTS. Assume the code has already executed all these imports; do NOT include them in the optimized code:\n\n${imports}\n\nUse native libraries if that would make it more space efficient than pure Python. Consider using the following other libraries, if appropriate:\n\n${libraries}\n\nYour output should only consist of valid Python code. Output the resulting Python with brief explanations only included as comments prefaced with #. Include a detailed explanatory comment before the code, starting with the text "# Proposed optimization:". Make the code as clear and simple as possible, while also making it as fast and memory-efficient as possible. Use native libraries whenever possible to reduce memory consumption; invoke del on variables and array elements as soon as it is safe to do so. If the memory consumption is not likely to be reduced, leave the code unchanged. Fix any errors in the optimized code. Optimized${lineOf}code:`;

   const optimizePerf = document.getElementById("optimize-performance").checked;
@@ -613,15 +599,11 @@ async function optimizeCode(imports, code, line, context) {
   // Just use big prompt maybe FIXME
   prompt = bigPrompt;

-  // Use number of words in the original code as a proxy for the number of tokens.
-  const numWords = code.match(/\b\w+\b/g).length;
-
   switch (document.getElementById("service-select").value) {
     case "openai": {
       console.log(prompt);
       const result = await sendPromptToOpenAI(
         prompt,
-        Math.max(numWords * 4, 500),
         apiKey,
       );
       return extractCode(result);
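numWords existed only to feed the len argument that the signature changes above removed, so both the constant and each Math.max(numWords * 4, 500) call-site argument became dead code that ESLint's no-unused-vars would flag. Deleting them is the cleanest fix; an alternative for parameters that must remain, shown here purely for illustration (sendPrompt is hypothetical), is underscore-prefixing combined with the rule's argsIgnorePattern option:

// ESLint config: "no-unused-vars": ["error", { "argsIgnorePattern": "^_" }]
async function sendPrompt(prompt, _len, apiKey) {
  // _len is intentionally unused; the pattern above keeps the linter quiet.
}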
@@ -632,7 +614,6 @@ async function optimizeCode(imports, code, line, context) {
       // console.log(optimizePerformancePrompt_ollama);
       const result = await sendPromptToOllama(
         prompt, // optimizePerformancePrompt_ollama,
-        Math.max(numWords * 4, 500),
         document.getElementById("language-model-local").value,
         document.getElementById("local-ip").value,
         document.getElementById("local-port").value,
@@ -648,7 +629,6 @@ async function optimizeCode(imports, code, line, context) {
       console.log(prompt);
       const result = await sendPromptToAmazon(
         prompt,
-        Math.max(numWords * 4, 500),
       );
       return extractCode(result);
     }
@@ -659,7 +639,6 @@ async function optimizeCode(imports, code, line, context) {
       let azureOpenAiModel = document.getElementById("azure-api-model").value;
       const result = await sendPromptToAzureOpenAI(
         prompt,
-        Math.max(numWords * 4, 500),
         apiKey,
         azureOpenAiEndpoint,
         azureOpenAiModel,
@@ -806,7 +785,6 @@ function time_consumed_str(time_in_ms) {
   let hours = Math.floor(time_in_ms / 3600000);
   let minutes = Math.floor((time_in_ms % 3600000) / 60000);
   let seconds = Math.floor((time_in_ms % 60000) / 1000);
-  let hours_exact = time_in_ms / 3600000;
   let minutes_exact = (time_in_ms % 3600000) / 60000;
   let seconds_exact = (time_in_ms % 60000) / 1000;
   if (hours > 0) {
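hours_exact was computed but never read; the surviving integer arithmetic is unchanged. As a worked example of that arithmetic, 3,754,000 ms decomposes as:

// hours   = Math.floor(3754000 / 3600000)           -> 1
// minutes = Math.floor((3754000 % 3600000) / 60000) -> 2
// seconds = Math.floor((3754000 % 60000) / 1000)    -> 34
// i.e., 1 hour, 2 minutes, 34 seconds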
@@ -946,8 +924,8 @@ function makeGPUPie(util, gpu_device, params) {
     autosize: {
       contains: "padding",
     },
-    width: 30,
-    height: 20,
+    width: params.width, // 30,
+    height: params.height, // 20,
     padding: 0,
     data: {
       values: [
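Moving width and height into params lets each caller size the pie; the makeProfileLine hunk further down passes the original 30x20 explicitly. A reduced, hypothetical sketch of the resulting Vega-Lite spec shape, with everything except the sizing elided:

function makeGPUPieSpec(params) {
  return {
    $schema: "https://vega.github.io/schema/vega-lite/v5.json",
    autosize: { contains: "padding" },
    width: params.width, // previously hard-coded to 30
    height: params.height, // previously hard-coded to 20
    padding: 0,
    // ...data, mark, and encoding definitions elided...
  };
}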
@@ -1159,7 +1137,7 @@ function makeMemoryBar(memory, title, python_percent, total, color, params) {
 }

 function makeSparkline(samples, max_x, max_y, leak_velocity = 0, params) {
-  const values = samples.map((v, i) => {
+  const values = samples.map((v) => {
     let leak_str = "";
     if (leak_velocity != 0) {
       leak_str = `; possible leak (${memory_consumed_str(leak_velocity)}/s)`;
@@ -1181,7 +1159,6 @@ function makeSparkline(samples, max_x, max_y, leak_velocity = 0, params) {
     params.height -= 10; // FIXME should be actual height of font
   }

-  const strokeWidth = 1; // 0.25;
   return {
     $schema: "https://vega.github.io/schema/vega-lite/v5.json",
     data: { values: values },
@@ -1594,7 +1571,7 @@ function makeProfileLine(
     gpu_pies.push(
       makeGPUPie(line.n_gpu_percent, prof.gpu_device, {
         height: 20,
-        width: 100,
+        width: 30,
       }),
     );
     // gpu_pies.push(makeGPUBar(line.n_gpu_percent, prof.gpu_device, { height: 20, width: 100 }));
@@ -1813,7 +1790,6 @@ async function display(prof) {
   let cpu_native = 0;
   let cpu_system = 0;
   let mem_python = 0;
-  let mem_native = 0;
   let max_alloc = 0;
   let cp = {};
   let cn = {};
@@ -2035,7 +2011,7 @@
       `F${escape(ff[0])}-nonline`,
     );
     for (let i = 0; i < allHeaders.length; i++) {
-      allHeaders[i].addEventListener("click", (e) => {
+      allHeaders[i].addEventListener("click", () => {
        const all = document.getElementsByClassName(
          `F${escape(ff[0])}-blankline`,
        );
@@ -2050,7 +2026,7 @@
   for (const ff of files) {
     document
       .getElementById(`${escape(ff[0])}-lineProfile`)
-      .addEventListener("click", (e) => {
+      .addEventListener("click", () => {
        const all = document.getElementsByClassName(
          `F${escape(ff[0])}-blankline`,
        );
@@ -2120,7 +2096,7 @@ export function load(profile) {
   })();
 }

-function loadFetch() {
+export function loadFetch() {
   (async () => {
     let resp = await fetch("profile.json");
     let profile = await resp.json();
@@ -2142,7 +2118,7 @@ function doSomething(e) {
   load(profile);
 }

-function loadDemo() {
+export function loadDemo() {
   load(example_profile);
 }

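loadFetch and loadDemo are exported, like load above, so that they remain reachable once the module is bundled; unexported top-level functions are typically renamed or tree-shaken away by a bundler and cannot be called from inline HTML handlers. A sketch of the wiring this enables; the window assignments are illustrative, and the actual bundle entry point may differ:

import { load, loadFetch, loadDemo } from "./scalene-gui.js";

// Expose entry points for the page, e.g. onclick="loadDemo()".
window.loadFetch = loadFetch;
window.loadDemo = loadDemo;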