diff --git a/_sass/color_schemes/onnxruntime.scss b/_sass/color_schemes/onnxruntime.scss
index 4e0cc934e1881..a5e5cd7de7a14 100644
--- a/_sass/color_schemes/onnxruntime.scss
+++ b/_sass/color_schemes/onnxruntime.scss
@@ -2,12 +2,83 @@ $link-color: #226aca;
 $btn-primary-color: #226aca;
 // Code is too light in default theme
 //
-.highlight .n {
-  color: #555 !important;
-}
-.highlight .nn {
-  color: #555 !important;
-}
-.highlight .c1 {
-  color: #188616 !important;
-}
+// .highlight .n {
+//   color: #555 !important;
+// }
+// .highlight .nn {
+//   color: #555 !important;
+// }
+// .highlight .c1 {
+//   color: #188616 !important;
+// }
+
+.highlight .hll { background-color: #49483e; }
+.highlight { background: #272822; color: #f8f8f2; }
+.highlight .c { color: #949076; }
+.highlight .err { background-color: #1e0010; color: #eb0083; }
+.highlight .k { color: #66d9ef; }
+.highlight .l { color: #ae81ff; }
+.highlight .n { color: #f8f8f2; }
+.highlight .o { color: #f94e8a; }
+.highlight .p { color: #f8f8f2; }
+.highlight .ch { color: #949076; }
+.highlight .cm { color: #949076; }
+.highlight .cp { color: #949076; }
+.highlight .cpf { color: #949076; }
+.highlight .c1 { color: #949076; }
+.highlight .cs { color: #949076; }
+.highlight .gd { color: #f94e8a; }
+.highlight .ge { font-style: italic; }
+.highlight .gi { color: #a6e22e; }
+.highlight .gs { font-weight: bold; }
+.highlight .gu { color: #949076; }
+.highlight .kc { color: #66d9ef; }
+.highlight .kd { color: #66d9ef; }
+.highlight .kn { color: #f94e8a; }
+.highlight .kp { color: #66d9ef; }
+.highlight .kr { color: #66d9ef; }
+.highlight .kt { color: #66d9ef; }
+.highlight .ld { color: #e6db74; }
+.highlight .m { color: #ae81ff; }
+.highlight .s { color: #e6db74; }
+.highlight .na { color: #a6e22e; }
+.highlight .nb { color: #f8f8f2; }
+.highlight .nc { color: #a6e22e; }
+.highlight .no { color: #66d9ef; }
+.highlight .nd { color: #a6e22e; }
+.highlight .ni { color: #f8f8f2; }
+.highlight .ne { color: #a6e22e; }
+.highlight .nf { color: #a6e22e; }
+.highlight .nl { color: #f8f8f2; }
+.highlight .nn { color: #f8f8f2; }
+.highlight .nx { color: #a6e22e; }
+.highlight .py { color: #f8f8f2; }
+.highlight .nt { color: #f94e8a; }
+.highlight .nv { color: #f8f8f2; }
+.highlight .ow { color: #f94e8a; }
+.highlight .w { color: #f8f8f2; }
+.highlight .mb { color: #ae81ff; }
+.highlight .mf { color: #ae81ff; }
+.highlight .mh { color: #ae81ff; }
+.highlight .mi { color: #ae81ff; }
+.highlight .mo { color: #ae81ff; }
+.highlight .sa { color: #e6db74; }
+.highlight .sb { color: #e6db74; }
+.highlight .sc { color: #e6db74; }
+.highlight .dl { color: #e6db74; }
+.highlight .sd { color: #e6db74; }
+.highlight .s2 { color: #e6db74; }
+.highlight .se { color: #ae81ff; }
+.highlight .sh { color: #e6db74; }
+.highlight .si { color: #e6db74; }
+.highlight .sx { color: #e6db74; }
+.highlight .sr { color: #e6db74; }
+.highlight .s1 { color: #e6db74; }
+.highlight .ss { color: #e6db74; }
+.highlight .bp { color: #f8f8f2; }
+.highlight .fm { color: #a6e22e; }
+.highlight .vc { color: #f8f8f2; }
+.highlight .vg { color: #f8f8f2; }
+.highlight .vi { color: #f8f8f2; }
+.highlight .vm { color: #f8f8f2; }
+.highlight .il { color: #ae81ff; }
\ No newline at end of file
diff --git a/docs/tutorials/on-device-training/ios-app.md b/docs/tutorials/on-device-training/ios-app.md
index 76f485a2e2648..fff1347923ef0 100644
--- a/docs/tutorials/on-device-training/ios-app.md
+++ b/docs/tutorials/on-device-training/ios-app.md
@@ -15,7 +15,7 @@ In this tutorial, we will build a simple speaker identification app that learns
 Here is what the application will look like:
 
-
+application demo, with buttons for voice, train, and infer.
 
 ## Introduction
 
 We will guide you through the process of building an iOS application that can train a simple audio classification model using on-device training techniques. The tutorial showcases the `transfer learning` technique where knowledge gained from training a model on one task is leveraged to improve the performance of a model on a different but related task. Instead of starting the learning process from scratch, transfer learning allows us to transfer the knowledge or features learned by a pre-trained model to a new task.
@@ -30,28 +30,22 @@ In the tutorial, we will:
 
 ## Contents
 
-- [Introduction](#introduction)
-- [Prerequisites](#prerequisites)
-- [Generating the training artifacts](#generating-the-training-artifacts)
-  - [Export the model to ONNX](#export-the-model-to-onnx)
-  - [Define the trainable and non trainable parameters](#define-the-trainable-and-non-trainable-parameters)
-  - [Generate the training artifacts](#generate-the-training-artifacts)
-
-- [Building the iOS application](#building-the-ios-application)
+- [Building an iOS Application](#building-an-ios-application)
+  - [Introduction](#introduction)
+  - [Contents](#contents)
+  - [Prerequisites](#prerequisites)
+  - [Generating the training artifacts](#generating-the-training-artifacts)
+  - [Building the iOS application](#building-the-ios-application)
   - [Xcode Setup](#xcode-setup)
   - [Application Overview](#application-overview)
   - [Training the model](#training-the-model)
-    - [Loading the training artifacts and initializing training session](#loading-the-training-artifacts-and-initializing-training-session)
-    - [Training the model](#training-the-model-1)
-    - [Exporting the trained model](#exporting-the-trained-model)
-    - [Inference with the trained model](#inference-with-the-trained-model)
   - [Recording Audio](#recording-audio)
   - [Train View](#train-view)
   - [Infer View](#infer-view)
   - [ContentView](#contentview)
-- [Running the iOS application](#running-the-ios-application)
-- [Conclusion](#conclusion)
+  - [Running the iOS application](#running-the-ios-application)
+  - [Conclusion](#conclusion)
 
 ## Prerequisites